Error Trace


Bug # 154

__CPAchecker_initialize()
{
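/*
 * The declarations replayed below are the global typedefs and structure
 * definitions that CPAchecker records on the path through the implicit
 * __CPAchecker_initialize() entry. The number at the start of each line
 * is the line of the original kernel source file the declaration was
 * taken from, not a position in this trace.
 */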
19 typedef signed char __s8;
20 typedef unsigned char __u8;
22 typedef short __s16;
23 typedef unsigned short __u16;
25 typedef int __s32;
26 typedef unsigned int __u32;
29 typedef long long __s64;
30 typedef unsigned long long __u64;
15 typedef signed char s8;
16 typedef unsigned char u8;
19 typedef unsigned short u16;
21 typedef int s32;
22 typedef unsigned int u32;
24 typedef long long s64;
25 typedef unsigned long long u64;
14 typedef long __kernel_long_t;
15 typedef unsigned long __kernel_ulong_t;
27 typedef int __kernel_pid_t;
48 typedef unsigned int __kernel_uid32_t;
49 typedef unsigned int __kernel_gid32_t;
71 typedef __kernel_ulong_t __kernel_size_t;
72 typedef __kernel_long_t __kernel_ssize_t;
87 typedef long long __kernel_loff_t;
88 typedef __kernel_long_t __kernel_time_t;
89 typedef __kernel_long_t __kernel_clock_t;
90 typedef int __kernel_timer_t;
91 typedef int __kernel_clockid_t;
32 typedef __u16 __le16;
33 typedef __u16 __be16;
34 typedef __u32 __le32;
35 typedef __u32 __be32;
36 typedef __u64 __le64;
37 typedef __u64 __be64;
39 typedef __u16 __sum16;
40 typedef __u32 __wsum;
257 struct kernel_symbol { unsigned long value; const char *name; } ;
33 struct module ;
12 typedef __u32 __kernel_dev_t;
15 typedef __kernel_dev_t dev_t;
18 typedef unsigned short umode_t;
21 typedef __kernel_pid_t pid_t;
26 typedef __kernel_clockid_t clockid_t;
29 typedef _Bool bool;
31 typedef __kernel_uid32_t uid_t;
32 typedef __kernel_gid32_t gid_t;
45 typedef __kernel_loff_t loff_t;
54 typedef __kernel_size_t size_t;
59 typedef __kernel_ssize_t ssize_t;
69 typedef __kernel_time_t time_t;
102 typedef __s32 int32_t;
106 typedef __u8 uint8_t;
108 typedef __u32 uint32_t;
111 typedef __u64 uint64_t;
133 typedef unsigned long sector_t;
134 typedef unsigned long blkcnt_t;
152 typedef u64 dma_addr_t;
157 typedef unsigned int gfp_t;
158 typedef unsigned int fmode_t;
161 typedef u64 phys_addr_t;
166 typedef phys_addr_t resource_size_t;
176 struct __anonstruct_atomic_t_6 { int counter; } ;
176 typedef struct __anonstruct_atomic_t_6 atomic_t;
181 struct __anonstruct_atomic64_t_7 { long counter; } ;
181 typedef struct __anonstruct_atomic64_t_7 atomic64_t;
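/*
 * Illustrative sketch (not part of the recorded trace): atomic_t and
 * atomic64_t are plain integers wrapped in a struct so they can only be
 * touched through atomic helpers. A minimal self-contained analogue,
 * assuming the GCC/Clang __atomic builtins:
 *
 *     typedef struct { int counter; } atomic_t;
 *
 *     static inline int atomic_read(const atomic_t *v)
 *     { return __atomic_load_n(&v->counter, __ATOMIC_RELAXED); }
 *
 *     static inline void atomic_inc(atomic_t *v)
 *     { __atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED); }
 */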
182 struct list_head { struct list_head *next; struct list_head *prev; } ;
187 struct hlist_node ;
187 struct hlist_head { struct hlist_node *first; } ;
191 struct hlist_node { struct hlist_node *next; struct hlist_node **pprev; } ;
202 struct callback_head { struct callback_head *next; void (*func)(struct callback_head *); } ;
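/*
 * Illustrative sketch (not part of the recorded trace): struct list_head
 * is the kernel's intrusive circular doubly-linked list; the node is
 * embedded in the containing object, and an empty list points at itself.
 * A minimal self-contained analogue:
 *
 *     struct list_head { struct list_head *next, *prev; };
 *
 *     static inline void INIT_LIST_HEAD(struct list_head *h)
 *     { h->next = h; h->prev = h; }
 *
 *     static inline void list_add(struct list_head *n, struct list_head *h)
 *     { n->next = h->next; n->prev = h; h->next->prev = n; h->next = n; }
 *
 * struct hlist_head/hlist_node is the single-headed variant used for
 * hash-table buckets, and callback_head carries deferred (RCU) callbacks.
 */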
125 typedef void (*ctor_fn_t)();
67 struct ctl_table ;
279 struct _ddebug { const char *modname; const char *function; const char *filename; const char *format; unsigned int lineno; unsigned char flags; } ;
58 struct device ;
64 struct net_device ;
467 struct file_operations ;
479 struct completion ;
480 struct pt_regs ;
27 union __anonunion___u_9 { struct list_head *__val; char __c[1U]; } ;
556 struct bug_entry { int bug_addr_disp; int file_disp; unsigned short line; unsigned short flags; } ;
111 struct timespec ;
112 struct compat_timespec ;
113 struct __anonstruct_futex_25 { u32 *uaddr; u32 val; u32 flags; u32 bitset; u64 time; u32 *uaddr2; } ;
113 struct __anonstruct_nanosleep_26 { clockid_t clockid; struct timespec *rmtp; struct compat_timespec *compat_rmtp; u64 expires; } ;
113 struct pollfd ;
113 struct __anonstruct_poll_27 { struct pollfd *ufds; int nfds; int has_timeout; unsigned long tv_sec; unsigned long tv_nsec; } ;
113 union __anonunion____missing_field_name_24 { struct __anonstruct_futex_25 futex; struct __anonstruct_nanosleep_26 nanosleep; struct __anonstruct_poll_27 poll; } ;
113 struct restart_block { long int (*fn)(struct restart_block *); union __anonunion____missing_field_name_24 __annonCompField4; } ;
39 struct page ;
26 struct task_struct ;
27 struct mm_struct ;
288 struct pt_regs { unsigned long r15; unsigned long r14; unsigned long r13; unsigned long r12; unsigned long bp; unsigned long bx; unsigned long r11; unsigned long r10; unsigned long r9; unsigned long r8; unsigned long ax; unsigned long cx; unsigned long dx; unsigned long si; unsigned long di; unsigned long orig_ax; unsigned long ip; unsigned long cs; unsigned long flags; unsigned long sp; unsigned long ss; } ;
66 struct __anonstruct____missing_field_name_30 { unsigned int a; unsigned int b; } ;
66 struct __anonstruct____missing_field_name_31 { u16 limit0; u16 base0; unsigned char base1; unsigned char type; unsigned char s; unsigned char dpl; unsigned char p; unsigned char limit; unsigned char avl; unsigned char l; unsigned char d; unsigned char g; unsigned char base2; } ;
66 union __anonunion____missing_field_name_29 { struct __anonstruct____missing_field_name_30 __annonCompField5; struct __anonstruct____missing_field_name_31 __annonCompField6; } ;
66 struct desc_struct { union __anonunion____missing_field_name_29 __annonCompField7; } ;
13 typedef unsigned long pteval_t;
14 typedef unsigned long pmdval_t;
16 typedef unsigned long pgdval_t;
17 typedef unsigned long pgprotval_t;
19 struct __anonstruct_pte_t_32 { pteval_t pte; } ;
19 typedef struct __anonstruct_pte_t_32 pte_t;
21 struct pgprot { pgprotval_t pgprot; } ;
256 typedef struct pgprot pgprot_t;
258 struct __anonstruct_pgd_t_33 { pgdval_t pgd; } ;
258 typedef struct __anonstruct_pgd_t_33 pgd_t;
297 struct __anonstruct_pmd_t_35 { pmdval_t pmd; } ;
297 typedef struct __anonstruct_pmd_t_35 pmd_t;
423 typedef struct page *pgtable_t;
434 struct file ;
447 struct seq_file ;
483 struct thread_struct ;
485 struct cpumask ;
20 struct qspinlock { atomic_t val; } ;
33 typedef struct qspinlock arch_spinlock_t;
34 struct qrwlock { atomic_t cnts; arch_spinlock_t wait_lock; } ;
14 typedef struct qrwlock arch_rwlock_t;
247 struct math_emu_info { long ___orig_eip; struct pt_regs *regs; } ;
341 struct cpumask { unsigned long bits[128U]; } ;
15 typedef struct cpumask cpumask_t;
654 typedef struct cpumask *cpumask_var_t;
26 union __anonunion___u_42 { int __val; char __c[1U]; } ;
23 typedef atomic64_t atomic_long_t;
81 struct static_key { atomic_t enabled; } ;
22 struct tracepoint_func { void *func; void *data; int prio; } ;
28 struct tracepoint { const char *name; struct static_key key; void (*regfunc)(); void (*unregfunc)(); struct tracepoint_func *funcs; } ;
254 struct fregs_state { u32 cwd; u32 swd; u32 twd; u32 fip; u32 fcs; u32 foo; u32 fos; u32 st_space[20U]; u32 status; } ;
26 struct __anonstruct____missing_field_name_59 { u64 rip; u64 rdp; } ;
26 struct __anonstruct____missing_field_name_60 { u32 fip; u32 fcs; u32 foo; u32 fos; } ;
26 union __anonunion____missing_field_name_58 { struct __anonstruct____missing_field_name_59 __annonCompField13; struct __anonstruct____missing_field_name_60 __annonCompField14; } ;
26 union __anonunion____missing_field_name_61 { u32 padding1[12U]; u32 sw_reserved[12U]; } ;
26 struct fxregs_state { u16 cwd; u16 swd; u16 twd; u16 fop; union __anonunion____missing_field_name_58 __annonCompField15; u32 mxcsr; u32 mxcsr_mask; u32 st_space[32U]; u32 xmm_space[64U]; u32 padding[12U]; union __anonunion____missing_field_name_61 __annonCompField16; } ;
66 struct swregs_state { u32 cwd; u32 swd; u32 twd; u32 fip; u32 fcs; u32 foo; u32 fos; u32 st_space[20U]; u8 ftop; u8 changed; u8 lookahead; u8 no_update; u8 rm; u8 alimit; struct math_emu_info *info; u32 entry_eip; } ;
227 struct xstate_header { u64 xfeatures; u64 xcomp_bv; u64 reserved[6U]; } ;
233 struct xregs_state { struct fxregs_state i387; struct xstate_header header; u8 extended_state_area[0U]; } ;
254 union fpregs_state { struct fregs_state fsave; struct fxregs_state fxsave; struct swregs_state soft; struct xregs_state xsave; u8 __padding[4096U]; } ;
271 struct fpu { unsigned int last_cpu; unsigned char fpstate_active; unsigned char fpregs_active; unsigned char counter; union fpregs_state state; } ;
169 struct seq_operations ;
372 struct perf_event ;
377 struct __anonstruct_mm_segment_t_73 { unsigned long seg; } ;
377 typedef struct __anonstruct_mm_segment_t_73 mm_segment_t;
378 struct thread_struct { struct desc_struct tls_array[3U]; unsigned long sp0; unsigned long sp; unsigned short es; unsigned short ds; unsigned short fsindex; unsigned short gsindex; unsigned long fsbase; unsigned long gsbase; struct perf_event *ptrace_bps[4U]; unsigned long debugreg6; unsigned long ptrace_dr7; unsigned long cr2; unsigned long trap_nr; unsigned long error_code; unsigned long *io_bitmap_ptr; unsigned long iopl; unsigned int io_bitmap_max; mm_segment_t addr_limit; unsigned char sig_on_uaccess_err; unsigned char uaccess_err; struct fpu fpu; } ;
69 typedef int pao_T__;
74 typedef int pao_T_____0;
33 struct lockdep_map ;
55 struct stack_trace { unsigned int nr_entries; unsigned int max_entries; unsigned long *entries; int skip; } ;
28 struct lockdep_subclass_key { char __one_byte; } ;
53 struct lock_class_key { struct lockdep_subclass_key subkeys[8U]; } ;
59 struct lock_class { struct hlist_node hash_entry; struct list_head lock_entry; struct lockdep_subclass_key *key; unsigned int subclass; unsigned int dep_gen_id; unsigned long usage_mask; struct stack_trace usage_traces[13U]; struct list_head locks_after; struct list_head locks_before; unsigned int version; unsigned long ops; const char *name; int name_version; unsigned long contention_point[4U]; unsigned long contending_point[4U]; } ;
144 struct lockdep_map { struct lock_class_key *key; struct lock_class *class_cache[2U]; const char *name; int cpu; unsigned long ip; } ;
207 struct held_lock { u64 prev_chain_key; unsigned long acquire_ip; struct lockdep_map *instance; struct lockdep_map *nest_lock; u64 waittime_stamp; u64 holdtime_stamp; unsigned short class_idx; unsigned char irq_context; unsigned char trylock; unsigned char read; unsigned char check; unsigned char hardirqs_off; unsigned short references; unsigned int pin_count; } ;
572 struct raw_spinlock { arch_spinlock_t raw_lock; unsigned int magic; unsigned int owner_cpu; void *owner; struct lockdep_map dep_map; } ;
32 typedef struct raw_spinlock raw_spinlock_t;
33 struct __anonstruct____missing_field_name_75 { u8 __padding[24U]; struct lockdep_map dep_map; } ;
33 union __anonunion____missing_field_name_74 { struct raw_spinlock rlock; struct __anonstruct____missing_field_name_75 __annonCompField19; } ;
33 struct spinlock { union __anonunion____missing_field_name_74 __annonCompField20; } ;
76 typedef struct spinlock spinlock_t;
23 struct __anonstruct_rwlock_t_76 { arch_rwlock_t raw_lock; unsigned int magic; unsigned int owner_cpu; void *owner; struct lockdep_map dep_map; } ;
23 typedef struct __anonstruct_rwlock_t_76 rwlock_t;
416 struct seqcount { unsigned int sequence; struct lockdep_map dep_map; } ;
52 typedef struct seqcount seqcount_t;
407 struct __anonstruct_seqlock_t_91 { struct seqcount seqcount; spinlock_t lock; } ;
407 typedef struct __anonstruct_seqlock_t_91 seqlock_t;
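/*
 * Illustrative sketch (not part of the recorded trace): seqcount_t lets
 * readers run lock-free and retry if a writer intervened. The writer
 * makes `sequence` odd while updating and even again when done; the real
 * read-side API is used like this:
 *
 *     unsigned seq;
 *     do {
 *         seq = read_seqcount_begin(&sc);   // spins while a write is live
 *         copy = shared_value;              // speculative read
 *     } while (read_seqcount_retry(&sc, seq));
 *
 * seqlock_t above simply pairs a seqcount with a spinlock for writers.
 */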
601 struct timespec { __kernel_time_t tv_sec; long tv_nsec; } ;
7 typedef __s64 time64_t;
83 struct user_namespace ;
22 struct __anonstruct_kuid_t_92 { uid_t val; } ;
22 typedef struct __anonstruct_kuid_t_92 kuid_t;
27 struct __anonstruct_kgid_t_93 { gid_t val; } ;
27 typedef struct __anonstruct_kgid_t_93 kgid_t;
139 struct kstat { u64 ino; dev_t dev; umode_t mode; unsigned int nlink; kuid_t uid; kgid_t gid; dev_t rdev; loff_t size; struct timespec atime; struct timespec mtime; struct timespec ctime; unsigned long blksize; unsigned long long blocks; } ;
36 struct vm_area_struct ;
38 struct __wait_queue_head { spinlock_t lock; struct list_head task_list; } ;
43 typedef struct __wait_queue_head wait_queue_head_t;
97 struct __anonstruct_nodemask_t_94 { unsigned long bits[16U]; } ;
97 typedef struct __anonstruct_nodemask_t_94 nodemask_t;
80 struct free_area { struct list_head free_list[6U]; unsigned long nr_free; } ;
92 struct pglist_data ;
93 struct zone_padding { char x[0U]; } ;
208 struct zone_reclaim_stat { unsigned long recent_rotated[2U]; unsigned long recent_scanned[2U]; } ;
221 struct lruvec { struct list_head lists[5U]; struct zone_reclaim_stat reclaim_stat; atomic_long_t inactive_age; struct pglist_data *pgdat; } ;
247 typedef unsigned int isolate_mode_t;
255 struct per_cpu_pages { int count; int high; int batch; struct list_head lists[3U]; } ;
268 struct per_cpu_pageset { struct per_cpu_pages pcp; s8 expire; s8 stat_threshold; s8 vm_stat_diff[21U]; } ;
278 struct per_cpu_nodestat { s8 stat_threshold; s8 vm_node_stat_diff[26U]; } ;
284 enum zone_type { ZONE_DMA = 0, ZONE_DMA32 = 1, ZONE_NORMAL = 2, ZONE_MOVABLE = 3, __MAX_NR_ZONES = 4 } ;
292 struct zone { unsigned long watermark[3U]; unsigned long nr_reserved_highatomic; long lowmem_reserve[4U]; int node; struct pglist_data *zone_pgdat; struct per_cpu_pageset *pageset; unsigned long zone_start_pfn; unsigned long managed_pages; unsigned long spanned_pages; unsigned long present_pages; const char *name; unsigned long nr_isolate_pageblock; wait_queue_head_t *wait_table; unsigned long wait_table_hash_nr_entries; unsigned long wait_table_bits; struct zone_padding _pad1_; struct free_area free_area[11U]; unsigned long flags; spinlock_t lock; struct zone_padding _pad2_; unsigned long percpu_drift_mark; unsigned long compact_cached_free_pfn; unsigned long compact_cached_migrate_pfn[2U]; unsigned int compact_considered; unsigned int compact_defer_shift; int compact_order_failed; bool compact_blockskip_flush; bool contiguous; struct zone_padding _pad3_; atomic_long_t vm_stat[21U]; } ;
560 struct zoneref { struct zone *zone; int zone_idx; } ;
585 struct zonelist { struct zoneref _zonerefs[4097U]; } ;
608 struct pglist_data { struct zone node_zones[4U]; struct zonelist node_zonelists[2U]; int nr_zones; unsigned long node_start_pfn; unsigned long node_present_pages; unsigned long node_spanned_pages; int node_id; wait_queue_head_t kswapd_wait; wait_queue_head_t pfmemalloc_wait; struct task_struct *kswapd; int kswapd_order; enum zone_type kswapd_classzone_idx; int kcompactd_max_order; enum zone_type kcompactd_classzone_idx; wait_queue_head_t kcompactd_wait; struct task_struct *kcompactd; spinlock_t numabalancing_migrate_lock; unsigned long numabalancing_migrate_next_window; unsigned long numabalancing_migrate_nr_pages; unsigned long totalreserve_pages; unsigned long min_unmapped_pages; unsigned long min_slab_pages; struct zone_padding _pad1_; spinlock_t lru_lock; spinlock_t split_queue_lock; struct list_head split_queue; unsigned long split_queue_len; struct lruvec lruvec; unsigned int inactive_ratio; unsigned long flags; struct zone_padding _pad2_; struct per_cpu_nodestat *per_cpu_nodestats; atomic_long_t vm_stat[26U]; } ;
13 struct optimistic_spin_queue { atomic_t tail; } ;
39 struct mutex { atomic_t count; spinlock_t wait_lock; struct list_head wait_list; struct task_struct *owner; void *magic; struct lockdep_map dep_map; } ;
67 struct mutex_waiter { struct list_head list; struct task_struct *task; void *magic; } ;
177 struct rw_semaphore ;
178 struct rw_semaphore { atomic_long_t count; struct list_head wait_list; raw_spinlock_t wait_lock; struct optimistic_spin_queue osq; struct task_struct *owner; struct lockdep_map dep_map; } ;
178 struct completion { unsigned int done; wait_queue_head_t wait; } ;
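/*
 * Illustrative sketch (not part of the recorded trace): a completion is
 * a one-shot condition built on a wait queue; `done` counts posted
 * completions and `wait` holds the sleepers:
 *
 *     DECLARE_COMPLETION(done);
 *     // waiting side:   wait_for_completion(&done);
 *     // signaling side: complete(&done);
 */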
446 union ktime { s64 tv64; } ;
41 typedef union ktime ktime_t;
1144 struct timer_list { struct hlist_node entry; unsigned long expires; void (*function)(unsigned long); unsigned long data; u32 flags; int start_pid; void *start_site; char start_comm[16U]; struct lockdep_map lockdep_map; } ;
254 struct hrtimer ;
255 enum hrtimer_restart ;
256 struct rb_node { unsigned long __rb_parent_color; struct rb_node *rb_right; struct rb_node *rb_left; } ;
41 struct rb_root { struct rb_node *rb_node; } ;
835 struct nsproxy ;
836 struct ctl_table_root ;
837 struct ctl_table_header ;
838 struct ctl_dir ;
38 typedef int proc_handler(struct ctl_table *, int, void *, size_t *, loff_t *);
58 struct ctl_table_poll { atomic_t event; wait_queue_head_t wait; } ;
97 struct ctl_table { const char *procname; void *data; int maxlen; umode_t mode; struct ctl_table *child; proc_handler *proc_handler; struct ctl_table_poll *poll; void *extra1; void *extra2; } ;
118 struct ctl_node { struct rb_node node; struct ctl_table_header *header; } ;
123 struct __anonstruct____missing_field_name_100 { struct ctl_table *ctl_table; int used; int count; int nreg; } ;
123 union __anonunion____missing_field_name_99 { struct __anonstruct____missing_field_name_100 __annonCompField21; struct callback_head rcu; } ;
123 struct ctl_table_set ;
123 struct ctl_table_header { union __anonunion____missing_field_name_99 __annonCompField22; struct completion *unregistering; struct ctl_table *ctl_table_arg; struct ctl_table_root *root; struct ctl_table_set *set; struct ctl_dir *parent; struct ctl_node *node; } ;
144 struct ctl_dir { struct ctl_table_header header; struct rb_root root; } ;
150 struct ctl_table_set { int (*is_seen)(struct ctl_table_set *); struct ctl_dir dir; } ;
155 struct ctl_table_root { struct ctl_table_set default_set; struct ctl_table_set * (*lookup)(struct ctl_table_root *, struct nsproxy *); int (*permissions)(struct ctl_table_header *, struct ctl_table *); } ;
278 struct workqueue_struct ;
279 struct work_struct ;
54 struct work_struct { atomic_long_t data; struct list_head entry; void (*func)(struct work_struct *); struct lockdep_map lockdep_map; } ;
107 struct delayed_work { struct work_struct work; struct timer_list timer; struct workqueue_struct *wq; int cpu; } ;
268 struct notifier_block ;
53 struct notifier_block { int (*notifier_call)(struct notifier_block *, unsigned long, void *); struct notifier_block *next; int priority; } ;
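/*
 * Illustrative sketch (not part of the recorded trace): notifier blocks
 * form a priority-ordered callback chain. A subscriber (my_event and
 * my_nb are hypothetical names) looks like:
 *
 *     static int my_event(struct notifier_block *nb,
 *                         unsigned long action, void *data)
 *     { return NOTIFY_OK; }
 *
 *     static struct notifier_block my_nb = { .notifier_call = my_event };
 */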
217 struct resource ;
64 struct resource { resource_size_t start; resource_size_t end; const char *name; unsigned long flags; unsigned long desc; struct resource *parent; struct resource *sibling; struct resource *child; } ;
220 struct pci_dev ;
58 struct pm_message { int event; } ;
64 typedef struct pm_message pm_message_t;
65 struct dev_pm_ops { int (*prepare)(struct device *); void (*complete)(struct device *); int (*suspend)(struct device *); int (*resume)(struct device *); int (*freeze)(struct device *); int (*thaw)(struct device *); int (*poweroff)(struct device *); int (*restore)(struct device *); int (*suspend_late)(struct device *); int (*resume_early)(struct device *); int (*freeze_late)(struct device *); int (*thaw_early)(struct device *); int (*poweroff_late)(struct device *); int (*restore_early)(struct device *); int (*suspend_noirq)(struct device *); int (*resume_noirq)(struct device *); int (*freeze_noirq)(struct device *); int (*thaw_noirq)(struct device *); int (*poweroff_noirq)(struct device *); int (*restore_noirq)(struct device *); int (*runtime_suspend)(struct device *); int (*runtime_resume)(struct device *); int (*runtime_idle)(struct device *); } ;
320 enum rpm_status { RPM_ACTIVE = 0, RPM_RESUMING = 1, RPM_SUSPENDED = 2, RPM_SUSPENDING = 3 } ;
327 enum rpm_request { RPM_REQ_NONE = 0, RPM_REQ_IDLE = 1, RPM_REQ_SUSPEND = 2, RPM_REQ_AUTOSUSPEND = 3, RPM_REQ_RESUME = 4 } ;
335 struct wakeup_source ;
336 struct wake_irq ;
337 struct pm_domain_data ;
338 struct pm_subsys_data { spinlock_t lock; unsigned int refcount; struct list_head clock_list; struct pm_domain_data *domain_data; } ;
556 struct dev_pm_qos ;
556 struct dev_pm_info { pm_message_t power_state; unsigned char can_wakeup; unsigned char async_suspend; bool is_prepared; bool is_suspended; bool is_noirq_suspended; bool is_late_suspended; bool early_init; bool direct_complete; spinlock_t lock; struct list_head entry; struct completion completion; struct wakeup_source *wakeup; bool wakeup_path; bool syscore; bool no_pm_callbacks; struct timer_list suspend_timer; unsigned long timer_expires; struct work_struct work; wait_queue_head_t wait_queue; struct wake_irq *wakeirq; atomic_t usage_count; atomic_t child_count; unsigned char disable_depth; unsigned char idle_notification; unsigned char request_pending; unsigned char deferred_resume; unsigned char run_wake; unsigned char runtime_auto; bool ignore_children; unsigned char no_callbacks; unsigned char irq_safe; unsigned char use_autosuspend; unsigned char timer_autosuspends; unsigned char memalloc_noio; enum rpm_request request; enum rpm_status runtime_status; int runtime_error; int autosuspend_delay; unsigned long last_busy; unsigned long active_jiffies; unsigned long suspended_jiffies; unsigned long accounting_timestamp; struct pm_subsys_data *subsys_data; void (*set_latency_tolerance)(struct device *, s32 ); struct dev_pm_qos *qos; } ;
616 struct dev_pm_domain { struct dev_pm_ops ops; void (*detach)(struct device *, bool ); int (*activate)(struct device *); void (*sync)(struct device *); void (*dismiss)(struct device *); } ;
144 struct pci_bus ;
26 struct ldt_struct ;
26 struct vdso_image ;
26 struct __anonstruct_mm_context_t_165 { struct ldt_struct *ldt; unsigned short ia32_compat; struct mutex lock; void *vdso; const struct vdso_image *vdso_image; atomic_t perf_rdpmc_allowed; } ;
26 typedef struct __anonstruct_mm_context_t_165 mm_context_t;
22 struct bio_vec ;
1276 struct llist_node ;
64 struct llist_node { struct llist_node *next; } ;
37 struct cred ;
19 struct inode ;
58 struct arch_uprobe_task { unsigned long saved_scratch_register; unsigned int saved_trap_nr; unsigned int saved_tf; } ;
66 enum uprobe_task_state { UTASK_RUNNING = 0, UTASK_SSTEP = 1, UTASK_SSTEP_ACK = 2, UTASK_SSTEP_TRAPPED = 3 } ;
73 struct __anonstruct____missing_field_name_211 { struct arch_uprobe_task autask; unsigned long vaddr; } ;
73 struct __anonstruct____missing_field_name_212 { struct callback_head dup_xol_work; unsigned long dup_xol_addr; } ;
73 union __anonunion____missing_field_name_210 { struct __anonstruct____missing_field_name_211 __annonCompField35; struct __anonstruct____missing_field_name_212 __annonCompField36; } ;
73 struct uprobe ;
73 struct return_instance ;
73 struct uprobe_task { enum uprobe_task_state state; union __anonunion____missing_field_name_210 __annonCompField37; struct uprobe *active_uprobe; unsigned long xol_vaddr; struct return_instance *return_instances; unsigned int depth; } ;
94 struct return_instance { struct uprobe *uprobe; unsigned long func; unsigned long stack; unsigned long orig_ret_vaddr; bool chained; struct return_instance *next; } ;
110 struct xol_area ;
111 struct uprobes_state { struct xol_area *xol_area; } ;
150 struct address_space ;
151 struct mem_cgroup ;
152 union __anonunion____missing_field_name_213 { struct address_space *mapping; void *s_mem; atomic_t compound_mapcount; } ;
152 union __anonunion____missing_field_name_214 { unsigned long index; void *freelist; } ;
152 struct __anonstruct____missing_field_name_218 { unsigned short inuse; unsigned short objects; unsigned char frozen; } ;
152 union __anonunion____missing_field_name_217 { atomic_t _mapcount; unsigned int active; struct __anonstruct____missing_field_name_218 __annonCompField40; int units; } ;
152 struct __anonstruct____missing_field_name_216 { union __anonunion____missing_field_name_217 __annonCompField41; atomic_t _refcount; } ;
152 union __anonunion____missing_field_name_215 { unsigned long counters; struct __anonstruct____missing_field_name_216 __annonCompField42; } ;
152 struct dev_pagemap ;
152 struct __anonstruct____missing_field_name_220 { struct page *next; int pages; int pobjects; } ;
152 struct __anonstruct____missing_field_name_221 { unsigned long compound_head; unsigned int compound_dtor; unsigned int compound_order; } ;
152 struct __anonstruct____missing_field_name_222 { unsigned long __pad; pgtable_t pmd_huge_pte; } ;
152 union __anonunion____missing_field_name_219 { struct list_head lru; struct dev_pagemap *pgmap; struct __anonstruct____missing_field_name_220 __annonCompField44; struct callback_head callback_head; struct __anonstruct____missing_field_name_221 __annonCompField45; struct __anonstruct____missing_field_name_222 __annonCompField46; } ;
152 struct kmem_cache ;
152 union __anonunion____missing_field_name_223 { unsigned long private; spinlock_t *ptl; struct kmem_cache *slab_cache; } ;
152 struct page { unsigned long flags; union __anonunion____missing_field_name_213 __annonCompField38; union __anonunion____missing_field_name_214 __annonCompField39; union __anonunion____missing_field_name_215 __annonCompField43; union __anonunion____missing_field_name_219 __annonCompField47; union __anonunion____missing_field_name_223 __annonCompField48; struct mem_cgroup *mem_cgroup; } ;
197 struct page_frag { struct page *page; __u32 offset; __u32 size; } ;
282 struct userfaultfd_ctx ;
282 struct vm_userfaultfd_ctx { struct userfaultfd_ctx *ctx; } ;
289 struct __anonstruct_shared_224 { struct rb_node rb; unsigned long rb_subtree_last; } ;
289 struct anon_vma ;
289 struct vm_operations_struct ;
289 struct mempolicy ;
289 struct vm_area_struct { unsigned long vm_start; unsigned long vm_end; struct vm_area_struct *vm_next; struct vm_area_struct *vm_prev; struct rb_node vm_rb; unsigned long rb_subtree_gap; struct mm_struct *vm_mm; pgprot_t vm_page_prot; unsigned long vm_flags; struct __anonstruct_shared_224 shared; struct list_head anon_vma_chain; struct anon_vma *anon_vma; const struct vm_operations_struct *vm_ops; unsigned long vm_pgoff; struct file *vm_file; void *vm_private_data; struct mempolicy *vm_policy; struct vm_userfaultfd_ctx vm_userfaultfd_ctx; } ;
362 struct core_thread { struct task_struct *task; struct core_thread *next; } ;
367 struct core_state { atomic_t nr_threads; struct core_thread dumper; struct completion startup; } ;
381 struct task_rss_stat { int events; int count[4U]; } ;
389 struct mm_rss_stat { atomic_long_t count[4U]; } ;
394 struct kioctx_table ;
395 struct linux_binfmt ;
395 struct mmu_notifier_mm ;
395 struct mm_struct { struct vm_area_struct *mmap; struct rb_root mm_rb; u32 vmacache_seqnum; unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); unsigned long mmap_base; unsigned long mmap_legacy_base; unsigned long task_size; unsigned long highest_vm_end; pgd_t *pgd; atomic_t mm_users; atomic_t mm_count; atomic_long_t nr_ptes; atomic_long_t nr_pmds; int map_count; spinlock_t page_table_lock; struct rw_semaphore mmap_sem; struct list_head mmlist; unsigned long hiwater_rss; unsigned long hiwater_vm; unsigned long total_vm; unsigned long locked_vm; unsigned long pinned_vm; unsigned long data_vm; unsigned long exec_vm; unsigned long stack_vm; unsigned long def_flags; unsigned long start_code; unsigned long end_code; unsigned long start_data; unsigned long end_data; unsigned long start_brk; unsigned long brk; unsigned long start_stack; unsigned long arg_start; unsigned long arg_end; unsigned long env_start; unsigned long env_end; unsigned long saved_auxv[46U]; struct mm_rss_stat rss_stat; struct linux_binfmt *binfmt; cpumask_var_t cpu_vm_mask_var; mm_context_t context; unsigned long flags; struct core_state *core_state; spinlock_t ioctx_lock; struct kioctx_table *ioctx_table; struct task_struct *owner; struct file *exe_file; struct mmu_notifier_mm *mmu_notifier_mm; struct cpumask cpumask_allocation; unsigned long numa_next_scan; unsigned long numa_scan_offset; int numa_scan_seq; bool tlb_flush_pending; struct uprobes_state uprobes_state; void *bd_addr; atomic_long_t hugetlb_usage; struct work_struct async_put_work; } ;
565 struct vm_fault ;
619 struct vdso_image { void *data; unsigned long size; unsigned long alt; unsigned long alt_len; long sym_vvar_start; long sym_vvar_page; long sym_hpet_page; long sym_pvclock_page; long sym_VDSO32_NOTE_MASK; long sym___kernel_sigreturn; long sym___kernel_rt_sigreturn; long sym___kernel_vsyscall; long sym_int80_landing_pad; } ;
15 typedef __u64 Elf64_Addr;
16 typedef __u16 Elf64_Half;
18 typedef __u64 Elf64_Off;
20 typedef __u32 Elf64_Word;
21 typedef __u64 Elf64_Xword;
190 struct elf64_sym { Elf64_Word st_name; unsigned char st_info; unsigned char st_other; Elf64_Half st_shndx; Elf64_Addr st_value; Elf64_Xword st_size; } ;
198 typedef struct elf64_sym Elf64_Sym;
219 struct elf64_hdr { unsigned char e_ident[16U]; Elf64_Half e_type; Elf64_Half e_machine; Elf64_Word e_version; Elf64_Addr e_entry; Elf64_Off e_phoff; Elf64_Off e_shoff; Elf64_Word e_flags; Elf64_Half e_ehsize; Elf64_Half e_phentsize; Elf64_Half e_phnum; Elf64_Half e_shentsize; Elf64_Half e_shnum; Elf64_Half e_shstrndx; } ;
235 typedef struct elf64_hdr Elf64_Ehdr;
314 struct elf64_shdr { Elf64_Word sh_name; Elf64_Word sh_type; Elf64_Xword sh_flags; Elf64_Addr sh_addr; Elf64_Off sh_offset; Elf64_Xword sh_size; Elf64_Word sh_link; Elf64_Word sh_info; Elf64_Xword sh_addralign; Elf64_Xword sh_entsize; } ;
326 typedef struct elf64_shdr Elf64_Shdr;
53 union __anonunion____missing_field_name_229 { unsigned long bitmap[4U]; struct callback_head callback_head; } ;
53 struct idr_layer { int prefix; int layer; struct idr_layer *ary[256U]; int count; union __anonunion____missing_field_name_229 __annonCompField49; } ;
41 struct idr { struct idr_layer *hint; struct idr_layer *top; int layers; int cur; spinlock_t lock; int id_free_cnt; struct idr_layer *id_free; } ;
124 struct ida_bitmap { long nr_busy; unsigned long bitmap[15U]; } ;
167 struct ida { struct idr idr; struct ida_bitmap *free_bitmap; } ;
199 struct dentry ;
200 struct iattr ;
201 struct super_block ;
202 struct file_system_type ;
203 struct kernfs_open_node ;
204 struct kernfs_iattrs ;
227 struct kernfs_root ;
227 struct kernfs_elem_dir { unsigned long subdirs; struct rb_root children; struct kernfs_root *root; } ;
85 struct kernfs_node ;
85 struct kernfs_elem_symlink { struct kernfs_node *target_kn; } ;
89 struct kernfs_ops ;
89 struct kernfs_elem_attr { const struct kernfs_ops *ops; struct kernfs_open_node *open; loff_t size; struct kernfs_node *notify_next; } ;
96 union __anonunion____missing_field_name_234 { struct kernfs_elem_dir dir; struct kernfs_elem_symlink symlink; struct kernfs_elem_attr attr; } ;
96 struct kernfs_node { atomic_t count; atomic_t active; struct lockdep_map dep_map; struct kernfs_node *parent; const char *name; struct rb_node rb; const void *ns; unsigned int hash; union __anonunion____missing_field_name_234 __annonCompField50; void *priv; unsigned short flags; umode_t mode; unsigned int ino; struct kernfs_iattrs *iattr; } ;
138 struct kernfs_syscall_ops { int (*remount_fs)(struct kernfs_root *, int *, char *); int (*show_options)(struct seq_file *, struct kernfs_root *); int (*mkdir)(struct kernfs_node *, const char *, umode_t ); int (*rmdir)(struct kernfs_node *); int (*rename)(struct kernfs_node *, struct kernfs_node *, const char *); int (*show_path)(struct seq_file *, struct kernfs_node *, struct kernfs_root *); } ;
157 struct kernfs_root { struct kernfs_node *kn; unsigned int flags; struct ida ino_ida; struct kernfs_syscall_ops *syscall_ops; struct list_head supers; wait_queue_head_t deactivate_waitq; } ;
173 struct kernfs_open_file { struct kernfs_node *kn; struct file *file; void *priv; struct mutex mutex; struct mutex prealloc_mutex; int event; struct list_head list; char *prealloc_buf; size_t atomic_write_len; bool mmapped; const struct vm_operations_struct *vm_ops; } ;
191 struct kernfs_ops { int (*seq_show)(struct seq_file *, void *); void * (*seq_start)(struct seq_file *, loff_t *); void * (*seq_next)(struct seq_file *, void *, loff_t *); void (*seq_stop)(struct seq_file *, void *); ssize_t (*read)(struct kernfs_open_file *, char *, size_t , loff_t ); size_t atomic_write_len; bool prealloc; ssize_t (*write)(struct kernfs_open_file *, char *, size_t , loff_t ); int (*mmap)(struct kernfs_open_file *, struct vm_area_struct *); struct lock_class_key lockdep_key; } ;
499 struct sock ;
500 struct kobject ;
501 enum kobj_ns_type { KOBJ_NS_TYPE_NONE = 0, KOBJ_NS_TYPE_NET = 1, KOBJ_NS_TYPES = 2 } ;
507 struct kobj_ns_type_operations { enum kobj_ns_type type; bool (*current_may_mount)(); void * (*grab_current_ns)(); const void * (*netlink_ns)(struct sock *); const void * (*initial_ns)(); void (*drop_ns)(void *); } ;
59 struct bin_attribute ;
60 struct attribute { const char *name; umode_t mode; bool ignore_lockdep; struct lock_class_key *key; struct lock_class_key skey; } ;
37 struct attribute_group { const char *name; umode_t (*is_visible)(struct kobject *, struct attribute *, int); umode_t (*is_bin_visible)(struct kobject *, struct bin_attribute *, int); struct attribute **attrs; struct bin_attribute **bin_attrs; } ;
92 struct bin_attribute { struct attribute attr; size_t size; void *private; ssize_t (*read)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t , size_t ); ssize_t (*write)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t , size_t ); int (*mmap)(struct file *, struct kobject *, struct bin_attribute *, struct vm_area_struct *); } ;
165 struct sysfs_ops { ssize_t (*show)(struct kobject *, struct attribute *, char *); ssize_t (*store)(struct kobject *, struct attribute *, const char *, size_t ); } ;
530 struct kref { atomic_t refcount; } ;
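/*
 * Illustrative sketch (not part of the recorded trace): struct kref is a
 * reference count over atomic_t; the object is released when the count
 * drops to zero. With the real kref API (foo and foo_release are
 * hypothetical):
 *
 *     kref_init(&foo->ref);             // count = 1
 *     kref_get(&foo->ref);              // take another reference
 *     kref_put(&foo->ref, foo_release); // drop one; calls foo_release()
 *                                       // when the count reaches zero
 */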
52 struct kset ;
52 struct kobj_type ;
52 struct kobject { const char *name; struct list_head entry; struct kobject *parent; struct kset *kset; struct kobj_type *ktype; struct kernfs_node *sd; struct kref kref; struct delayed_work release; unsigned char state_initialized; unsigned char state_in_sysfs; unsigned char state_add_uevent_sent; unsigned char state_remove_uevent_sent; unsigned char uevent_suppress; } ;
115 struct kobj_type { void (*release)(struct kobject *); const struct sysfs_ops *sysfs_ops; struct attribute **default_attrs; const struct kobj_ns_type_operations * (*child_ns_type)(struct kobject *); const void * (*namespace)(struct kobject *); } ;
123 struct kobj_uevent_env { char *argv[3U]; char *envp[32U]; int envp_idx; char buf[2048U]; int buflen; } ;
131 struct kset_uevent_ops { const int (*filter)(struct kset *, struct kobject *); const const char * (*name)(struct kset *, struct kobject *); const int (*uevent)(struct kset *, struct kobject *, struct kobj_uevent_env *); } ;
148 struct kset { struct list_head list; spinlock_t list_lock; struct kobject kobj; const struct kset_uevent_ops *uevent_ops; } ;
223 struct kernel_param ;
228 struct kernel_param_ops { unsigned int flags; int (*set)(const char *, const struct kernel_param *); int (*get)(char *, const struct kernel_param *); void (*free)(void *); } ;
62 struct kparam_string ;
62 struct kparam_array ;
62 union __anonunion____missing_field_name_237 { void *arg; const struct kparam_string *str; const struct kparam_array *arr; } ;
62 struct kernel_param { const char *name; struct module *mod; const struct kernel_param_ops *ops; const u16 perm; s8 level; u8 flags; union __anonunion____missing_field_name_237 __annonCompField51; } ;
83 struct kparam_string { unsigned int maxlen; char *string; } ;
89 struct kparam_array { unsigned int max; unsigned int elemsize; unsigned int *num; const struct kernel_param_ops *ops; void *elem; } ;
470 struct exception_table_entry ;
24 struct latch_tree_node { struct rb_node node[2U]; } ;
211 struct mod_arch_specific { } ;
39 struct module_param_attrs ;
39 struct module_kobject { struct kobject kobj; struct module *mod; struct kobject *drivers_dir; struct module_param_attrs *mp; struct completion *kobj_completion; } ;
50 struct module_attribute { struct attribute attr; ssize_t (*show)(struct module_attribute *, struct module_kobject *, char *); ssize_t (*store)(struct module_attribute *, struct module_kobject *, const char *, size_t ); void (*setup)(struct module *, const char *); int (*test)(struct module *); void (*free)(struct module *); } ;
277 enum module_state { MODULE_STATE_LIVE = 0, MODULE_STATE_COMING = 1, MODULE_STATE_GOING = 2, MODULE_STATE_UNFORMED = 3 } ;
284 struct mod_tree_node { struct module *mod; struct latch_tree_node node; } ;
291 struct module_layout { void *base; unsigned int size; unsigned int text_size; unsigned int ro_size; unsigned int ro_after_init_size; struct mod_tree_node mtn; } ;
307 struct mod_kallsyms { Elf64_Sym *symtab; unsigned int num_symtab; char *strtab; } ;
321 struct klp_modinfo { Elf64_Ehdr hdr; Elf64_Shdr *sechdrs; char *secstrings; unsigned int symndx; } ;
329 struct module_sect_attrs ;
329 struct module_notes_attrs ;
329 struct trace_event_call ;
329 struct trace_enum_map ;
329 struct module { enum module_state state; struct list_head list; char name[56U]; struct module_kobject mkobj; struct module_attribute *modinfo_attrs; const char *version; const char *srcversion; struct kobject *holders_dir; const struct kernel_symbol *syms; const unsigned long *crcs; unsigned int num_syms; struct mutex param_lock; struct kernel_param *kp; unsigned int num_kp; unsigned int num_gpl_syms; const struct kernel_symbol *gpl_syms; const unsigned long *gpl_crcs; const struct kernel_symbol *unused_syms; const unsigned long *unused_crcs; unsigned int num_unused_syms; unsigned int num_unused_gpl_syms; const struct kernel_symbol *unused_gpl_syms; const unsigned long *unused_gpl_crcs; bool sig_ok; bool async_probe_requested; const struct kernel_symbol *gpl_future_syms; const unsigned long *gpl_future_crcs; unsigned int num_gpl_future_syms; unsigned int num_exentries; struct exception_table_entry *extable; int (*init)(); struct module_layout core_layout; struct module_layout init_layout; struct mod_arch_specific arch; unsigned int taints; unsigned int num_bugs; struct list_head bug_list; struct bug_entry *bug_table; struct mod_kallsyms *kallsyms; struct mod_kallsyms core_kallsyms; struct module_sect_attrs *sect_attrs; struct module_notes_attrs *notes_attrs; char *args; void *percpu; unsigned int percpu_size; unsigned int num_tracepoints; const struct tracepoint **tracepoints_ptrs; unsigned int num_trace_bprintk_fmt; const char **trace_bprintk_fmt_start; struct trace_event_call **trace_events; unsigned int num_trace_events; struct trace_enum_map **trace_enums; unsigned int num_trace_enums; bool klp; bool klp_alive; struct klp_modinfo *klp_info; struct list_head source_list; struct list_head target_list; void (*exit)(); atomic_t refcnt; ctor_fn_t (**ctors)(); unsigned int num_ctors; } ;
158 struct iovec { void *iov_base; __kernel_size_t iov_len; } ;
21 struct kvec { void *iov_base; size_t iov_len; } ;
27 union __anonunion____missing_field_name_244 { const struct iovec *iov; const struct kvec *kvec; const struct bio_vec *bvec; } ;
27 struct iov_iter { int type; size_t iov_offset; size_t count; union __anonunion____missing_field_name_244 __annonCompField52; unsigned long nr_segs; } ;
11 typedef unsigned short __kernel_sa_family_t;
18 struct pid ;
23 typedef __kernel_sa_family_t sa_family_t;
24 struct sockaddr { sa_family_t sa_family; char sa_data[14U]; } ;
38 struct kiocb ;
38 struct msghdr { void *msg_name; int msg_namelen; struct iov_iter msg_iter; void *msg_control; __kernel_size_t msg_controllen; unsigned int msg_flags; struct kiocb *msg_iocb; } ;
217 enum ldv_15580 { SS_FREE = 0, SS_UNCONNECTED = 1, SS_CONNECTING = 2, SS_CONNECTED = 3, SS_DISCONNECTING = 4 } ;
53 typedef enum ldv_15580 socket_state;
54 struct poll_table_struct ;
55 struct pipe_inode_info ;
56 struct net ;
73 struct fasync_struct ;
73 struct socket_wq { wait_queue_head_t wait; struct fasync_struct *fasync_list; unsigned long flags; struct callback_head rcu; } ;
99 struct proto_ops ;
99 struct socket { socket_state state; short type; unsigned long flags; struct socket_wq *wq; struct file *file; struct sock *sk; const struct proto_ops *ops; } ;
125 struct proto_ops { int family; struct module *owner; int (*release)(struct socket *); int (*bind)(struct socket *, struct sockaddr *, int); int (*connect)(struct socket *, struct sockaddr *, int, int); int (*socketpair)(struct socket *, struct socket *); int (*accept)(struct socket *, struct socket *, int); int (*getname)(struct socket *, struct sockaddr *, int *, int); unsigned int (*poll)(struct file *, struct socket *, struct poll_table_struct *); int (*ioctl)(struct socket *, unsigned int, unsigned long); int (*compat_ioctl)(struct socket *, unsigned int, unsigned long); int (*listen)(struct socket *, int); int (*shutdown)(struct socket *, int); int (*setsockopt)(struct socket *, int, int, char *, unsigned int); int (*getsockopt)(struct socket *, int, int, char *, int *); int (*compat_setsockopt)(struct socket *, int, int, char *, unsigned int); int (*compat_getsockopt)(struct socket *, int, int, char *, int *); int (*sendmsg)(struct socket *, struct msghdr *, size_t ); int (*recvmsg)(struct socket *, struct msghdr *, size_t , int); int (*mmap)(struct file *, struct socket *, struct vm_area_struct *); ssize_t (*sendpage)(struct socket *, struct page *, int, size_t , int); ssize_t (*splice_read)(struct socket *, loff_t *, struct pipe_inode_info *, size_t , unsigned int); int (*set_peek_off)(struct sock *, int); int (*peek_len)(struct socket *); } ;
22 struct kernel_cap_struct { __u32 cap[2U]; } ;
25 typedef struct kernel_cap_struct kernel_cap_t;
84 struct plist_node { int prio; struct list_head prio_list; struct list_head node_list; } ;
4 typedef unsigned long cputime_t;
25 struct sem_undo_list ;
25 struct sysv_sem { struct sem_undo_list *undo_list; } ;
78 struct user_struct ;
26 struct sysv_shm { struct list_head shm_clist; } ;
24 struct __anonstruct_sigset_t_254 { unsigned long sig[1U]; } ;
24 typedef struct __anonstruct_sigset_t_254 sigset_t;
25 struct siginfo ;
17 typedef void __signalfn_t(int);
18 typedef __signalfn_t *__sighandler_t;
20 typedef void __restorefn_t();
21 typedef __restorefn_t *__sigrestore_t;
34 union sigval { int sival_int; void *sival_ptr; } ;
10 typedef union sigval sigval_t;
11 struct __anonstruct__kill_256 { __kernel_pid_t _pid; __kernel_uid32_t _uid; } ;
11 struct __anonstruct__timer_257 { __kernel_timer_t _tid; int _overrun; char _pad[0U]; sigval_t _sigval; int _sys_private; } ;
11 struct __anonstruct__rt_258 { __kernel_pid_t _pid; __kernel_uid32_t _uid; sigval_t _sigval; } ;
11 struct __anonstruct__sigchld_259 { __kernel_pid_t _pid; __kernel_uid32_t _uid; int _status; __kernel_clock_t _utime; __kernel_clock_t _stime; } ;
11 struct __anonstruct__addr_bnd_262 { void *_lower; void *_upper; } ;
11 union __anonunion____missing_field_name_261 { struct __anonstruct__addr_bnd_262 _addr_bnd; __u32 _pkey; } ;
11 struct __anonstruct__sigfault_260 { void *_addr; short _addr_lsb; union __anonunion____missing_field_name_261 __annonCompField53; } ;
11 struct __anonstruct__sigpoll_263 { long _band; int _fd; } ;
11 struct __anonstruct__sigsys_264 { void *_call_addr; int _syscall; unsigned int _arch; } ;
11 union __anonunion__sifields_255 { int _pad[28U]; struct __anonstruct__kill_256 _kill; struct __anonstruct__timer_257 _timer; struct __anonstruct__rt_258 _rt; struct __anonstruct__sigchld_259 _sigchld; struct __anonstruct__sigfault_260 _sigfault; struct __anonstruct__sigpoll_263 _sigpoll; struct __anonstruct__sigsys_264 _sigsys; } ;
11 struct siginfo { int si_signo; int si_errno; int si_code; union __anonunion__sifields_255 _sifields; } ;
118 typedef struct siginfo siginfo_t;
22 struct sigpending { struct list_head list; sigset_t signal; } ;
257 struct sigaction { __sighandler_t sa_handler; unsigned long sa_flags; __sigrestore_t sa_restorer; sigset_t sa_mask; } ;
271 struct k_sigaction { struct sigaction sa; } ;
457 enum pid_type { PIDTYPE_PID = 0, PIDTYPE_PGID = 1, PIDTYPE_SID = 2, PIDTYPE_MAX = 3 } ;
464 struct pid_namespace ;
464 struct upid { int nr; struct pid_namespace *ns; struct hlist_node pid_chain; } ;
56 struct pid { atomic_t count; unsigned int level; struct hlist_head tasks[3U]; struct callback_head rcu; struct upid numbers[1U]; } ;
68 struct pid_link { struct hlist_node node; struct pid *pid; } ;
43 struct seccomp_filter ;
44 struct seccomp { int mode; struct seccomp_filter *filter; } ;
40 struct rt_mutex_waiter ;
41 struct rlimit { __kernel_ulong_t rlim_cur; __kernel_ulong_t rlim_max; } ;
11 struct timerqueue_node { struct rb_node node; ktime_t expires; } ;
12 struct timerqueue_head { struct rb_root head; struct timerqueue_node *next; } ;
50 struct hrtimer_clock_base ;
51 struct hrtimer_cpu_base ;
60 enum hrtimer_restart { HRTIMER_NORESTART = 0, HRTIMER_RESTART = 1 } ;
65 struct hrtimer { struct timerqueue_node node; ktime_t _softexpires; enum hrtimer_restart (*function)(struct hrtimer *); struct hrtimer_clock_base *base; u8 state; u8 is_rel; int start_pid; void *start_site; char start_comm[16U]; } ;
125 struct hrtimer_clock_base { struct hrtimer_cpu_base *cpu_base; int index; clockid_t clockid; struct timerqueue_head active; ktime_t (*get_time)(); ktime_t offset; } ;
158 struct hrtimer_cpu_base { raw_spinlock_t lock; seqcount_t seq; struct hrtimer *running; unsigned int cpu; unsigned int active_bases; unsigned int clock_was_set_seq; bool migration_enabled; bool nohz_active; unsigned char in_hrtirq; unsigned char hres_active; unsigned char hang_detected; ktime_t expires_next; struct hrtimer *next_timer; unsigned int nr_events; unsigned int nr_retries; unsigned int nr_hangs; unsigned int max_hang_time; struct hrtimer_clock_base clock_base[4U]; } ;
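/*
 * Illustrative sketch (not part of the recorded trace): an hrtimer runs
 * a callback on a high-resolution clock, and the callback's return value
 * (enum hrtimer_restart above) decides whether it is re-armed:
 *
 *     hrtimer_init(&t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *     t.function = my_timer_fn;                       // hypothetical
 *     hrtimer_start(&t, ms_to_ktime(100), HRTIMER_MODE_REL);
 */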
12 enum kcov_mode { KCOV_MODE_DISABLED = 0, KCOV_MODE_TRACE = 1 } ;
17 struct task_io_accounting { u64 rchar; u64 wchar; u64 syscr; u64 syscw; u64 read_bytes; u64 write_bytes; u64 cancelled_write_bytes; } ;
45 struct latency_record { unsigned long backtrace[12U]; unsigned int count; unsigned long time; unsigned long max; } ;
41 struct assoc_array_ptr ;
41 struct assoc_array { struct assoc_array_ptr *root; unsigned long nr_leaves_on_tree; } ;
31 typedef int32_t key_serial_t;
34 typedef uint32_t key_perm_t;
35 struct key ;
36 struct signal_struct ;
37 struct key_type ;
41 struct keyring_index_key { struct key_type *type; const char *description; size_t desc_len; } ;
91 union key_payload { void *rcu_data0; void *data[4U]; } ;
128 union __anonunion____missing_field_name_299 { struct list_head graveyard_link; struct rb_node serial_node; } ;
128 struct key_user ;
128 union __anonunion____missing_field_name_300 { time_t expiry; time_t revoked_at; } ;
128 struct __anonstruct____missing_field_name_302 { struct key_type *type; char *description; } ;
128 union __anonunion____missing_field_name_301 { struct keyring_index_key index_key; struct __anonstruct____missing_field_name_302 __annonCompField56; } ;
128 struct __anonstruct____missing_field_name_304 { struct list_head name_link; struct assoc_array keys; } ;
128 union __anonunion____missing_field_name_303 { union key_payload payload; struct __anonstruct____missing_field_name_304 __annonCompField58; int reject_error; } ;
128 struct key { atomic_t usage; key_serial_t serial; union __anonunion____missing_field_name_299 __annonCompField54; struct rw_semaphore sem; struct key_user *user; void *security; union __anonunion____missing_field_name_300 __annonCompField55; time_t last_used_at; kuid_t uid; kgid_t gid; key_perm_t perm; unsigned short quotalen; unsigned short datalen; unsigned long flags; union __anonunion____missing_field_name_301 __annonCompField57; union __anonunion____missing_field_name_303 __annonCompField59; int (*restrict_link)(struct key *, const struct key_type *, const union key_payload *); } ;
377 struct audit_context ;
27 struct group_info { atomic_t usage; int ngroups; int nblocks; kgid_t small_block[32U]; kgid_t *blocks[0U]; } ;
90 struct cred { atomic_t usage; atomic_t subscribers; void *put_addr; unsigned int magic; kuid_t uid; kgid_t gid; kuid_t suid; kgid_t sgid; kuid_t euid; kgid_t egid; kuid_t fsuid; kgid_t fsgid; unsigned int securebits; kernel_cap_t cap_inheritable; kernel_cap_t cap_permitted; kernel_cap_t cap_effective; kernel_cap_t cap_bset; kernel_cap_t cap_ambient; unsigned char jit_keyring; struct key *session_keyring; struct key *process_keyring; struct key *thread_keyring; struct key *request_key_auth; void *security; struct user_struct *user; struct user_namespace *user_ns; struct group_info *group_info; struct callback_head rcu; } ;
377 struct percpu_ref ;
55 typedef void percpu_ref_func_t(struct percpu_ref *);
68 struct percpu_ref { atomic_long_t count; unsigned long percpu_count_ptr; percpu_ref_func_t *release; percpu_ref_func_t *confirm_switch; bool force_atomic; struct callback_head rcu; } ;
325 enum rcu_sync_type { RCU_SYNC = 0, RCU_SCHED_SYNC = 1, RCU_BH_SYNC = 2 } ;
331 struct rcu_sync { int gp_state; int gp_count; wait_queue_head_t gp_wait; int cb_state; struct callback_head cb_head; enum rcu_sync_type gp_type; } ;
65 struct percpu_rw_semaphore { struct rcu_sync rss; unsigned int *fast_read_ctr; struct rw_semaphore rw_sem; atomic_t slow_read_ctr; wait_queue_head_t write_waitq; } ;
54 struct cgroup ;
55 struct cgroup_root ;
56 struct cgroup_subsys ;
57 struct cgroup_taskset ;
101 struct cgroup_file { struct kernfs_node *kn; } ;
90 struct cgroup_subsys_state { struct cgroup *cgroup; struct cgroup_subsys *ss; struct percpu_ref refcnt; struct cgroup_subsys_state *parent; struct list_head sibling; struct list_head children; int id; unsigned int flags; u64 serial_nr; atomic_t online_cnt; struct callback_head callback_head; struct work_struct destroy_work; } ;
141 struct css_set { atomic_t refcount; struct hlist_node hlist; struct list_head tasks; struct list_head mg_tasks; struct list_head cgrp_links; struct cgroup *dfl_cgrp; struct cgroup_subsys_state *subsys[13U]; struct list_head mg_preload_node; struct list_head mg_node; struct cgroup *mg_src_cgrp; struct cgroup *mg_dst_cgrp; struct css_set *mg_dst_cset; struct list_head e_cset_node[13U]; struct list_head task_iters; bool dead; struct callback_head callback_head; } ;
221 struct cgroup { struct cgroup_subsys_state self; unsigned long flags; int id; int level; int populated_cnt; struct kernfs_node *kn; struct cgroup_file procs_file; struct cgroup_file events_file; u16 subtree_control; u16 subtree_ss_mask; u16 old_subtree_control; u16 old_subtree_ss_mask; struct cgroup_subsys_state *subsys[13U]; struct cgroup_root *root; struct list_head cset_links; struct list_head e_csets[13U]; struct list_head pidlists; struct mutex pidlist_mutex; wait_queue_head_t offline_waitq; struct work_struct release_agent_work; int ancestor_ids[]; } ;
306 struct cgroup_root { struct kernfs_root *kf_root; unsigned int subsys_mask; int hierarchy_id; struct cgroup cgrp; int cgrp_ancestor_id_storage; atomic_t nr_cgrps; struct list_head root_list; unsigned int flags; struct idr cgroup_idr; char release_agent_path[4096U]; char name[64U]; } ;
345 struct cftype { char name[64U]; unsigned long private; size_t max_write_len; unsigned int flags; unsigned int file_offset; struct cgroup_subsys *ss; struct list_head node; struct kernfs_ops *kf_ops; u64 (*read_u64)(struct cgroup_subsys_state *, struct cftype *); s64 (*read_s64)(struct cgroup_subsys_state *, struct cftype *); int (*seq_show)(struct seq_file *, void *); void * (*seq_start)(struct seq_file *, loff_t *); void * (*seq_next)(struct seq_file *, void *, loff_t *); void (*seq_stop)(struct seq_file *, void *); int (*write_u64)(struct cgroup_subsys_state *, struct cftype *, u64 ); int (*write_s64)(struct cgroup_subsys_state *, struct cftype *, s64 ); ssize_t (*write)(struct kernfs_open_file *, char *, size_t , loff_t ); struct lock_class_key lockdep_key; } ;
430 struct cgroup_subsys { struct cgroup_subsys_state * (*css_alloc)(struct cgroup_subsys_state *); int (*css_online)(struct cgroup_subsys_state *); void (*css_offline)(struct cgroup_subsys_state *); void (*css_released)(struct cgroup_subsys_state *); void (*css_free)(struct cgroup_subsys_state *); void (*css_reset)(struct cgroup_subsys_state *); int (*can_attach)(struct cgroup_taskset *); void (*cancel_attach)(struct cgroup_taskset *); void (*attach)(struct cgroup_taskset *); void (*post_attach)(); int (*can_fork)(struct task_struct *); void (*cancel_fork)(struct task_struct *); void (*fork)(struct task_struct *); void (*exit)(struct task_struct *); void (*free)(struct task_struct *); void (*bind)(struct cgroup_subsys_state *); bool early_init; bool implicit_on_dfl; bool broken_hierarchy; bool warned_broken_hierarchy; int id; const char *name; const char *legacy_name; struct cgroup_root *root; struct idr css_idr; struct list_head cfts; struct cftype *dfl_cftypes; struct cftype *legacy_cftypes; unsigned int depends_on; } ;
546 struct __anonstruct____missing_field_name_308 { u8 is_data; u8 padding; u16 prioidx; u32 classid; } ;
546 union __anonunion____missing_field_name_307 { struct __anonstruct____missing_field_name_308 __annonCompField60; u64 val; } ;
546 struct sock_cgroup_data { union __anonunion____missing_field_name_307 __annonCompField61; } ;
128 struct futex_pi_state ;
129 struct robust_list_head ;
130 struct bio_list ;
131 struct fs_struct ;
132 struct perf_event_context ;
133 struct blk_plug ;
135 struct nameidata ;
188 struct cfs_rq ;
189 struct task_group ;
493 struct sighand_struct { atomic_t count; struct k_sigaction action[64U]; spinlock_t siglock; wait_queue_head_t signalfd_wqh; } ;
536 struct pacct_struct { int ac_flag; long ac_exitcode; unsigned long ac_mem; cputime_t ac_utime; cputime_t ac_stime; unsigned long ac_minflt; unsigned long ac_majflt; } ;
544 struct cpu_itimer { cputime_t expires; cputime_t incr; u32 error; u32 incr_error; } ;
551 struct prev_cputime { cputime_t utime; cputime_t stime; raw_spinlock_t lock; } ;
576 struct task_cputime { cputime_t utime; cputime_t stime; unsigned long long sum_exec_runtime; } ;
592 struct task_cputime_atomic { atomic64_t utime; atomic64_t stime; atomic64_t sum_exec_runtime; } ;
614 struct thread_group_cputimer { struct task_cputime_atomic cputime_atomic; bool running; bool checking_timer; } ;
659 struct autogroup ;
660 struct tty_struct ;
660 struct taskstats ;
660 struct tty_audit_buf ;
660 struct signal_struct { atomic_t sigcnt; atomic_t live; int nr_threads; atomic_t oom_victims; struct list_head thread_head; wait_queue_head_t wait_chldexit; struct task_struct *curr_target; struct sigpending shared_pending; int group_exit_code; int notify_count; struct task_struct *group_exit_task; int group_stop_count; unsigned int flags; unsigned char is_child_subreaper; unsigned char has_child_subreaper; int posix_timer_id; struct list_head posix_timers; struct hrtimer real_timer; struct pid *leader_pid; ktime_t it_real_incr; struct cpu_itimer it[2U]; struct thread_group_cputimer cputimer; struct task_cputime cputime_expires; struct list_head cpu_timers[3U]; struct pid *tty_old_pgrp; int leader; struct tty_struct *tty; struct autogroup *autogroup; seqlock_t stats_lock; cputime_t utime; cputime_t stime; cputime_t cutime; cputime_t cstime; cputime_t gtime; cputime_t cgtime; struct prev_cputime prev_cputime; unsigned long nvcsw; unsigned long nivcsw; unsigned long cnvcsw; unsigned long cnivcsw; unsigned long min_flt; unsigned long maj_flt; unsigned long cmin_flt; unsigned long cmaj_flt; unsigned long inblock; unsigned long oublock; unsigned long cinblock; unsigned long coublock; unsigned long maxrss; unsigned long cmaxrss; struct task_io_accounting ioac; unsigned long long sum_sched_runtime; struct rlimit rlim[16U]; struct pacct_struct pacct; struct taskstats *stats; unsigned int audit_tty; struct tty_audit_buf *tty_audit_buf; bool oom_flag_origin; short oom_score_adj; short oom_score_adj_min; struct mutex cred_guard_mutex; } ;
835 struct user_struct { atomic_t __count; atomic_t processes; atomic_t sigpending; atomic_t inotify_watches; atomic_t inotify_devs; atomic_t fanotify_listeners; atomic_long_t epoll_watches; unsigned long mq_bytes; unsigned long locked_shm; unsigned long unix_inflight; atomic_long_t pipe_bufs; struct key *uid_keyring; struct key *session_keyring; struct hlist_node uidhash_node; kuid_t uid; atomic_long_t locked_vm; } ;
880 struct backing_dev_info ;
881 struct reclaim_state ;
882 struct sched_info { unsigned long pcount; unsigned long long run_delay; unsigned long long last_arrival; unsigned long long last_queued; } ;
896 struct task_delay_info { spinlock_t lock; unsigned int flags; u64 blkio_start; u64 blkio_delay; u64 swapin_delay; u32 blkio_count; u32 swapin_count; u64 freepages_start; u64 freepages_delay; u32 freepages_count; } ;
953 struct wake_q_node { struct wake_q_node *next; } ;
1185 struct io_context ;
1219 struct uts_namespace ;
1220 struct load_weight { unsigned long weight; u32 inv_weight; } ;
1228 struct sched_avg { u64 last_update_time; u64 load_sum; u32 util_sum; u32 period_contrib; unsigned long load_avg; unsigned long util_avg; } ;
1286 struct sched_statistics { u64 wait_start; u64 wait_max; u64 wait_count; u64 wait_sum; u64 iowait_count; u64 iowait_sum; u64 sleep_start; u64 sleep_max; s64 sum_sleep_runtime; u64 block_start; u64 block_max; u64 exec_max; u64 slice_max; u64 nr_migrations_cold; u64 nr_failed_migrations_affine; u64 nr_failed_migrations_running; u64 nr_failed_migrations_hot; u64 nr_forced_migrations; u64 nr_wakeups; u64 nr_wakeups_sync; u64 nr_wakeups_migrate; u64 nr_wakeups_local; u64 nr_wakeups_remote; u64 nr_wakeups_affine; u64 nr_wakeups_affine_attempts; u64 nr_wakeups_passive; u64 nr_wakeups_idle; } ;
1321 struct sched_entity { struct load_weight load; struct rb_node run_node; struct list_head group_node; unsigned int on_rq; u64 exec_start; u64 sum_exec_runtime; u64 vruntime; u64 prev_sum_exec_runtime; u64 nr_migrations; struct sched_statistics statistics; int depth; struct sched_entity *parent; struct cfs_rq *cfs_rq; struct cfs_rq *my_q; struct sched_avg avg; } ;
1358 struct rt_rq ;
1358 struct sched_rt_entity { struct list_head run_list; unsigned long timeout; unsigned long watchdog_stamp; unsigned int time_slice; unsigned short on_rq; unsigned short on_list; struct sched_rt_entity *back; struct sched_rt_entity *parent; struct rt_rq *rt_rq; struct rt_rq *my_q; } ;
1376 struct sched_dl_entity { struct rb_node rb_node; u64 dl_runtime; u64 dl_deadline; u64 dl_period; u64 dl_bw; s64 runtime; u64 deadline; unsigned int flags; int dl_throttled; int dl_boosted; int dl_yielded; struct hrtimer dl_timer; } ;
1440 struct tlbflush_unmap_batch { struct cpumask cpumask; bool flush_required; bool writable; } ;
1459 struct sched_class ;
1459 struct files_struct ;
1459 struct compat_robust_list_head ;
1459 struct numa_group ;
1459 struct kcov ;
1459 struct task_struct { volatile long state; void *stack; atomic_t usage; unsigned int flags; unsigned int ptrace; struct llist_node wake_entry; int on_cpu; unsigned int wakee_flips; unsigned long wakee_flip_decay_ts; struct task_struct *last_wakee; int wake_cpu; int on_rq; int prio; int static_prio; int normal_prio; unsigned int rt_priority; const struct sched_class *sched_class; struct sched_entity se; struct sched_rt_entity rt; struct task_group *sched_task_group; struct sched_dl_entity dl; struct hlist_head preempt_notifiers; unsigned int policy; int nr_cpus_allowed; cpumask_t cpus_allowed; unsigned long rcu_tasks_nvcsw; bool rcu_tasks_holdout; struct list_head rcu_tasks_holdout_list; int rcu_tasks_idle_cpu; struct sched_info sched_info; struct list_head tasks; struct plist_node pushable_tasks; struct rb_node pushable_dl_tasks; struct mm_struct *mm; struct mm_struct *active_mm; u32 vmacache_seqnum; struct vm_area_struct *vmacache[4U]; struct task_rss_stat rss_stat; int exit_state; int exit_code; int exit_signal; int pdeath_signal; unsigned long jobctl; unsigned int personality; unsigned char sched_reset_on_fork; unsigned char sched_contributes_to_load; unsigned char sched_migrated; unsigned char sched_remote_wakeup; unsigned char; unsigned char in_execve; unsigned char in_iowait; unsigned char restore_sigmask; unsigned char memcg_may_oom; unsigned char memcg_kmem_skip_account; unsigned char brk_randomized; unsigned long atomic_flags; struct restart_block restart_block; pid_t pid; pid_t tgid; struct task_struct *real_parent; struct task_struct *parent; struct list_head children; struct list_head sibling; struct task_struct *group_leader; struct list_head ptraced; struct list_head ptrace_entry; struct pid_link pids[3U]; struct list_head thread_group; struct list_head thread_node; struct completion *vfork_done; int *set_child_tid; int *clear_child_tid; cputime_t utime; cputime_t stime; cputime_t utimescaled; cputime_t stimescaled; cputime_t gtime; struct prev_cputime prev_cputime; unsigned long nvcsw; unsigned long nivcsw; u64 start_time; u64 real_start_time; unsigned long min_flt; unsigned long maj_flt; struct task_cputime cputime_expires; struct list_head cpu_timers[3U]; const struct cred *real_cred; const struct cred *cred; char comm[16U]; struct nameidata *nameidata; struct sysv_sem sysvsem; struct sysv_shm sysvshm; unsigned long last_switch_count; struct fs_struct *fs; struct files_struct *files; struct nsproxy *nsproxy; struct signal_struct *signal; struct sighand_struct *sighand; sigset_t blocked; sigset_t real_blocked; sigset_t saved_sigmask; struct sigpending pending; unsigned long sas_ss_sp; size_t sas_ss_size; unsigned int sas_ss_flags; struct callback_head *task_works; struct audit_context *audit_context; kuid_t loginuid; unsigned int sessionid; struct seccomp seccomp; u32 parent_exec_id; u32 self_exec_id; spinlock_t alloc_lock; raw_spinlock_t pi_lock; struct wake_q_node wake_q; struct rb_root pi_waiters; struct rb_node *pi_waiters_leftmost; struct rt_mutex_waiter *pi_blocked_on; struct mutex_waiter *blocked_on; unsigned int irq_events; unsigned long hardirq_enable_ip; unsigned long hardirq_disable_ip; unsigned int hardirq_enable_event; unsigned int hardirq_disable_event; int hardirqs_enabled; int hardirq_context; unsigned long softirq_disable_ip; unsigned long softirq_enable_ip; unsigned int softirq_disable_event; unsigned int softirq_enable_event; int softirqs_enabled; int softirq_context; u64 curr_chain_key; int lockdep_depth; unsigned int lockdep_recursion; struct held_lock held_locks[48U]; gfp_t lockdep_reclaim_gfp; unsigned int in_ubsan; void *journal_info; struct bio_list *bio_list; struct blk_plug *plug; struct reclaim_state *reclaim_state; struct backing_dev_info *backing_dev_info; struct io_context *io_context; unsigned long ptrace_message; siginfo_t *last_siginfo; struct task_io_accounting ioac; u64 acct_rss_mem1; u64 acct_vm_mem1; cputime_t acct_timexpd; nodemask_t mems_allowed; seqcount_t mems_allowed_seq; int cpuset_mem_spread_rotor; int cpuset_slab_spread_rotor; struct css_set *cgroups; struct list_head cg_list; struct robust_list_head *robust_list; struct compat_robust_list_head *compat_robust_list; struct list_head pi_state_list; struct futex_pi_state *pi_state_cache; struct perf_event_context *perf_event_ctxp[2U]; struct mutex perf_event_mutex; struct list_head perf_event_list; struct mempolicy *mempolicy; short il_next; short pref_node_fork; int numa_scan_seq; unsigned int numa_scan_period; unsigned int numa_scan_period_max; int numa_preferred_nid; unsigned long numa_migrate_retry; u64 node_stamp; u64 last_task_numa_placement; u64 last_sum_exec_runtime; struct callback_head numa_work; struct list_head numa_entry; struct numa_group *numa_group; unsigned long *numa_faults; unsigned long total_numa_faults; unsigned long numa_faults_locality[3U]; unsigned long numa_pages_migrated; struct tlbflush_unmap_batch tlb_ubc; struct callback_head rcu; struct pipe_inode_info *splice_pipe; struct page_frag task_frag; struct task_delay_info *delays; int make_it_fail; int nr_dirtied; int nr_dirtied_pause; unsigned long dirty_paused_when; int latency_record_count; struct latency_record latency_record[32U]; u64 timer_slack_ns; u64 default_timer_slack_ns; unsigned int kasan_depth; unsigned long trace; unsigned long trace_recursion; enum kcov_mode kcov_mode; unsigned int kcov_size; void *kcov_area; struct kcov *kcov; struct mem_cgroup *memcg_in_oom; gfp_t memcg_oom_gfp_mask; int memcg_oom_order; unsigned int memcg_nr_pages_over_high; struct uprobe_task *utask; unsigned int sequential_io; unsigned int sequential_io_avg; unsigned long task_state_change; int pagefault_disabled; struct task_struct *oom_reaper_list; struct thread_struct thread; } ;
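Aside: task_struct above threads itself onto several lists (tasks, children, sibling, thread_group) through embedded struct list_head nodes rather than external containers. A minimal sketch of that intrusive-list idiom, using a hypothetical struct task as a stand-in for the real type:

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next; struct list_head *prev; };

/* Recover the enclosing object from a pointer to its embedded node. */
#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct task { int pid; struct list_head tasks; };

int main(void)
{
    struct task a = { 1, { 0, 0 } }, b = { 2, { 0, 0 } };
    struct list_head head = { &a.tasks, &b.tasks };
    a.tasks.next = &b.tasks; a.tasks.prev = &head;
    b.tasks.next = &head;    b.tasks.prev = &a.tasks;

    /* Walk the ring and hop from each node back to its task. */
    for (struct list_head *p = head.next; p != &head; p = p->next)
        printf("pid=%d\n", container_of(p, struct task, tasks)->pid);
    return 0;
}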
63 struct exception_table_entry { int insn; int fixup; int handler; } ;
161 struct in6_addr ;
145 struct sk_buff ;
184 struct klist_node ;
37 struct klist_node { void *n_klist; struct list_head n_node; struct kref n_ref; } ;
93 struct hlist_bl_node ;
93 struct hlist_bl_head { struct hlist_bl_node *first; } ;
36 struct hlist_bl_node { struct hlist_bl_node *next; struct hlist_bl_node **pprev; } ;
114 struct __anonstruct____missing_field_name_346 { spinlock_t lock; int count; } ;
114 union __anonunion____missing_field_name_345 { struct __anonstruct____missing_field_name_346 __annonCompField65; } ;
114 struct lockref { union __anonunion____missing_field_name_345 __annonCompField66; } ;
77 struct path ;
78 struct vfsmount ;
79 struct __anonstruct____missing_field_name_348 { u32 hash; u32 len; } ;
79 union __anonunion____missing_field_name_347 { struct __anonstruct____missing_field_name_348 __annonCompField67; u64 hash_len; } ;
79 struct qstr { union __anonunion____missing_field_name_347 __annonCompField68; const unsigned char *name; } ;
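Aside: the anonymous union inside qstr lets the (hash, len) pair be read or written as one u64 hash_len. A sketch of the same overlay, assuming a little-endian target where hash occupies the low 32 bits:

#include <stdint.h>
#include <stdio.h>

union name_key {                       /* hypothetical stand-in for qstr's union */
    struct { uint32_t hash; uint32_t len; } s;
    uint64_t hash_len;
};

int main(void)
{
    union name_key k;
    k.hash_len = ((uint64_t)5 << 32) | 0xdeadbeefu;  /* len=5, hash=0xdeadbeef */
    printf("hash=%x len=%u\n", k.s.hash, k.s.len);
    return 0;
}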
65 struct dentry_operations ;
65 union __anonunion____missing_field_name_349 { struct list_head d_lru; wait_queue_head_t *d_wait; } ;
65 union __anonunion_d_u_350 { struct hlist_node d_alias; struct hlist_bl_node d_in_lookup_hash; struct callback_head d_rcu; } ;
65 struct dentry { unsigned int d_flags; seqcount_t d_seq; struct hlist_bl_node d_hash; struct dentry *d_parent; struct qstr d_name; struct inode *d_inode; unsigned char d_iname[32U]; struct lockref d_lockref; const struct dentry_operations *d_op; struct super_block *d_sb; unsigned long d_time; void *d_fsdata; union __anonunion____missing_field_name_349 __annonCompField69; struct list_head d_child; struct list_head d_subdirs; union __anonunion_d_u_350 d_u; } ;
121 struct dentry_operations { int (*d_revalidate)(struct dentry *, unsigned int); int (*d_weak_revalidate)(struct dentry *, unsigned int); int (*d_hash)(const struct dentry *, struct qstr *); int (*d_compare)(const struct dentry *, unsigned int, const char *, const struct qstr *); int (*d_delete)(const struct dentry *); int (*d_init)(struct dentry *); void (*d_release)(struct dentry *); void (*d_prune)(struct dentry *); void (*d_iput)(struct dentry *, struct inode *); char * (*d_dname)(struct dentry *, char *, int); struct vfsmount * (*d_automount)(struct path *); int (*d_manage)(struct dentry *, bool ); struct dentry * (*d_real)(struct dentry *, const struct inode *, unsigned int); } ;
591 struct path { struct vfsmount *mnt; struct dentry *dentry; } ;
19 struct shrink_control { gfp_t gfp_mask; unsigned long nr_to_scan; int nid; struct mem_cgroup *memcg; } ;
27 struct shrinker { unsigned long int (*count_objects)(struct shrinker *, struct shrink_control *); unsigned long int (*scan_objects)(struct shrinker *, struct shrink_control *); int seeks; long batch; unsigned long flags; struct list_head list; atomic_long_t *nr_deferred; } ;
80 struct list_lru_one { struct list_head list; long nr_items; } ;
32 struct list_lru_memcg { struct list_lru_one *lru[0U]; } ;
37 struct list_lru_node { spinlock_t lock; struct list_lru_one lru; struct list_lru_memcg *memcg_lrus; } ;
47 struct list_lru { struct list_lru_node *node; struct list_head list; } ;
63 struct __anonstruct____missing_field_name_352 { struct radix_tree_node *parent; void *private_data; } ;
63 union __anonunion____missing_field_name_351 { struct __anonstruct____missing_field_name_352 __annonCompField70; struct callback_head callback_head; } ;
63 struct radix_tree_node { unsigned char shift; unsigned char offset; unsigned int count; union __anonunion____missing_field_name_351 __annonCompField71; struct list_head private_list; void *slots[64U]; unsigned long tags[3U][1U]; } ;
106 struct radix_tree_root { gfp_t gfp_mask; struct radix_tree_node *rnode; } ;
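Aside: slots[64U] in radix_tree_node implies a 6-bit fanout per level, with node->shift giving each level's bit position in the index. A sketch of the lookup this layout suggests, with simplified stand-in fields rather than the kernel API:

#include <stddef.h>

struct rnode { unsigned char shift; void *slots[64]; };

static void *radix_lookup(struct rnode *node, unsigned long index)
{
    while (node) {
        unsigned int slot = (index >> node->shift) & 63; /* 6 bits per level */
        if (node->shift == 0)
            return node->slots[slot];                    /* leaf: the entry */
        node = (struct rnode *)node->slots[slot];        /* descend a level */
    }
    return NULL;
}

int main(void)
{
    struct rnode leaf = { 0, { 0 } };
    int v = 42;
    leaf.slots[7] = &v;
    return *(int *)radix_lookup(&leaf, 7) == 42 ? 0 : 1;
}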
45 struct fiemap_extent { __u64 fe_logical; __u64 fe_physical; __u64 fe_length; __u64 fe_reserved64[2U]; __u32 fe_flags; __u32 fe_reserved[3U]; } ;
38 enum migrate_mode { MIGRATE_ASYNC = 0, MIGRATE_SYNC_LIGHT = 1, MIGRATE_SYNC = 2 } ;
44 struct bio_vec { struct page *bv_page; unsigned int bv_len; unsigned int bv_offset; } ;
34 struct bvec_iter { sector_t bi_sector; unsigned int bi_size; unsigned int bi_idx; unsigned int bi_bvec_done; } ;
84 struct bio_set ;
85 struct bio ;
86 struct bio_integrity_payload ;
87 struct block_device ;
18 typedef void bio_end_io_t(struct bio *);
20 union __anonunion____missing_field_name_359 { struct bio_integrity_payload *bi_integrity; } ;
20 struct bio { struct bio *bi_next; struct block_device *bi_bdev; int bi_error; unsigned int bi_opf; unsigned short bi_flags; unsigned short bi_ioprio; struct bvec_iter bi_iter; unsigned int bi_phys_segments; unsigned int bi_seg_front_size; unsigned int bi_seg_back_size; atomic_t __bi_remaining; bio_end_io_t *bi_end_io; void *bi_private; struct io_context *bi_ioc; struct cgroup_subsys_state *bi_css; union __anonunion____missing_field_name_359 __annonCompField72; unsigned short bi_vcnt; unsigned short bi_max_vecs; atomic_t __bi_cnt; struct bio_vec *bi_io_vec; struct bio_set *bi_pool; struct bio_vec bi_inline_vecs[0U]; } ;
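Aside: bi_inline_vecs[0U] at the end of bio is the zero-length trailing-array idiom: a small vector table is carved out of the same allocation as the header, avoiding a second allocation. A sketch of the pattern with hypothetical types:

#include <stdlib.h>
#include <string.h>

struct vec { void *page; unsigned int len, offset; };
struct msg { unsigned short nvecs; struct vec inline_vecs[]; };

static struct msg *msg_alloc(unsigned short nvecs)
{
    /* One allocation covers the header plus nvecs trailing elements. */
    struct msg *m = malloc(sizeof(*m) + nvecs * sizeof(struct vec));
    if (m) {
        m->nvecs = nvecs;
        memset(m->inline_vecs, 0, nvecs * sizeof(struct vec));
    }
    return m;
}

int main(void) { free(msg_alloc(4)); return 0; }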
266 struct delayed_call { void (*fn)(void *); void *arg; } ;
261 struct bdi_writeback ;
262 struct export_operations ;
264 struct kstatfs ;
265 struct swap_info_struct ;
266 struct fscrypt_info ;
267 struct fscrypt_operations ;
76 struct iattr { unsigned int ia_valid; umode_t ia_mode; kuid_t ia_uid; kgid_t ia_gid; loff_t ia_size; struct timespec ia_atime; struct timespec ia_mtime; struct timespec ia_ctime; struct file *ia_file; } ;
261 struct percpu_counter { raw_spinlock_t lock; s64 count; struct list_head list; s32 *counters; } ;
213 struct dquot ;
214 struct kqid ;
19 typedef __kernel_uid32_t projid_t;
23 struct __anonstruct_kprojid_t_360 { projid_t val; } ;
23 typedef struct __anonstruct_kprojid_t_360 kprojid_t;
181 enum quota_type { USRQUOTA = 0, GRPQUOTA = 1, PRJQUOTA = 2 } ;
66 typedef long long qsize_t;
67 union __anonunion____missing_field_name_361 { kuid_t uid; kgid_t gid; kprojid_t projid; } ;
67 struct kqid { union __anonunion____missing_field_name_361 __annonCompField73; enum quota_type type; } ;
194 struct mem_dqblk { qsize_t dqb_bhardlimit; qsize_t dqb_bsoftlimit; qsize_t dqb_curspace; qsize_t dqb_rsvspace; qsize_t dqb_ihardlimit; qsize_t dqb_isoftlimit; qsize_t dqb_curinodes; time64_t dqb_btime; time64_t dqb_itime; } ;
216 struct quota_format_type ;
217 struct mem_dqinfo { struct quota_format_type *dqi_format; int dqi_fmt_id; struct list_head dqi_dirty_list; unsigned long dqi_flags; unsigned int dqi_bgrace; unsigned int dqi_igrace; qsize_t dqi_max_spc_limit; qsize_t dqi_max_ino_limit; void *dqi_priv; } ;
282 struct dquot { struct hlist_node dq_hash; struct list_head dq_inuse; struct list_head dq_free; struct list_head dq_dirty; struct mutex dq_lock; atomic_t dq_count; wait_queue_head_t dq_wait_unused; struct super_block *dq_sb; struct kqid dq_id; loff_t dq_off; unsigned long dq_flags; struct mem_dqblk dq_dqb; } ;
309 struct quota_format_ops { int (*check_quota_file)(struct super_block *, int); int (*read_file_info)(struct super_block *, int); int (*write_file_info)(struct super_block *, int); int (*free_file_info)(struct super_block *, int); int (*read_dqblk)(struct dquot *); int (*commit_dqblk)(struct dquot *); int (*release_dqblk)(struct dquot *); int (*get_next_id)(struct super_block *, struct kqid *); } ;
321 struct dquot_operations { int (*write_dquot)(struct dquot *); struct dquot * (*alloc_dquot)(struct super_block *, int); void (*destroy_dquot)(struct dquot *); int (*acquire_dquot)(struct dquot *); int (*release_dquot)(struct dquot *); int (*mark_dirty)(struct dquot *); int (*write_info)(struct super_block *, int); qsize_t * (*get_reserved_space)(struct inode *); int (*get_projid)(struct inode *, kprojid_t *); int (*get_next_id)(struct super_block *, struct kqid *); } ;
338 struct qc_dqblk { int d_fieldmask; u64 d_spc_hardlimit; u64 d_spc_softlimit; u64 d_ino_hardlimit; u64 d_ino_softlimit; u64 d_space; u64 d_ino_count; s64 d_ino_timer; s64 d_spc_timer; int d_ino_warns; int d_spc_warns; u64 d_rt_spc_hardlimit; u64 d_rt_spc_softlimit; u64 d_rt_space; s64 d_rt_spc_timer; int d_rt_spc_warns; } ;
361 struct qc_type_state { unsigned int flags; unsigned int spc_timelimit; unsigned int ino_timelimit; unsigned int rt_spc_timelimit; unsigned int spc_warnlimit; unsigned int ino_warnlimit; unsigned int rt_spc_warnlimit; unsigned long long ino; blkcnt_t blocks; blkcnt_t nextents; } ;
407 struct qc_state { unsigned int s_incoredqs; struct qc_type_state s_state[3U]; } ;
418 struct qc_info { int i_fieldmask; unsigned int i_flags; unsigned int i_spc_timelimit; unsigned int i_ino_timelimit; unsigned int i_rt_spc_timelimit; unsigned int i_spc_warnlimit; unsigned int i_ino_warnlimit; unsigned int i_rt_spc_warnlimit; } ;
431 struct quotactl_ops { int (*quota_on)(struct super_block *, int, int, struct path *); int (*quota_off)(struct super_block *, int); int (*quota_enable)(struct super_block *, unsigned int); int (*quota_disable)(struct super_block *, unsigned int); int (*quota_sync)(struct super_block *, int); int (*set_info)(struct super_block *, int, struct qc_info *); int (*get_dqblk)(struct super_block *, struct kqid , struct qc_dqblk *); int (*get_nextdqblk)(struct super_block *, struct kqid *, struct qc_dqblk *); int (*set_dqblk)(struct super_block *, struct kqid , struct qc_dqblk *); int (*get_state)(struct super_block *, struct qc_state *); int (*rm_xquota)(struct super_block *, unsigned int); } ;
447 struct quota_format_type { int qf_fmt_id; const struct quota_format_ops *qf_ops; struct module *qf_owner; struct quota_format_type *qf_next; } ;
511 struct quota_info { unsigned int flags; struct mutex dqio_mutex; struct mutex dqonoff_mutex; struct inode *files[3U]; struct mem_dqinfo info[3U]; const struct quota_format_ops *ops[3U]; } ;
541 struct writeback_control ;
542 struct kiocb { struct file *ki_filp; loff_t ki_pos; void (*ki_complete)(struct kiocb *, long, long); void *private; int ki_flags; } ;
367 struct address_space_operations { int (*writepage)(struct page *, struct writeback_control *); int (*readpage)(struct file *, struct page *); int (*writepages)(struct address_space *, struct writeback_control *); int (*set_page_dirty)(struct page *); int (*readpages)(struct file *, struct address_space *, struct list_head *, unsigned int); int (*write_begin)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page **, void **); int (*write_end)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page *, void *); sector_t (*bmap)(struct address_space *, sector_t ); void (*invalidatepage)(struct page *, unsigned int, unsigned int); int (*releasepage)(struct page *, gfp_t ); void (*freepage)(struct page *); ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *); int (*migratepage)(struct address_space *, struct page *, struct page *, enum migrate_mode ); bool (*isolate_page)(struct page *, isolate_mode_t ); void (*putback_page)(struct page *); int (*launder_page)(struct page *); int (*is_partially_uptodate)(struct page *, unsigned long, unsigned long); void (*is_dirty_writeback)(struct page *, bool *, bool *); int (*error_remove_page)(struct address_space *, struct page *); int (*swap_activate)(struct swap_info_struct *, struct file *, sector_t *); void (*swap_deactivate)(struct file *); } ;
426 struct address_space { struct inode *host; struct radix_tree_root page_tree; spinlock_t tree_lock; atomic_t i_mmap_writable; struct rb_root i_mmap; struct rw_semaphore i_mmap_rwsem; unsigned long nrpages; unsigned long nrexceptional; unsigned long writeback_index; const struct address_space_operations *a_ops; unsigned long flags; spinlock_t private_lock; struct list_head private_list; void *private_data; } ;
447 struct request_queue ;
448 struct hd_struct ;
448 struct gendisk ;
448 struct block_device { dev_t bd_dev; int bd_openers; struct inode *bd_inode; struct super_block *bd_super; struct mutex bd_mutex; void *bd_claiming; void *bd_holder; int bd_holders; bool bd_write_holder; struct list_head bd_holder_disks; struct block_device *bd_contains; unsigned int bd_block_size; struct hd_struct *bd_part; unsigned int bd_part_count; int bd_invalidated; struct gendisk *bd_disk; struct request_queue *bd_queue; struct list_head bd_list; unsigned long bd_private; int bd_fsfreeze_count; struct mutex bd_fsfreeze_mutex; } ;
563 struct posix_acl ;
589 struct inode_operations ;
589 union __anonunion____missing_field_name_366 { const unsigned int i_nlink; unsigned int __i_nlink; } ;
589 union __anonunion____missing_field_name_367 { struct hlist_head i_dentry; struct callback_head i_rcu; } ;
589 struct file_lock_context ;
589 struct cdev ;
589 union __anonunion____missing_field_name_368 { struct pipe_inode_info *i_pipe; struct block_device *i_bdev; struct cdev *i_cdev; char *i_link; unsigned int i_dir_seq; } ;
589 struct inode { umode_t i_mode; unsigned short i_opflags; kuid_t i_uid; kgid_t i_gid; unsigned int i_flags; struct posix_acl *i_acl; struct posix_acl *i_default_acl; const struct inode_operations *i_op; struct super_block *i_sb; struct address_space *i_mapping; void *i_security; unsigned long i_ino; union __anonunion____missing_field_name_366 __annonCompField74; dev_t i_rdev; loff_t i_size; struct timespec i_atime; struct timespec i_mtime; struct timespec i_ctime; spinlock_t i_lock; unsigned short i_bytes; unsigned int i_blkbits; blkcnt_t i_blocks; unsigned long i_state; struct rw_semaphore i_rwsem; unsigned long dirtied_when; unsigned long dirtied_time_when; struct hlist_node i_hash; struct list_head i_io_list; struct bdi_writeback *i_wb; int i_wb_frn_winner; u16 i_wb_frn_avg_time; u16 i_wb_frn_history; struct list_head i_lru; struct list_head i_sb_list; struct list_head i_wb_list; union __anonunion____missing_field_name_367 __annonCompField75; u64 i_version; atomic_t i_count; atomic_t i_dio_count; atomic_t i_writecount; atomic_t i_readcount; const struct file_operations *i_fop; struct file_lock_context *i_flctx; struct address_space i_data; struct list_head i_devices; union __anonunion____missing_field_name_368 __annonCompField76; __u32 i_generation; __u32 i_fsnotify_mask; struct hlist_head i_fsnotify_marks; struct fscrypt_info *i_crypt_info; void *i_private; } ;
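Aside: the i_nlink union in inode pairs a const member with a non-const alias of the same storage, so general code gets a read-only view while owning code updates it through __i_nlink. A sketch of that idiom:

#include <stdio.h>

struct node {
    union { const unsigned int nlink; unsigned int __nlink; };
};

int main(void)
{
    struct node n = { .__nlink = 1 };
    n.__nlink = 2;                  /* private writer path */
    printf("nlink=%u\n", n.nlink);  /* public const view of the same storage */
    return 0;
}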
843 struct fown_struct { rwlock_t lock; struct pid *pid; enum pid_type pid_type; kuid_t uid; kuid_t euid; int signum; } ;
851 struct file_ra_state { unsigned long start; unsigned int size; unsigned int async_size; unsigned int ra_pages; unsigned int mmap_miss; loff_t prev_pos; } ;
874 union __anonunion_f_u_369 { struct llist_node fu_llist; struct callback_head fu_rcuhead; } ;
874 struct file { union __anonunion_f_u_369 f_u; struct path f_path; struct inode *f_inode; const struct file_operations *f_op; spinlock_t f_lock; atomic_long_t f_count; unsigned int f_flags; fmode_t f_mode; struct mutex f_pos_lock; loff_t f_pos; struct fown_struct f_owner; const struct cred *f_cred; struct file_ra_state f_ra; u64 f_version; void *f_security; void *private_data; struct list_head f_ep_links; struct list_head f_tfile_llink; struct address_space *f_mapping; } ;
959 typedef void *fl_owner_t;
960 struct file_lock ;
961 struct file_lock_operations { void (*fl_copy_lock)(struct file_lock *, struct file_lock *); void (*fl_release_private)(struct file_lock *); } ;
967 struct lock_manager_operations { int (*lm_compare_owner)(struct file_lock *, struct file_lock *); unsigned long int (*lm_owner_key)(struct file_lock *); fl_owner_t (*lm_get_owner)(fl_owner_t ); void (*lm_put_owner)(fl_owner_t ); void (*lm_notify)(struct file_lock *); int (*lm_grant)(struct file_lock *, int); bool (*lm_break)(struct file_lock *); int (*lm_change)(struct file_lock *, int, struct list_head *); void (*lm_setup)(struct file_lock *, void **); } ;
994 struct nlm_lockowner ;
995 struct nfs_lock_info { u32 state; struct nlm_lockowner *owner; struct list_head list; } ;
14 struct nfs4_lock_state ;
15 struct nfs4_lock_info { struct nfs4_lock_state *owner; } ;
19 struct __anonstruct_afs_371 { struct list_head link; int state; } ;
19 union __anonunion_fl_u_370 { struct nfs_lock_info nfs_fl; struct nfs4_lock_info nfs4_fl; struct __anonstruct_afs_371 afs; } ;
19 struct file_lock { struct file_lock *fl_next; struct list_head fl_list; struct hlist_node fl_link; struct list_head fl_block; fl_owner_t fl_owner; unsigned int fl_flags; unsigned char fl_type; unsigned int fl_pid; int fl_link_cpu; struct pid *fl_nspid; wait_queue_head_t fl_wait; struct file *fl_file; loff_t fl_start; loff_t fl_end; struct fasync_struct *fl_fasync; unsigned long fl_break_time; unsigned long fl_downgrade_time; const struct file_lock_operations *fl_ops; const struct lock_manager_operations *fl_lmops; union __anonunion_fl_u_370 fl_u; } ;
1047 struct file_lock_context { spinlock_t flc_lock; struct list_head flc_flock; struct list_head flc_posix; struct list_head flc_lease; } ;
1255 struct fasync_struct { spinlock_t fa_lock; int magic; int fa_fd; struct fasync_struct *fa_next; struct file *fa_file; struct callback_head fa_rcu; } ;
1290 struct sb_writers { int frozen; wait_queue_head_t wait_unfrozen; struct percpu_rw_semaphore rw_sem[3U]; } ;
1320 struct super_operations ;
1320 struct xattr_handler ;
1320 struct mtd_info ;
1320 struct super_block { struct list_head s_list; dev_t s_dev; unsigned char s_blocksize_bits; unsigned long s_blocksize; loff_t s_maxbytes; struct file_system_type *s_type; const struct super_operations *s_op; const struct dquot_operations *dq_op; const struct quotactl_ops *s_qcop; const struct export_operations *s_export_op; unsigned long s_flags; unsigned long s_iflags; unsigned long s_magic; struct dentry *s_root; struct rw_semaphore s_umount; int s_count; atomic_t s_active; void *s_security; const struct xattr_handler **s_xattr; const struct fscrypt_operations *s_cop; struct hlist_bl_head s_anon; struct list_head s_mounts; struct block_device *s_bdev; struct backing_dev_info *s_bdi; struct mtd_info *s_mtd; struct hlist_node s_instances; unsigned int s_quota_types; struct quota_info s_dquot; struct sb_writers s_writers; char s_id[32U]; u8 s_uuid[16U]; void *s_fs_info; unsigned int s_max_links; fmode_t s_mode; u32 s_time_gran; struct mutex s_vfs_rename_mutex; char *s_subtype; char *s_options; const struct dentry_operations *s_d_op; int cleancache_poolid; struct shrinker s_shrink; atomic_long_t s_remove_count; int s_readonly_remount; struct workqueue_struct *s_dio_done_wq; struct hlist_head s_pins; struct user_namespace *s_user_ns; struct list_lru s_dentry_lru; struct list_lru s_inode_lru; struct callback_head rcu; struct work_struct destroy_work; struct mutex s_sync_lock; int s_stack_depth; spinlock_t s_inode_list_lock; struct list_head s_inodes; spinlock_t s_inode_wblist_lock; struct list_head s_inodes_wb; } ;
1603 struct fiemap_extent_info { unsigned int fi_flags; unsigned int fi_extents_mapped; unsigned int fi_extents_max; struct fiemap_extent *fi_extents_start; } ;
1616 struct dir_context ;
1641 struct dir_context { int (*actor)(struct dir_context *, const char *, int, loff_t , u64 , unsigned int); loff_t pos; } ;
1648 struct file_operations { struct module *owner; loff_t (*llseek)(struct file *, loff_t , int); ssize_t (*read)(struct file *, char *, size_t , loff_t *); ssize_t (*write)(struct file *, const char *, size_t , loff_t *); ssize_t (*read_iter)(struct kiocb *, struct iov_iter *); ssize_t (*write_iter)(struct kiocb *, struct iov_iter *); int (*iterate)(struct file *, struct dir_context *); int (*iterate_shared)(struct file *, struct dir_context *); unsigned int (*poll)(struct file *, struct poll_table_struct *); long int (*unlocked_ioctl)(struct file *, unsigned int, unsigned long); long int (*compat_ioctl)(struct file *, unsigned int, unsigned long); int (*mmap)(struct file *, struct vm_area_struct *); int (*open)(struct inode *, struct file *); int (*flush)(struct file *, fl_owner_t ); int (*release)(struct inode *, struct file *); int (*fsync)(struct file *, loff_t , loff_t , int); int (*aio_fsync)(struct kiocb *, int); int (*fasync)(int, struct file *, int); int (*lock)(struct file *, int, struct file_lock *); ssize_t (*sendpage)(struct file *, struct page *, int, size_t , loff_t *, int); unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); int (*check_flags)(int); int (*flock)(struct file *, int, struct file_lock *); ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t , unsigned int); ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t , unsigned int); int (*setlease)(struct file *, long, struct file_lock **, void **); long int (*fallocate)(struct file *, int, loff_t , loff_t ); void (*show_fdinfo)(struct seq_file *, struct file *); ssize_t (*copy_file_range)(struct file *, loff_t , struct file *, loff_t , size_t , unsigned int); int (*clone_file_range)(struct file *, loff_t , struct file *, loff_t , u64 ); ssize_t (*dedupe_file_range)(struct file *, u64 , u64 , struct file *, u64 ); } ;
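Aside: file_operations is an ops table: a const struct of function pointers where a NULL slot means the operation is unsupported. A generic sketch of that dispatch pattern (illustrative names, not the kernel API):

#include <stdio.h>

struct obj;
struct obj_ops {
    long (*read)(struct obj *, char *, unsigned long);
    long (*write)(struct obj *, const char *, unsigned long);
};
struct obj { const struct obj_ops *ops; };

static long null_read(struct obj *o, char *buf, unsigned long n)
{
    (void)o; (void)buf; (void)n;
    return 0;                        /* like /dev/null: immediate EOF */
}

static const struct obj_ops null_ops = { .read = null_read };

int main(void)
{
    struct obj o = { &null_ops };
    char buf[8];
    if (o.ops->read)
        printf("read -> %ld\n", o.ops->read(&o, buf, sizeof buf));
    if (!o.ops->write)
        printf("write unsupported\n");
    return 0;
}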
1717 struct inode_operations { struct dentry * (*lookup)(struct inode *, struct dentry *, unsigned int); const char * (*get_link)(struct dentry *, struct inode *, struct delayed_call *); int (*permission)(struct inode *, int); struct posix_acl * (*get_acl)(struct inode *, int); int (*readlink)(struct dentry *, char *, int); int (*create)(struct inode *, struct dentry *, umode_t , bool ); int (*link)(struct dentry *, struct inode *, struct dentry *); int (*unlink)(struct inode *, struct dentry *); int (*symlink)(struct inode *, struct dentry *, const char *); int (*mkdir)(struct inode *, struct dentry *, umode_t ); int (*rmdir)(struct inode *, struct dentry *); int (*mknod)(struct inode *, struct dentry *, umode_t , dev_t ); int (*rename)(struct inode *, struct dentry *, struct inode *, struct dentry *); int (*rename2)(struct inode *, struct dentry *, struct inode *, struct dentry *, unsigned int); int (*setattr)(struct dentry *, struct iattr *); int (*getattr)(struct vfsmount *, struct dentry *, struct kstat *); int (*setxattr)(struct dentry *, struct inode *, const char *, const void *, size_t , int); ssize_t (*getxattr)(struct dentry *, struct inode *, const char *, void *, size_t ); ssize_t (*listxattr)(struct dentry *, char *, size_t ); int (*removexattr)(struct dentry *, const char *); int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 , u64 ); int (*update_time)(struct inode *, struct timespec *, int); int (*atomic_open)(struct inode *, struct dentry *, struct file *, unsigned int, umode_t , int *); int (*tmpfile)(struct inode *, struct dentry *, umode_t ); int (*set_acl)(struct inode *, struct posix_acl *, int); } ;
1774 struct super_operations { struct inode * (*alloc_inode)(struct super_block *); void (*destroy_inode)(struct inode *); void (*dirty_inode)(struct inode *, int); int (*write_inode)(struct inode *, struct writeback_control *); int (*drop_inode)(struct inode *); void (*evict_inode)(struct inode *); void (*put_super)(struct super_block *); int (*sync_fs)(struct super_block *, int); int (*freeze_super)(struct super_block *); int (*freeze_fs)(struct super_block *); int (*thaw_super)(struct super_block *); int (*unfreeze_fs)(struct super_block *); int (*statfs)(struct dentry *, struct kstatfs *); int (*remount_fs)(struct super_block *, int *, char *); void (*umount_begin)(struct super_block *); int (*show_options)(struct seq_file *, struct dentry *); int (*show_devname)(struct seq_file *, struct dentry *); int (*show_path)(struct seq_file *, struct dentry *); int (*show_stats)(struct seq_file *, struct dentry *); ssize_t (*quota_read)(struct super_block *, int, char *, size_t , loff_t ); ssize_t (*quota_write)(struct super_block *, int, const char *, size_t , loff_t ); struct dquot ** (*get_dquots)(struct inode *); int (*bdev_try_to_free_page)(struct super_block *, struct page *, gfp_t ); long int (*nr_cached_objects)(struct super_block *, struct shrink_control *); long int (*free_cached_objects)(struct super_block *, struct shrink_control *); } ;
2018 struct file_system_type { const char *name; int fs_flags; struct dentry * (*mount)(struct file_system_type *, int, const char *, void *); void (*kill_sb)(struct super_block *); struct module *owner; struct file_system_type *next; struct hlist_head fs_supers; struct lock_class_key s_lock_key; struct lock_class_key s_umount_key; struct lock_class_key s_vfs_rename_key; struct lock_class_key s_writers_key[3U]; struct lock_class_key i_lock_key; struct lock_class_key i_mutex_key; struct lock_class_key i_mutex_dir_key; } ;
3193 struct seq_file { char *buf; size_t size; size_t from; size_t count; size_t pad_until; loff_t index; loff_t read_pos; u64 version; struct mutex lock; const struct seq_operations *op; int poll_event; const struct file *file; void *private; } ;
30 struct seq_operations { void * (*start)(struct seq_file *, loff_t *); void (*stop)(struct seq_file *, void *); void * (*next)(struct seq_file *, void *, loff_t *); int (*show)(struct seq_file *, void *); } ;
222 struct pinctrl ;
223 struct pinctrl_state ;
194 struct dev_pin_info { struct pinctrl *p; struct pinctrl_state *default_state; struct pinctrl_state *init_state; struct pinctrl_state *sleep_state; struct pinctrl_state *idle_state; } ;
76 struct dma_map_ops ;
76 struct dev_archdata { struct dma_map_ops *dma_ops; void *iommu; } ;
24 struct device_private ;
25 struct device_driver ;
26 struct driver_private ;
27 struct class ;
28 struct subsys_private ;
29 struct bus_type ;
30 struct device_node ;
31 struct fwnode_handle ;
32 struct iommu_ops ;
33 struct iommu_group ;
61 struct device_attribute ;
61 struct bus_type { const char *name; const char *dev_name; struct device *dev_root; struct device_attribute *dev_attrs; const struct attribute_group **bus_groups; const struct attribute_group **dev_groups; const struct attribute_group **drv_groups; int (*match)(struct device *, struct device_driver *); int (*uevent)(struct device *, struct kobj_uevent_env *); int (*probe)(struct device *); int (*remove)(struct device *); void (*shutdown)(struct device *); int (*online)(struct device *); int (*offline)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct dev_pm_ops *pm; const struct iommu_ops *iommu_ops; struct subsys_private *p; struct lock_class_key lock_key; } ;
142 struct device_type ;
201 enum probe_type { PROBE_DEFAULT_STRATEGY = 0, PROBE_PREFER_ASYNCHRONOUS = 1, PROBE_FORCE_SYNCHRONOUS = 2 } ;
207 struct of_device_id ;
207 struct acpi_device_id ;
207 struct device_driver { const char *name; struct bus_type *bus; struct module *owner; const char *mod_name; bool suppress_bind_attrs; enum probe_type probe_type; const struct of_device_id *of_match_table; const struct acpi_device_id *acpi_match_table; int (*probe)(struct device *); int (*remove)(struct device *); void (*shutdown)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct attribute_group **groups; const struct dev_pm_ops *pm; struct driver_private *p; } ;
357 struct class_attribute ;
357 struct class { const char *name; struct module *owner; struct class_attribute *class_attrs; const struct attribute_group **dev_groups; struct kobject *dev_kobj; int (*dev_uevent)(struct device *, struct kobj_uevent_env *); char * (*devnode)(struct device *, umode_t *); void (*class_release)(struct class *); void (*dev_release)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct kobj_ns_type_operations *ns_type; const void * (*namespace)(struct device *); const struct dev_pm_ops *pm; struct subsys_private *p; } ;
450 struct class_attribute { struct attribute attr; ssize_t (*show)(struct class *, struct class_attribute *, char *); ssize_t (*store)(struct class *, struct class_attribute *, const char *, size_t ); } ;
518 struct device_type { const char *name; const struct attribute_group **groups; int (*uevent)(struct device *, struct kobj_uevent_env *); char * (*devnode)(struct device *, umode_t *, kuid_t *, kgid_t *); void (*release)(struct device *); const struct dev_pm_ops *pm; } ;
546 struct device_attribute { struct attribute attr; ssize_t (*show)(struct device *, struct device_attribute *, char *); ssize_t (*store)(struct device *, struct device_attribute *, const char *, size_t ); } ;
699 struct device_dma_parameters { unsigned int max_segment_size; unsigned long segment_boundary_mask; } ;
708 struct irq_domain ;
708 struct dma_coherent_mem ;
708 struct cma ;
708 struct device { struct device *parent; struct device_private *p; struct kobject kobj; const char *init_name; const struct device_type *type; struct mutex mutex; struct bus_type *bus; struct device_driver *driver; void *platform_data; void *driver_data; struct dev_pm_info power; struct dev_pm_domain *pm_domain; struct irq_domain *msi_domain; struct dev_pin_info *pins; struct list_head msi_list; int numa_node; u64 *dma_mask; u64 coherent_dma_mask; unsigned long dma_pfn_offset; struct device_dma_parameters *dma_parms; struct list_head dma_pools; struct dma_coherent_mem *dma_mem; struct cma *cma_area; struct dev_archdata archdata; struct device_node *of_node; struct fwnode_handle *fwnode; dev_t devt; u32 id; spinlock_t devres_lock; struct list_head devres_head; struct klist_node knode_class; struct class *class; const struct attribute_group **groups; void (*release)(struct device *); struct iommu_group *iommu_group; bool offline_disabled; bool offline; } ;
862 struct wakeup_source { const char *name; struct list_head entry; spinlock_t lock; struct wake_irq *wakeirq; struct timer_list timer; unsigned long timer_expires; ktime_t total_time; ktime_t max_time; ktime_t last_time; ktime_t start_prevent_time; ktime_t prevent_sleep_time; unsigned long event_count; unsigned long active_count; unsigned long relax_count; unsigned long expire_count; unsigned long wakeup_count; bool active; bool autosleep_enabled; } ;
1327 struct scatterlist ;
89 enum dma_data_direction { DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2, DMA_NONE = 3 } ;
143 union __anonunion___u_373 { unsigned long __val; char __c[1U]; } ;
273 struct vm_fault { unsigned int flags; gfp_t gfp_mask; unsigned long pgoff; void *virtual_address; struct page *cow_page; struct page *page; void *entry; } ;
308 struct fault_env { struct vm_area_struct *vma; unsigned long address; unsigned int flags; pmd_t *pmd; pte_t *pte; spinlock_t *ptl; pgtable_t prealloc_pte; } ;
335 struct vm_operations_struct { void (*open)(struct vm_area_struct *); void (*close)(struct vm_area_struct *); int (*mremap)(struct vm_area_struct *); int (*fault)(struct vm_area_struct *, struct vm_fault *); int (*pmd_fault)(struct vm_area_struct *, unsigned long, pmd_t *, unsigned int); void (*map_pages)(struct fault_env *, unsigned long, unsigned long); int (*page_mkwrite)(struct vm_area_struct *, struct vm_fault *); int (*pfn_mkwrite)(struct vm_area_struct *, struct vm_fault *); int (*access)(struct vm_area_struct *, unsigned long, void *, int, int); const char * (*name)(struct vm_area_struct *); int (*set_policy)(struct vm_area_struct *, struct mempolicy *); struct mempolicy * (*get_policy)(struct vm_area_struct *, unsigned long); struct page * (*find_special_page)(struct vm_area_struct *, unsigned long); } ;
2451 struct scatterlist { unsigned long sg_magic; unsigned long page_link; unsigned int offset; unsigned int length; dma_addr_t dma_address; unsigned int dma_length; } ;
21 struct sg_table { struct scatterlist *sgl; unsigned int nents; unsigned int orig_nents; } ;
406 struct dma_map_ops { void * (*alloc)(struct device *, size_t , dma_addr_t *, gfp_t , unsigned long); void (*free)(struct device *, size_t , void *, dma_addr_t , unsigned long); int (*mmap)(struct device *, struct vm_area_struct *, void *, dma_addr_t , size_t , unsigned long); int (*get_sgtable)(struct device *, struct sg_table *, void *, dma_addr_t , size_t , unsigned long); dma_addr_t (*map_page)(struct device *, struct page *, unsigned long, size_t , enum dma_data_direction , unsigned long); void (*unmap_page)(struct device *, dma_addr_t , size_t , enum dma_data_direction , unsigned long); int (*map_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction , unsigned long); void (*unmap_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction , unsigned long); void (*sync_single_for_cpu)(struct device *, dma_addr_t , size_t , enum dma_data_direction ); void (*sync_single_for_device)(struct device *, dma_addr_t , size_t , enum dma_data_direction ); void (*sync_sg_for_cpu)(struct device *, struct scatterlist *, int, enum dma_data_direction ); void (*sync_sg_for_device)(struct device *, struct scatterlist *, int, enum dma_data_direction ); int (*mapping_error)(struct device *, dma_addr_t ); int (*dma_supported)(struct device *, u64 ); int (*set_dma_mask)(struct device *, u64 ); int is_phys; } ;
15 typedef u64 netdev_features_t;
70 union __anonunion_in6_u_382 { __u8 u6_addr8[16U]; __be16 u6_addr16[8U]; __be32 u6_addr32[4U]; } ;
70 struct in6_addr { union __anonunion_in6_u_382 in6_u; } ;
46 struct ethhdr { unsigned char h_dest[6U]; unsigned char h_source[6U]; __be16 h_proto; } ;
199 struct pipe_buf_operations ;
199 struct pipe_buffer { struct page *page; unsigned int offset; unsigned int len; const struct pipe_buf_operations *ops; unsigned int flags; unsigned long private; } ;
27 struct pipe_inode_info { struct mutex mutex; wait_queue_head_t wait; unsigned int nrbufs; unsigned int curbuf; unsigned int buffers; unsigned int readers; unsigned int writers; unsigned int files; unsigned int waiting_writers; unsigned int r_counter; unsigned int w_counter; struct page *tmp_page; struct fasync_struct *fasync_readers; struct fasync_struct *fasync_writers; struct pipe_buffer *bufs; struct user_struct *user; } ;
63 struct pipe_buf_operations { int can_merge; int (*confirm)(struct pipe_inode_info *, struct pipe_buffer *); void (*release)(struct pipe_inode_info *, struct pipe_buffer *); int (*steal)(struct pipe_inode_info *, struct pipe_buffer *); void (*get)(struct pipe_inode_info *, struct pipe_buffer *); } ;
295 struct flowi_tunnel { __be64 tun_id; } ;
26 struct flowi_common { int flowic_oif; int flowic_iif; __u32 flowic_mark; __u8 flowic_tos; __u8 flowic_scope; __u8 flowic_proto; __u8 flowic_flags; __u32 flowic_secid; struct flowi_tunnel flowic_tun_key; } ;
42 struct __anonstruct_ports_389 { __be16 dport; __be16 sport; } ;
42 struct __anonstruct_icmpt_390 { __u8 type; __u8 code; } ;
42 struct __anonstruct_dnports_391 { __le16 dport; __le16 sport; } ;
42 struct __anonstruct_mht_392 { __u8 type; } ;
42 union flowi_uli { struct __anonstruct_ports_389 ports; struct __anonstruct_icmpt_390 icmpt; struct __anonstruct_dnports_391 dnports; __be32 spi; __be32 gre_key; struct __anonstruct_mht_392 mht; } ;
66 struct flowi4 { struct flowi_common __fl_common; __be32 saddr; __be32 daddr; union flowi_uli uli; } ;
123 struct flowi6 { struct flowi_common __fl_common; struct in6_addr daddr; struct in6_addr saddr; __be32 flowlabel; union flowi_uli uli; } ;
141 struct flowidn { struct flowi_common __fl_common; __le16 daddr; __le16 saddr; union flowi_uli uli; } ;
161 union __anonunion_u_393 { struct flowi_common __fl_common; struct flowi4 ip4; struct flowi6 ip6; struct flowidn dn; } ;
161 struct flowi { union __anonunion_u_393 u; } ;
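Aside: every per-family variant in the flowi union begins with the same flowi_common, so family-agnostic code can read the shared prefix without knowing which member was filled in. A sketch of that common-initial-sequence idiom with simplified types:

#include <stdio.h>

struct common { int oif; unsigned char proto; };
struct v4 { struct common c; unsigned int saddr, daddr; };
struct v6 { struct common c; unsigned char saddr[16], daddr[16]; };
union flow { struct common c; struct v4 ip4; struct v6 ip6; };

int main(void)
{
    union flow f = { .ip4 = { { 2, 6 }, 0x0100007fu, 0x0100007fu } };
    /* Generic code reads the shared prefix regardless of family. */
    printf("oif=%d proto=%u\n", f.c.oif, f.c.proto);
    return 0;
}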
265 struct napi_struct ;
266 struct nf_conntrack { atomic_t use; } ;
254 union __anonunion____missing_field_name_394 { __be32 ipv4_daddr; struct in6_addr ipv6_daddr; char neigh_header[8U]; } ;
254 struct nf_bridge_info { atomic_t use; unsigned char orig_proto; unsigned char pkt_otherhost; unsigned char in_prerouting; unsigned char bridged_dnat; __u16 frag_max_size; struct net_device *physindev; struct net_device *physoutdev; union __anonunion____missing_field_name_394 __annonCompField82; } ;
278 struct sk_buff_head { struct sk_buff *next; struct sk_buff *prev; __u32 qlen; spinlock_t lock; } ;
310 struct skb_frag_struct ;
310 typedef struct skb_frag_struct skb_frag_t;
311 struct __anonstruct_page_395 { struct page *p; } ;
311 struct skb_frag_struct { struct __anonstruct_page_395 page; __u32 page_offset; __u32 size; } ;
344 struct skb_shared_hwtstamps { ktime_t hwtstamp; } ;
410 struct skb_shared_info { unsigned char nr_frags; __u8 tx_flags; unsigned short gso_size; unsigned short gso_segs; unsigned short gso_type; struct sk_buff *frag_list; struct skb_shared_hwtstamps hwtstamps; u32 tskey; __be32 ip6_frag_id; atomic_t dataref; void *destructor_arg; skb_frag_t frags[17U]; } ;
500 typedef unsigned int sk_buff_data_t;
501 struct __anonstruct____missing_field_name_397 { u32 stamp_us; u32 stamp_jiffies; } ;
501 union __anonunion____missing_field_name_396 { u64 v64; struct __anonstruct____missing_field_name_397 __annonCompField83; } ;
501 struct skb_mstamp { union __anonunion____missing_field_name_396 __annonCompField84; } ;
564 union __anonunion____missing_field_name_400 { ktime_t tstamp; struct skb_mstamp skb_mstamp; } ;
564 struct __anonstruct____missing_field_name_399 { struct sk_buff *next; struct sk_buff *prev; union __anonunion____missing_field_name_400 __annonCompField85; } ;
564 union __anonunion____missing_field_name_398 { struct __anonstruct____missing_field_name_399 __annonCompField86; struct rb_node rbnode; } ;
564 struct sec_path ;
564 struct __anonstruct____missing_field_name_402 { __u16 csum_start; __u16 csum_offset; } ;
564 union __anonunion____missing_field_name_401 { __wsum csum; struct __anonstruct____missing_field_name_402 __annonCompField88; } ;
564 union __anonunion____missing_field_name_403 { unsigned int napi_id; unsigned int sender_cpu; } ;
564 union __anonunion____missing_field_name_404 { __u32 secmark; __u32 offload_fwd_mark; } ;
564 union __anonunion____missing_field_name_405 { __u32 mark; __u32 reserved_tailroom; } ;
564 union __anonunion____missing_field_name_406 { __be16 inner_protocol; __u8 inner_ipproto; } ;
564 struct sk_buff { union __anonunion____missing_field_name_398 __annonCompField87; struct sock *sk; struct net_device *dev; char cb[48U]; unsigned long _skb_refdst; void (*destructor)(struct sk_buff *); struct sec_path *sp; struct nf_conntrack *nfct; struct nf_bridge_info *nf_bridge; unsigned int len; unsigned int data_len; __u16 mac_len; __u16 hdr_len; __u16 queue_mapping; unsigned char cloned; unsigned char nohdr; unsigned char fclone; unsigned char peeked; unsigned char head_frag; unsigned char xmit_more; __u32 headers_start[0U]; __u8 __pkt_type_offset[0U]; unsigned char pkt_type; unsigned char pfmemalloc; unsigned char ignore_df; unsigned char nfctinfo; unsigned char nf_trace; unsigned char ip_summed; unsigned char ooo_okay; unsigned char l4_hash; unsigned char sw_hash; unsigned char wifi_acked_valid; unsigned char wifi_acked; unsigned char no_fcs; unsigned char encapsulation; unsigned char encap_hdr_csum; unsigned char csum_valid; unsigned char csum_complete_sw; unsigned char csum_level; unsigned char csum_bad; unsigned char ndisc_nodetype; unsigned char ipvs_property; unsigned char inner_protocol_type; unsigned char remcsum_offload; __u16 tc_index; __u16 tc_verd; union __anonunion____missing_field_name_401 __annonCompField89; __u32 priority; int skb_iif; __u32 hash; __be16 vlan_proto; __u16 vlan_tci; union __anonunion____missing_field_name_403 __annonCompField90; union __anonunion____missing_field_name_404 __annonCompField91; union __anonunion____missing_field_name_405 __annonCompField92; union __anonunion____missing_field_name_406 __annonCompField93; __u16 inner_transport_header; __u16 inner_network_header; __u16 inner_mac_header; __be16 protocol; __u16 transport_header; __u16 network_header; __u16 mac_header; __u32 headers_end[0U]; sk_buff_data_t tail; sk_buff_data_t end; unsigned char *head; unsigned char *data; unsigned int truesize; atomic_t users; } ;
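Aside: with sk_buff_data_t typedef'd to unsigned int above, tail and end are offsets from head while data is a pointer, and len counts linear plus fragment bytes (data_len being the fragment part only). A sketch of that buffer geometry with stand-in fields:

#include <stdio.h>

struct pkt {
    unsigned char *head, *data;   /* allocation start / payload start */
    unsigned int tail, end;       /* payload end / allocation end, as offsets */
    unsigned int len, data_len;   /* total bytes / bytes living in fragments */
};

int main(void)
{
    static unsigned char buf[256];
    struct pkt p = { buf, buf + 64, 64 + 100, 256, 100 + 40, 40 };

    unsigned int headroom = (unsigned int)(p.data - p.head); /* 64  */
    unsigned int tailroom = p.end - p.tail;                  /* 92  */
    unsigned int linear   = p.len - p.data_len;              /* 100 */

    printf("headroom=%u tailroom=%u linear=%u paged=%u\n",
           headroom, tailroom, linear, p.data_len);
    return 0;
}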
831 struct dst_entry ;
880 struct rtable ;
1012 enum pkt_hash_types { PKT_HASH_TYPE_NONE = 0, PKT_HASH_TYPE_L2 = 1, PKT_HASH_TYPE_L3 = 2, PKT_HASH_TYPE_L4 = 3 } ;
3815 struct iphdr { unsigned char ihl; unsigned char version; __u8 tos; __be16 tot_len; __be16 id; __be16 frag_off; __u8 ttl; __u8 protocol; __sum16 check; __be32 saddr; __be32 daddr; } ;
1402 struct dql { unsigned int num_queued; unsigned int adj_limit; unsigned int last_obj_cnt; unsigned int limit; unsigned int num_completed; unsigned int prev_ovlimit; unsigned int prev_num_queued; unsigned int prev_last_obj_cnt; unsigned int lowest_slack; unsigned long slack_start_time; unsigned int max_limit; unsigned int min_limit; unsigned int slack_hold_time; } ;
43 struct __anonstruct_sync_serial_settings_409 { unsigned int clock_rate; unsigned int clock_type; unsigned short loopback; } ;
43 typedef struct __anonstruct_sync_serial_settings_409 sync_serial_settings;
50 struct __anonstruct_te1_settings_410 { unsigned int clock_rate; unsigned int clock_type; unsigned short loopback; unsigned int slot_map; } ;
50 typedef struct __anonstruct_te1_settings_410 te1_settings;
55 struct __anonstruct_raw_hdlc_proto_411 { unsigned short encoding; unsigned short parity; } ;
55 typedef struct __anonstruct_raw_hdlc_proto_411 raw_hdlc_proto;
65 struct __anonstruct_fr_proto_412 { unsigned int t391; unsigned int t392; unsigned int n391; unsigned int n392; unsigned int n393; unsigned short lmi; unsigned short dce; } ;
65 typedef struct __anonstruct_fr_proto_412 fr_proto;
69 struct __anonstruct_fr_proto_pvc_413 { unsigned int dlci; } ;
69 typedef struct __anonstruct_fr_proto_pvc_413 fr_proto_pvc;
74 struct __anonstruct_fr_proto_pvc_info_414 { unsigned int dlci; char master[16U]; } ;
74 typedef struct __anonstruct_fr_proto_pvc_info_414 fr_proto_pvc_info;
79 struct __anonstruct_cisco_proto_415 { unsigned int interval; unsigned int timeout; } ;
79 typedef struct __anonstruct_cisco_proto_415 cisco_proto;
117 struct ifmap { unsigned long mem_start; unsigned long mem_end; unsigned short base_addr; unsigned char irq; unsigned char dma; unsigned char port; } ;
197 union __anonunion_ifs_ifsu_416 { raw_hdlc_proto *raw_hdlc; cisco_proto *cisco; fr_proto *fr; fr_proto_pvc *fr_pvc; fr_proto_pvc_info *fr_pvc_info; sync_serial_settings *sync; te1_settings *te1; } ;
197 struct if_settings { unsigned int type; unsigned int size; union __anonunion_ifs_ifsu_416 ifs_ifsu; } ;
216 union __anonunion_ifr_ifrn_417 { char ifrn_name[16U]; } ;
216 union __anonunion_ifr_ifru_418 { struct sockaddr ifru_addr; struct sockaddr ifru_dstaddr; struct sockaddr ifru_broadaddr; struct sockaddr ifru_netmask; struct sockaddr ifru_hwaddr; short ifru_flags; int ifru_ivalue; int ifru_mtu; struct ifmap ifru_map; char ifru_slave[16U]; char ifru_newname[16U]; void *ifru_data; struct if_settings ifru_settings; } ;
216 struct ifreq { union __anonunion_ifr_ifrn_417 ifr_ifrn; union __anonunion_ifr_ifru_418 ifr_ifru; } ;
18 typedef s32 compat_time_t;
39 typedef s32 compat_long_t;
45 typedef u32 compat_uptr_t;
46 struct compat_timespec { compat_time_t tv_sec; s32 tv_nsec; } ;
278 struct compat_robust_list { compat_uptr_t next; } ;
282 struct compat_robust_list_head { struct compat_robust_list list; compat_long_t futex_offset; compat_uptr_t list_op_pending; } ;
39 struct ethtool_cmd { __u32 cmd; __u32 supported; __u32 advertising; __u16 speed; __u8 duplex; __u8 port; __u8 phy_address; __u8 transceiver; __u8 autoneg; __u8 mdio_support; __u32 maxtxpkt; __u32 maxrxpkt; __u16 speed_hi; __u8 eth_tp_mdix; __u8 eth_tp_mdix_ctrl; __u32 lp_advertising; __u32 reserved[2U]; } ;
131 struct ethtool_drvinfo { __u32 cmd; char driver[32U]; char version[32U]; char fw_version[32U]; char bus_info[32U]; char erom_version[32U]; char reserved2[12U]; __u32 n_priv_flags; __u32 n_stats; __u32 testinfo_len; __u32 eedump_len; __u32 regdump_len; } ;
195 struct ethtool_wolinfo { __u32 cmd; __u32 supported; __u32 wolopts; __u8 sopass[6U]; } ;
239 struct ethtool_tunable { __u32 cmd; __u32 id; __u32 type_id; __u32 len; void *data[0U]; } ;
251 struct ethtool_regs { __u32 cmd; __u32 version; __u32 len; __u8 data[0U]; } ;
273 struct ethtool_eeprom { __u32 cmd; __u32 magic; __u32 offset; __u32 len; __u8 data[0U]; } ;
299 struct ethtool_eee { __u32 cmd; __u32 supported; __u32 advertised; __u32 lp_advertised; __u32 eee_active; __u32 eee_enabled; __u32 tx_lpi_enabled; __u32 tx_lpi_timer; __u32 reserved[2U]; } ;
328 struct ethtool_modinfo { __u32 cmd; __u32 type; __u32 eeprom_len; __u32 reserved[8U]; } ;
345 struct ethtool_coalesce { __u32 cmd; __u32 rx_coalesce_usecs; __u32 rx_max_coalesced_frames; __u32 rx_coalesce_usecs_irq; __u32 rx_max_coalesced_frames_irq; __u32 tx_coalesce_usecs; __u32 tx_max_coalesced_frames; __u32 tx_coalesce_usecs_irq; __u32 tx_max_coalesced_frames_irq; __u32 stats_block_coalesce_usecs; __u32 use_adaptive_rx_coalesce; __u32 use_adaptive_tx_coalesce; __u32 pkt_rate_low; __u32 rx_coalesce_usecs_low; __u32 rx_max_coalesced_frames_low; __u32 tx_coalesce_usecs_low; __u32 tx_max_coalesced_frames_low; __u32 pkt_rate_high; __u32 rx_coalesce_usecs_high; __u32 rx_max_coalesced_frames_high; __u32 tx_coalesce_usecs_high; __u32 tx_max_coalesced_frames_high; __u32 rate_sample_interval; } ;
444 struct ethtool_ringparam { __u32 cmd; __u32 rx_max_pending; __u32 rx_mini_max_pending; __u32 rx_jumbo_max_pending; __u32 tx_max_pending; __u32 rx_pending; __u32 rx_mini_pending; __u32 rx_jumbo_pending; __u32 tx_pending; } ;
481 struct ethtool_channels { __u32 cmd; __u32 max_rx; __u32 max_tx; __u32 max_other; __u32 max_combined; __u32 rx_count; __u32 tx_count; __u32 other_count; __u32 combined_count; } ;
509 struct ethtool_pauseparam { __u32 cmd; __u32 autoneg; __u32 rx_pause; __u32 tx_pause; } ;
613 struct ethtool_test { __u32 cmd; __u32 flags; __u32 reserved; __u32 len; __u64 data[0U]; } ;
645 struct ethtool_stats { __u32 cmd; __u32 n_stats; __u64 data[0U]; } ;
687 struct ethtool_tcpip4_spec { __be32 ip4src; __be32 ip4dst; __be16 psrc; __be16 pdst; __u8 tos; } ;
720 struct ethtool_ah_espip4_spec { __be32 ip4src; __be32 ip4dst; __be32 spi; __u8 tos; } ;
736 struct ethtool_usrip4_spec { __be32 ip4src; __be32 ip4dst; __be32 l4_4_bytes; __u8 tos; __u8 ip_ver; __u8 proto; } ;
756 struct ethtool_tcpip6_spec { __be32 ip6src[4U]; __be32 ip6dst[4U]; __be16 psrc; __be16 pdst; __u8 tclass; } ;
774 struct ethtool_ah_espip6_spec { __be32 ip6src[4U]; __be32 ip6dst[4U]; __be32 spi; __u8 tclass; } ;
790 struct ethtool_usrip6_spec { __be32 ip6src[4U]; __be32 ip6dst[4U]; __be32 l4_4_bytes; __u8 tclass; __u8 l4_proto; } ;
806 union ethtool_flow_union { struct ethtool_tcpip4_spec tcp_ip4_spec; struct ethtool_tcpip4_spec udp_ip4_spec; struct ethtool_tcpip4_spec sctp_ip4_spec; struct ethtool_ah_espip4_spec ah_ip4_spec; struct ethtool_ah_espip4_spec esp_ip4_spec; struct ethtool_usrip4_spec usr_ip4_spec; struct ethtool_tcpip6_spec tcp_ip6_spec; struct ethtool_tcpip6_spec udp_ip6_spec; struct ethtool_tcpip6_spec sctp_ip6_spec; struct ethtool_ah_espip6_spec ah_ip6_spec; struct ethtool_ah_espip6_spec esp_ip6_spec; struct ethtool_usrip6_spec usr_ip6_spec; struct ethhdr ether_spec; __u8 hdata[52U]; } ;
823 struct ethtool_flow_ext { __u8 padding[2U]; unsigned char h_dest[6U]; __be16 vlan_etype; __be16 vlan_tci; __be32 data[2U]; } ;
842 struct ethtool_rx_flow_spec { __u32 flow_type; union ethtool_flow_union h_u; struct ethtool_flow_ext h_ext; union ethtool_flow_union m_u; struct ethtool_flow_ext m_ext; __u64 ring_cookie; __u32 location; } ;
892 struct ethtool_rxnfc { __u32 cmd; __u32 flow_type; __u64 data; struct ethtool_rx_flow_spec fs; __u32 rule_cnt; __u32 rule_locs[0U]; } ;
1063 struct ethtool_flash { __u32 cmd; __u32 region; char data[128U]; } ;
1071 struct ethtool_dump { __u32 cmd; __u32 version; __u32 flag; __u32 len; __u8 data[0U]; } ;
1147 struct ethtool_ts_info { __u32 cmd; __u32 so_timestamping; __s32 phc_index; __u32 tx_types; __u32 tx_reserved[3U]; __u32 rx_filters; __u32 rx_reserved[3U]; } ;
1515 struct ethtool_link_settings { __u32 cmd; __u32 speed; __u8 duplex; __u8 port; __u8 phy_address; __u8 autoneg; __u8 mdio_support; __u8 eth_tp_mdix; __u8 eth_tp_mdix_ctrl; __s8 link_mode_masks_nwords; __u32 reserved[8U]; __u32 link_mode_masks[0U]; } ;
39 enum ethtool_phys_id_state { ETHTOOL_ID_INACTIVE = 0, ETHTOOL_ID_ACTIVE = 1, ETHTOOL_ID_ON = 2, ETHTOOL_ID_OFF = 3 } ;
97 struct __anonstruct_link_modes_438 { unsigned long supported[1U]; unsigned long advertising[1U]; unsigned long lp_advertising[1U]; } ;
97 struct ethtool_link_ksettings { struct ethtool_link_settings base; struct __anonstruct_link_modes_438 link_modes; } ;
158 struct ethtool_ops { int (*get_settings)(struct net_device *, struct ethtool_cmd *); int (*set_settings)(struct net_device *, struct ethtool_cmd *); void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *); int (*get_regs_len)(struct net_device *); void (*get_regs)(struct net_device *, struct ethtool_regs *, void *); void (*get_wol)(struct net_device *, struct ethtool_wolinfo *); int (*set_wol)(struct net_device *, struct ethtool_wolinfo *); u32 (*get_msglevel)(struct net_device *); void (*set_msglevel)(struct net_device *, u32 ); int (*nway_reset)(struct net_device *); u32 (*get_link)(struct net_device *); int (*get_eeprom_len)(struct net_device *); int (*get_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); int (*set_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); int (*get_coalesce)(struct net_device *, struct ethtool_coalesce *); int (*set_coalesce)(struct net_device *, struct ethtool_coalesce *); void (*get_ringparam)(struct net_device *, struct ethtool_ringparam *); int (*set_ringparam)(struct net_device *, struct ethtool_ringparam *); void (*get_pauseparam)(struct net_device *, struct ethtool_pauseparam *); int (*set_pauseparam)(struct net_device *, struct ethtool_pauseparam *); void (*self_test)(struct net_device *, struct ethtool_test *, u64 *); void (*get_strings)(struct net_device *, u32 , u8 *); int (*set_phys_id)(struct net_device *, enum ethtool_phys_id_state ); void (*get_ethtool_stats)(struct net_device *, struct ethtool_stats *, u64 *); int (*begin)(struct net_device *); void (*complete)(struct net_device *); u32 (*get_priv_flags)(struct net_device *); int (*set_priv_flags)(struct net_device *, u32 ); int (*get_sset_count)(struct net_device *, int); int (*get_rxnfc)(struct net_device *, struct ethtool_rxnfc *, u32 *); int (*set_rxnfc)(struct net_device *, struct ethtool_rxnfc *); int (*flash_device)(struct net_device *, struct ethtool_flash *); int (*reset)(struct net_device *, u32 *); u32 (*get_rxfh_key_size)(struct net_device *); u32 (*get_rxfh_indir_size)(struct net_device *); int (*get_rxfh)(struct net_device *, u32 *, u8 *, u8 *); int (*set_rxfh)(struct net_device *, const u32 *, const u8 *, const u8 ); void (*get_channels)(struct net_device *, struct ethtool_channels *); int (*set_channels)(struct net_device *, struct ethtool_channels *); int (*get_dump_flag)(struct net_device *, struct ethtool_dump *); int (*get_dump_data)(struct net_device *, struct ethtool_dump *, void *); int (*set_dump)(struct net_device *, struct ethtool_dump *); int (*get_ts_info)(struct net_device *, struct ethtool_ts_info *); int (*get_module_info)(struct net_device *, struct ethtool_modinfo *); int (*get_module_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); int (*get_eee)(struct net_device *, struct ethtool_eee *); int (*set_eee)(struct net_device *, struct ethtool_eee *); int (*get_tunable)(struct net_device *, const struct ethtool_tunable *, void *); int (*set_tunable)(struct net_device *, const struct ethtool_tunable *, const void *); int (*get_per_queue_coalesce)(struct net_device *, u32 , struct ethtool_coalesce *); int (*set_per_queue_coalesce)(struct net_device *, u32 , struct ethtool_coalesce *); int (*get_link_ksettings)(struct net_device *, struct ethtool_link_ksettings *); int (*set_link_ksettings)(struct net_device *, const struct ethtool_link_ksettings *); } ;
375 struct prot_inuse ;
376 struct netns_core { struct ctl_table_header *sysctl_hdr; int sysctl_somaxconn; struct prot_inuse *inuse; } ;
38 struct u64_stats_sync { } ;
160 struct ipstats_mib { u64 mibs[36U]; struct u64_stats_sync syncp; } ;
61 struct icmp_mib { unsigned long mibs[28U]; } ;
67 struct icmpmsg_mib { atomic_long_t mibs[512U]; } ;
72 struct icmpv6_mib { unsigned long mibs[6U]; } ;
79 struct icmpv6_mib_device { atomic_long_t mibs[6U]; } ;
83 struct icmpv6msg_mib { atomic_long_t mibs[512U]; } ;
89 struct icmpv6msg_mib_device { atomic_long_t mibs[512U]; } ;
93 struct tcp_mib { unsigned long mibs[16U]; } ;
100 struct udp_mib { unsigned long mibs[9U]; } ;
106 struct linux_mib { unsigned long mibs[117U]; } ;
112 struct linux_xfrm_mib { unsigned long mibs[29U]; } ;
118 struct proc_dir_entry ;
118 struct netns_mib { struct tcp_mib *tcp_statistics; struct ipstats_mib *ip_statistics; struct linux_mib *net_statistics; struct udp_mib *udp_statistics; struct udp_mib *udplite_statistics; struct icmp_mib *icmp_statistics; struct icmpmsg_mib *icmpmsg_statistics; struct proc_dir_entry *proc_net_devsnmp6; struct udp_mib *udp_stats_in6; struct udp_mib *udplite_stats_in6; struct ipstats_mib *ipv6_statistics; struct icmpv6_mib *icmpv6_statistics; struct icmpv6msg_mib *icmpv6msg_statistics; struct linux_xfrm_mib *xfrm_statistics; } ;
26 struct netns_unix { int sysctl_max_dgram_qlen; struct ctl_table_header *ctl; } ;
12 struct netns_packet { struct mutex sklist_lock; struct hlist_head sklist; } ;
14 struct netns_frags { struct percpu_counter mem; int timeout; int high_thresh; int low_thresh; int max_dist; } ;
187 struct ipv4_devconf ;
188 struct fib_rules_ops ;
189 struct fib_table ;
190 struct local_ports { seqlock_t lock; int range[2U]; bool warned; } ;
24 struct ping_group_range { seqlock_t lock; kgid_t range[2U]; } ;
29 struct inet_peer_base ;
29 struct xt_table ;
29 struct netns_ipv4 { struct ctl_table_header *forw_hdr; struct ctl_table_header *frags_hdr; struct ctl_table_header *ipv4_hdr; struct ctl_table_header *route_hdr; struct ctl_table_header *xfrm4_hdr; struct ipv4_devconf *devconf_all; struct ipv4_devconf *devconf_dflt; struct fib_rules_ops *rules_ops; bool fib_has_custom_rules; struct fib_table *fib_local; struct fib_table *fib_main; struct fib_table *fib_default; int fib_num_tclassid_users; struct hlist_head *fib_table_hash; bool fib_offload_disabled; struct sock *fibnl; struct sock **icmp_sk; struct sock *mc_autojoin_sk; struct inet_peer_base *peers; struct sock **tcp_sk; struct netns_frags frags; struct xt_table *iptable_filter; struct xt_table *iptable_mangle; struct xt_table *iptable_raw; struct xt_table *arptable_filter; struct xt_table *iptable_security; struct xt_table *nat_table; int sysctl_icmp_echo_ignore_all; int sysctl_icmp_echo_ignore_broadcasts; int sysctl_icmp_ignore_bogus_error_responses; int sysctl_icmp_ratelimit; int sysctl_icmp_ratemask; int sysctl_icmp_errors_use_inbound_ifaddr; struct local_ports ip_local_ports; int sysctl_tcp_ecn; int sysctl_tcp_ecn_fallback; int sysctl_ip_default_ttl; int sysctl_ip_no_pmtu_disc; int sysctl_ip_fwd_use_pmtu; int sysctl_ip_nonlocal_bind; int sysctl_ip_dynaddr; int sysctl_ip_early_demux; int sysctl_fwmark_reflect; int sysctl_tcp_fwmark_accept; int sysctl_tcp_l3mdev_accept; int sysctl_tcp_mtu_probing; int sysctl_tcp_base_mss; int sysctl_tcp_probe_threshold; u32 sysctl_tcp_probe_interval; int sysctl_tcp_keepalive_time; int sysctl_tcp_keepalive_probes; int sysctl_tcp_keepalive_intvl; int sysctl_tcp_syn_retries; int sysctl_tcp_synack_retries; int sysctl_tcp_syncookies; int sysctl_tcp_reordering; int sysctl_tcp_retries1; int sysctl_tcp_retries2; int sysctl_tcp_orphan_retries; int sysctl_tcp_fin_timeout; unsigned int sysctl_tcp_notsent_lowat; int sysctl_igmp_max_memberships; int sysctl_igmp_max_msf; int sysctl_igmp_llm_reports; int sysctl_igmp_qrv; struct ping_group_range ping_group_range; atomic_t dev_addr_genid; unsigned long *sysctl_local_reserved_ports; struct list_head mr_tables; struct fib_rules_ops *mr_rules_ops; int sysctl_fib_multipath_use_neigh; atomic_t rt_genid; } ;
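Aside: the sysctl_* members above are per-network-namespace knobs, so code that holds a struct net pointer reads them directly rather than through a global. A hedged sketch, with foo_default_ttl hypothetical:

#include <net/net_namespace.h>

static int foo_default_ttl(struct net *net)
{
        /* e.g. the value behind /proc/sys/net/ipv4/ip_default_ttl
         * in this particular namespace */
        return net->ipv4.sysctl_ip_default_ttl;
}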
142 struct neighbour ;
142 struct dst_ops { unsigned short family; unsigned int gc_thresh; int (*gc)(struct dst_ops *); struct dst_entry * (*check)(struct dst_entry *, __u32 ); unsigned int (*default_advmss)(const struct dst_entry *); unsigned int (*mtu)(const struct dst_entry *); u32 * (*cow_metrics)(struct dst_entry *, unsigned long); void (*destroy)(struct dst_entry *); void (*ifdown)(struct dst_entry *, struct net_device *, int); struct dst_entry * (*negative_advice)(struct dst_entry *); void (*link_failure)(struct sk_buff *); void (*update_pmtu)(struct dst_entry *, struct sock *, struct sk_buff *, u32 ); void (*redirect)(struct dst_entry *, struct sock *, struct sk_buff *); int (*local_out)(struct net *, struct sock *, struct sk_buff *); struct neighbour * (*neigh_lookup)(const struct dst_entry *, struct sk_buff *, const void *); struct kmem_cache *kmem_cachep; struct percpu_counter pcpuc_entries; } ;
73 struct netns_sysctl_ipv6 { struct ctl_table_header *hdr; struct ctl_table_header *route_hdr; struct ctl_table_header *icmp_hdr; struct ctl_table_header *frags_hdr; struct ctl_table_header *xfrm6_hdr; int bindv6only; int flush_delay; int ip6_rt_max_size; int ip6_rt_gc_min_interval; int ip6_rt_gc_timeout; int ip6_rt_gc_interval; int ip6_rt_gc_elasticity; int ip6_rt_mtu_expires; int ip6_rt_min_advmss; int flowlabel_consistency; int auto_flowlabels; int icmpv6_time; int anycast_src_echo_reply; int ip_nonlocal_bind; int fwmark_reflect; int idgen_retries; int idgen_delay; int flowlabel_state_ranges; } ;
40 struct ipv6_devconf ;
40 struct rt6_info ;
40 struct rt6_statistics ;
40 struct fib6_table ;
40 struct netns_ipv6 { struct netns_sysctl_ipv6 sysctl; struct ipv6_devconf *devconf_all; struct ipv6_devconf *devconf_dflt; struct inet_peer_base *peers; struct netns_frags frags; struct xt_table *ip6table_filter; struct xt_table *ip6table_mangle; struct xt_table *ip6table_raw; struct xt_table *ip6table_security; struct xt_table *ip6table_nat; struct rt6_info *ip6_null_entry; struct rt6_statistics *rt6_stats; struct timer_list ip6_fib_timer; struct hlist_head *fib_table_hash; struct fib6_table *fib6_main_tbl; struct list_head fib6_walkers; struct dst_ops ip6_dst_ops; rwlock_t fib6_walker_lock; spinlock_t fib6_gc_lock; unsigned int ip6_rt_gc_expire; unsigned long ip6_rt_last_gc; struct rt6_info *ip6_prohibit_entry; struct rt6_info *ip6_blk_hole_entry; struct fib6_table *fib6_local_tbl; struct fib_rules_ops *fib6_rules_ops; struct sock **icmp_sk; struct sock *ndisc_sk; struct sock *tcp_sk; struct sock *igmp_sk; struct sock *mc_autojoin_sk; struct list_head mr6_tables; struct fib_rules_ops *mr6_rules_ops; atomic_t dev_addr_genid; atomic_t fib6_sernum; } ;
89 struct netns_nf_frag { struct netns_sysctl_ipv6 sysctl; struct netns_frags frags; } ;
95 struct netns_sysctl_lowpan { struct ctl_table_header *frags_hdr; } ;
14 struct netns_ieee802154_lowpan { struct netns_sysctl_lowpan sysctl; struct netns_frags frags; } ;
20 struct sctp_mib ;
21 struct netns_sctp { struct sctp_mib *sctp_statistics; struct proc_dir_entry *proc_net_sctp; struct ctl_table_header *sysctl_header; struct sock *ctl_sock; struct list_head local_addr_list; struct list_head addr_waitq; struct timer_list addr_wq_timer; struct list_head auto_asconf_splist; spinlock_t addr_wq_lock; spinlock_t local_addr_lock; unsigned int rto_initial; unsigned int rto_min; unsigned int rto_max; int rto_alpha; int rto_beta; int max_burst; int cookie_preserve_enable; char *sctp_hmac_alg; unsigned int valid_cookie_life; unsigned int sack_timeout; unsigned int hb_interval; int max_retrans_association; int max_retrans_path; int max_retrans_init; int pf_retrans; int pf_enable; int sndbuf_policy; int rcvbuf_policy; int default_auto_asconf; int addip_enable; int addip_noauth; int prsctp_enable; int auth_enable; int scope_policy; int rwnd_upd_shift; unsigned long max_autoclose; } ;
141 struct netns_dccp { struct sock *v4_ctl_sk; struct sock *v6_ctl_sk; } ;
79 struct nf_logger ;
80 struct nf_queue_handler ;
81 struct netns_nf { struct proc_dir_entry *proc_netfilter; const struct nf_queue_handler *queue_handler; const struct nf_logger *nf_loggers[13U]; struct ctl_table_header *nf_log_dir_header; struct list_head hooks[13U][8U]; } ;
21 struct ebt_table ;
22 struct netns_xt { struct list_head tables[13U]; bool notrack_deprecated_warning; bool clusterip_deprecated_warning; struct ebt_table *broute_table; struct ebt_table *frame_filter; struct ebt_table *frame_nat; } ;
19 struct hlist_nulls_node ;
19 struct hlist_nulls_head { struct hlist_nulls_node *first; } ;
23 struct hlist_nulls_node { struct hlist_nulls_node *next; struct hlist_nulls_node **pprev; } ;
32 struct nf_proto_net { struct ctl_table_header *ctl_table_header; struct ctl_table *ctl_table; struct ctl_table_header *ctl_compat_header; struct ctl_table *ctl_compat_table; unsigned int users; } ;
25 struct nf_generic_net { struct nf_proto_net pn; unsigned int timeout; } ;
30 struct nf_tcp_net { struct nf_proto_net pn; unsigned int timeouts[14U]; unsigned int tcp_loose; unsigned int tcp_be_liberal; unsigned int tcp_max_retrans; } ;
44 struct nf_udp_net { struct nf_proto_net pn; unsigned int timeouts[2U]; } ;
49 struct nf_icmp_net { struct nf_proto_net pn; unsigned int timeout; } ;
54 struct nf_ip_net { struct nf_generic_net generic; struct nf_tcp_net tcp; struct nf_udp_net udp; struct nf_icmp_net icmp; struct nf_icmp_net icmpv6; struct ctl_table_header *ctl_table_header; struct ctl_table *ctl_table; } ;
65 struct ct_pcpu { spinlock_t lock; struct hlist_nulls_head unconfirmed; struct hlist_nulls_head dying; } ;
72 struct ip_conntrack_stat ;
72 struct nf_ct_event_notifier ;
72 struct nf_exp_event_notifier ;
72 struct netns_ct { atomic_t count; unsigned int expect_count; struct delayed_work ecache_dwork; bool ecache_dwork_pending; struct ctl_table_header *sysctl_header; struct ctl_table_header *acct_sysctl_header; struct ctl_table_header *tstamp_sysctl_header; struct ctl_table_header *event_sysctl_header; struct ctl_table_header *helper_sysctl_header; unsigned int sysctl_log_invalid; int sysctl_events; int sysctl_acct; int sysctl_auto_assign_helper; bool auto_assign_helper_warned; int sysctl_tstamp; int sysctl_checksum; struct ct_pcpu *pcpu_lists; struct ip_conntrack_stat *stat; struct nf_ct_event_notifier *nf_conntrack_event_cb; struct nf_exp_event_notifier *nf_expect_event_cb; struct nf_ip_net nf_ct_proto; unsigned int labels_used; u8 label_words; } ;
104 struct nft_af_info ;
105 struct netns_nftables { struct list_head af_info; struct list_head commit_list; struct nft_af_info *ipv4; struct nft_af_info *ipv6; struct nft_af_info *inet; struct nft_af_info *arp; struct nft_af_info *bridge; struct nft_af_info *netdev; unsigned int base_seq; u8 gencursor; } ;
509 enum irqreturn { IRQ_NONE = 0, IRQ_HANDLED = 1, IRQ_WAKE_THREAD = 2 } ;
16 typedef enum irqreturn irqreturn_t;
486 struct tasklet_struct { struct tasklet_struct *next; unsigned long state; atomic_t count; void (*func)(unsigned long); unsigned long data; } ;
708 struct flow_cache_percpu { struct hlist_head *hash_table; int hash_count; u32 hash_rnd; int hash_rnd_recalc; struct tasklet_struct flush_tasklet; } ;
16 struct flow_cache { u32 hash_shift; struct flow_cache_percpu *percpu; struct notifier_block hotcpu_notifier; int low_watermark; int high_watermark; struct timer_list rnd_timer; } ;
25 struct xfrm_policy_hash { struct hlist_head *table; unsigned int hmask; u8 dbits4; u8 sbits4; u8 dbits6; u8 sbits6; } ;
21 struct xfrm_policy_hthresh { struct work_struct work; seqlock_t lock; u8 lbits4; u8 rbits4; u8 lbits6; u8 rbits6; } ;
30 struct netns_xfrm { struct list_head state_all; struct hlist_head *state_bydst; struct hlist_head *state_bysrc; struct hlist_head *state_byspi; unsigned int state_hmask; unsigned int state_num; struct work_struct state_hash_work; struct hlist_head state_gc_list; struct work_struct state_gc_work; struct list_head policy_all; struct hlist_head *policy_byidx; unsigned int policy_idx_hmask; struct hlist_head policy_inexact[3U]; struct xfrm_policy_hash policy_bydst[3U]; unsigned int policy_count[6U]; struct work_struct policy_hash_work; struct xfrm_policy_hthresh policy_hthresh; struct sock *nlsk; struct sock *nlsk_stash; u32 sysctl_aevent_etime; u32 sysctl_aevent_rseqth; int sysctl_larval_drop; u32 sysctl_acq_expires; struct ctl_table_header *sysctl_hdr; struct dst_ops xfrm4_dst_ops; struct dst_ops xfrm6_dst_ops; spinlock_t xfrm_state_lock; rwlock_t xfrm_policy_lock; struct mutex xfrm_cfg_mutex; struct flow_cache flow_cache_global; atomic_t flow_cache_genid; struct list_head flow_cache_gc_list; atomic_t flow_cache_gc_count; spinlock_t flow_cache_gc_lock; struct work_struct flow_cache_gc_work; struct work_struct flow_cache_flush_work; struct mutex flow_flush_sem; } ;
89 struct mpls_route ;
90 struct netns_mpls { size_t platform_labels; struct mpls_route **platform_label; struct ctl_table_header *ctl; } ;
16 struct proc_ns_operations ;
17 struct ns_common { atomic_long_t stashed; const struct proc_ns_operations *ops; unsigned int inum; } ;
11 struct net_generic ;
12 struct netns_ipvs ;
13 struct net { atomic_t passive; atomic_t count; spinlock_t rules_mod_lock; atomic64_t cookie_gen; struct list_head list; struct list_head cleanup_list; struct list_head exit_list; struct user_namespace *user_ns; spinlock_t nsid_lock; struct idr netns_ids; struct ns_common ns; struct proc_dir_entry *proc_net; struct proc_dir_entry *proc_net_stat; struct ctl_table_set sysctls; struct sock *rtnl; struct sock *genl_sock; struct list_head dev_base_head; struct hlist_head *dev_name_head; struct hlist_head *dev_index_head; unsigned int dev_base_seq; int ifindex; unsigned int dev_unreg_count; struct list_head rules_ops; struct net_device *loopback_dev; struct netns_core core; struct netns_mib mib; struct netns_packet packet; struct netns_unix unx; struct netns_ipv4 ipv4; struct netns_ipv6 ipv6; struct netns_ieee802154_lowpan ieee802154_lowpan; struct netns_sctp sctp; struct netns_dccp dccp; struct netns_nf nf; struct netns_xt xt; struct netns_ct ct; struct netns_nftables nft; struct netns_nf_frag nf_frag; struct sock *nfnl; struct sock *nfnl_stash; struct list_head nfnl_acct_list; struct list_head nfct_timeout_list; struct sk_buff_head wext_nlevents; struct net_generic *gen; struct netns_xfrm xfrm; struct netns_ipvs *ipvs; struct netns_mpls mpls; struct sock *diag_nlsk; atomic_t fnhe_genid; } ;
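Aside: dev_base_head above anchors the per-namespace device list; the core's for_each_netdev macro walks it under the RTNL lock, and init_net is the boot-time namespace. A hedged sketch (foo_list_devices is hypothetical):

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

static void foo_list_devices(void)
{
        struct net_device *dev;

        rtnl_lock();
        for_each_netdev(&init_net, dev)
                pr_info("dev %s ifindex %d\n", dev->name, dev->ifindex);
        rtnl_unlock();
}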
247 struct __anonstruct_possible_net_t_453 { struct net *net; } ;
247 typedef struct __anonstruct_possible_net_t_453 possible_net_t;
13 typedef unsigned long kernel_ulong_t;
14 struct pci_device_id { __u32 vendor; __u32 device; __u32 subvendor; __u32 subdevice; __u32 class; __u32 class_mask; kernel_ulong_t driver_data; } ;
186 struct acpi_device_id { __u8 id[9U]; kernel_ulong_t driver_data; __u32 cls; __u32 cls_msk; } ;
229 struct of_device_id { char name[32U]; char type[32U]; char compatible[128U]; const void *data; } ;
674 enum fwnode_type { FWNODE_INVALID = 0, FWNODE_OF = 1, FWNODE_ACPI = 2, FWNODE_ACPI_DATA = 3, FWNODE_PDATA = 4, FWNODE_IRQCHIP = 5 } ;
683 struct fwnode_handle { enum fwnode_type type; struct fwnode_handle *secondary; } ;
32 typedef u32 phandle;
34 struct property { char *name; int length; void *value; struct property *next; unsigned long _flags; unsigned int unique_id; struct bin_attribute attr; } ;
44 struct device_node { const char *name; const char *type; phandle phandle; const char *full_name; struct fwnode_handle fwnode; struct property *properties; struct property *deadprops; struct device_node *parent; struct device_node *child; struct device_node *sibling; struct kobject kobj; unsigned long _flags; void *data; } ;
296 struct mii_bus ;
303 struct mdio_device { struct device dev; const struct dev_pm_ops *pm_ops; struct mii_bus *bus; int (*bus_match)(struct device *, struct device_driver *); void (*device_free)(struct mdio_device *); void (*device_remove)(struct mdio_device *); int addr; int flags; } ;
41 struct mdio_driver_common { struct device_driver driver; int flags; } ;
244 struct phy_device ;
245 enum ldv_30630 { PHY_INTERFACE_MODE_NA = 0, PHY_INTERFACE_MODE_MII = 1, PHY_INTERFACE_MODE_GMII = 2, PHY_INTERFACE_MODE_SGMII = 3, PHY_INTERFACE_MODE_TBI = 4, PHY_INTERFACE_MODE_REVMII = 5, PHY_INTERFACE_MODE_RMII = 6, PHY_INTERFACE_MODE_RGMII = 7, PHY_INTERFACE_MODE_RGMII_ID = 8, PHY_INTERFACE_MODE_RGMII_RXID = 9, PHY_INTERFACE_MODE_RGMII_TXID = 10, PHY_INTERFACE_MODE_RTBI = 11, PHY_INTERFACE_MODE_SMII = 12, PHY_INTERFACE_MODE_XGMII = 13, PHY_INTERFACE_MODE_MOCA = 14, PHY_INTERFACE_MODE_QSGMII = 15, PHY_INTERFACE_MODE_MAX = 16 } ;
84 typedef enum ldv_30630 phy_interface_t;
130 enum ldv_30681 { MDIOBUS_ALLOCATED = 1, MDIOBUS_REGISTERED = 2, MDIOBUS_UNREGISTERED = 3, MDIOBUS_RELEASED = 4 } ;
137 struct mii_bus { struct module *owner; const char *name; char id[17U]; void *priv; int (*read)(struct mii_bus *, int, int); int (*write)(struct mii_bus *, int, int, u16 ); int (*reset)(struct mii_bus *); struct mutex mdio_lock; struct device *parent; enum ldv_30681 state; struct device dev; struct mdio_device *mdio_map[32U]; u32 phy_mask; u32 phy_ignore_ta_mask; int irq[32U]; } ;
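Aside: a sketch of allocating and registering an MDIO bus; the read/write callbacks follow the slots in the structure above, and all foo_* names are hypothetical driver code.

#include <linux/phy.h>

static int foo_mdio_read(struct mii_bus *bus, int addr, int regnum)
{
        /* talk to the hardware via bus->priv; return the register
         * value, or a negative errno on failure */
        return 0;
}

static int foo_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val)
{
        return 0;
}

static int foo_mdio_init(struct device *parent)
{
        struct mii_bus *bus = mdiobus_alloc();

        if (!bus)
                return -ENOMEM;
        bus->name = "foo-mdio";
        snprintf(bus->id, MII_BUS_ID_SIZE, "foo-%d", 0); /* id[17U] above */
        bus->read = foo_mdio_read;
        bus->write = foo_mdio_write;
        bus->parent = parent;
        return mdiobus_register(bus); /* probes PHYs not masked by phy_mask */
}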
218 enum phy_state { PHY_DOWN = 0, PHY_STARTING = 1, PHY_READY = 2, PHY_PENDING = 3, PHY_UP = 4, PHY_AN = 5, PHY_RUNNING = 6, PHY_NOLINK = 7, PHY_FORCING = 8, PHY_CHANGELINK = 9, PHY_HALTED = 10, PHY_RESUMING = 11 } ;
233 struct phy_c45_device_ids { u32 devices_in_package; u32 device_ids[8U]; } ;
326 struct phy_driver ;
326 struct phy_device { struct mdio_device mdio; struct phy_driver *drv; u32 phy_id; struct phy_c45_device_ids c45_ids; bool is_c45; bool is_internal; bool is_pseudo_fixed_link; bool has_fixups; bool suspended; enum phy_state state; u32 dev_flags; phy_interface_t interface; int speed; int duplex; int pause; int asym_pause; int link; u32 interrupts; u32 supported; u32 advertising; u32 lp_advertising; int autoneg; int link_timeout; int irq; void *priv; struct work_struct phy_queue; struct delayed_work state_queue; atomic_t irq_disable; struct mutex lock; struct net_device *attached_dev; u8 mdix; void (*adjust_link)(struct net_device *); } ;
428 struct phy_driver { struct mdio_driver_common mdiodrv; u32 phy_id; char *name; unsigned int phy_id_mask; u32 features; u32 flags; const void *driver_data; int (*soft_reset)(struct phy_device *); int (*config_init)(struct phy_device *); int (*probe)(struct phy_device *); int (*suspend)(struct phy_device *); int (*resume)(struct phy_device *); int (*config_aneg)(struct phy_device *); int (*aneg_done)(struct phy_device *); int (*read_status)(struct phy_device *); int (*ack_interrupt)(struct phy_device *); int (*config_intr)(struct phy_device *); int (*did_interrupt)(struct phy_device *); void (*remove)(struct phy_device *); int (*match_phy_device)(struct phy_device *); int (*ts_info)(struct phy_device *, struct ethtool_ts_info *); int (*hwtstamp)(struct phy_device *, struct ifreq *); bool (*rxtstamp)(struct phy_device *, struct sk_buff *, int); void (*txtstamp)(struct phy_device *, struct sk_buff *, int); int (*set_wol)(struct phy_device *, struct ethtool_wolinfo *); void (*get_wol)(struct phy_device *, struct ethtool_wolinfo *); void (*link_change_notify)(struct phy_device *); int (*read_mmd_indirect)(struct phy_device *, int, int, int); void (*write_mmd_indirect)(struct phy_device *, int, int, int, u32 ); int (*module_info)(struct phy_device *, struct ethtool_modinfo *); int (*module_eeprom)(struct phy_device *, struct ethtool_eeprom *, u8 *); int (*get_sset_count)(struct phy_device *); void (*get_strings)(struct phy_device *, u8 *); void (*get_stats)(struct phy_device *, struct ethtool_stats *, u64 *); } ;
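Aside: a minimal PHY driver built entirely on the exported genphy_* helpers; the phy_id values are placeholders, and module_phy_driver() expects an array and registers it at module init.

#include <linux/module.h>
#include <linux/phy.h>

static struct phy_driver foo_phy_driver[] = {
        {
                .phy_id       = 0x00112233,     /* placeholder OUI/model */
                .phy_id_mask  = 0xfffffff0,
                .name         = "Foo 10/100 PHY",
                .features     = PHY_BASIC_FEATURES,
                .config_init  = genphy_config_init,
                .config_aneg  = genphy_config_aneg,
                .read_status  = genphy_read_status,
                .suspend      = genphy_suspend,
                .resume       = genphy_resume,
        },
};

module_phy_driver(foo_phy_driver);
MODULE_LICENSE("GPL");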
841 struct fixed_phy_status { int link; int speed; int duplex; int pause; int asym_pause; } ;
27 enum dsa_tag_protocol { DSA_TAG_PROTO_NONE = 0, DSA_TAG_PROTO_DSA = 1, DSA_TAG_PROTO_TRAILER = 2, DSA_TAG_PROTO_EDSA = 3, DSA_TAG_PROTO_BRCM = 4, DSA_TAG_LAST = 5 } ;
36 struct dsa_chip_data { struct device *host_dev; int sw_addr; int eeprom_len; struct device_node *of_node; char *port_names[12U]; struct device_node *port_dn[12U]; s8 rtable[4U]; } ;
70 struct dsa_platform_data { struct device *netdev; struct net_device *of_netdev; int nr_chips; struct dsa_chip_data *chip; } ;
86 struct packet_type ;
87 struct dsa_switch ;
87 struct dsa_device_ops ;
87 struct dsa_switch_tree { struct list_head list; u32 tree; struct kref refcount; bool applied; struct dsa_platform_data *pd; struct net_device *master_netdev; int (*rcv)(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *); struct ethtool_ops master_ethtool_ops; const struct ethtool_ops *master_orig_ethtool_ops; s8 cpu_switch; s8 cpu_port; struct dsa_switch *ds[4U]; const struct dsa_device_ops *tag_ops; } ;
140 struct dsa_port { struct net_device *netdev; struct device_node *dn; unsigned int ageing_time; } ;
146 struct dsa_switch_driver ;
146 struct dsa_switch { struct device *dev; struct dsa_switch_tree *dst; int index; void *priv; struct dsa_chip_data *cd; struct dsa_switch_driver *drv; s8 rtable[4U]; char hwmon_name[24U]; struct device *hwmon_dev; struct net_device *master_netdev; u32 dsa_port_mask; u32 cpu_port_mask; u32 enabled_port_mask; u32 phys_mii_mask; struct dsa_port ports[12U]; struct mii_bus *slave_mii_bus; } ;
233 struct switchdev_trans ;
234 struct switchdev_obj ;
235 struct switchdev_obj_port_fdb ;
236 struct switchdev_obj_port_vlan ;
237 struct dsa_switch_driver { struct list_head list; enum dsa_tag_protocol tag_protocol; const char * (*probe)(struct device *, struct device *, int, void **); int (*setup)(struct dsa_switch *); int (*set_addr)(struct dsa_switch *, u8 *); u32 (*get_phy_flags)(struct dsa_switch *, int); int (*phy_read)(struct dsa_switch *, int, int); int (*phy_write)(struct dsa_switch *, int, int, u16 ); void (*adjust_link)(struct dsa_switch *, int, struct phy_device *); void (*fixed_link_update)(struct dsa_switch *, int, struct fixed_phy_status *); void (*get_strings)(struct dsa_switch *, int, uint8_t *); void (*get_ethtool_stats)(struct dsa_switch *, int, uint64_t *); int (*get_sset_count)(struct dsa_switch *); void (*get_wol)(struct dsa_switch *, int, struct ethtool_wolinfo *); int (*set_wol)(struct dsa_switch *, int, struct ethtool_wolinfo *); int (*suspend)(struct dsa_switch *); int (*resume)(struct dsa_switch *); int (*port_enable)(struct dsa_switch *, int, struct phy_device *); void (*port_disable)(struct dsa_switch *, int, struct phy_device *); int (*set_eee)(struct dsa_switch *, int, struct phy_device *, struct ethtool_eee *); int (*get_eee)(struct dsa_switch *, int, struct ethtool_eee *); int (*get_temp)(struct dsa_switch *, int *); int (*get_temp_limit)(struct dsa_switch *, int *); int (*set_temp_limit)(struct dsa_switch *, int); int (*get_temp_alarm)(struct dsa_switch *, bool *); int (*get_eeprom_len)(struct dsa_switch *); int (*get_eeprom)(struct dsa_switch *, struct ethtool_eeprom *, u8 *); int (*set_eeprom)(struct dsa_switch *, struct ethtool_eeprom *, u8 *); int (*get_regs_len)(struct dsa_switch *, int); void (*get_regs)(struct dsa_switch *, int, struct ethtool_regs *, void *); int (*set_ageing_time)(struct dsa_switch *, unsigned int); int (*port_bridge_join)(struct dsa_switch *, int, struct net_device *); void (*port_bridge_leave)(struct dsa_switch *, int); void (*port_stp_state_set)(struct dsa_switch *, int, u8 ); int (*port_vlan_filtering)(struct dsa_switch *, int, bool ); int (*port_vlan_prepare)(struct dsa_switch *, int, const struct switchdev_obj_port_vlan *, struct switchdev_trans *); void (*port_vlan_add)(struct dsa_switch *, int, const struct switchdev_obj_port_vlan *, struct switchdev_trans *); int (*port_vlan_del)(struct dsa_switch *, int, const struct switchdev_obj_port_vlan *); int (*port_vlan_dump)(struct dsa_switch *, int, struct switchdev_obj_port_vlan *, int (*)(struct switchdev_obj *)); int (*port_fdb_prepare)(struct dsa_switch *, int, const struct switchdev_obj_port_fdb *, struct switchdev_trans *); void (*port_fdb_add)(struct dsa_switch *, int, const struct switchdev_obj_port_fdb *, struct switchdev_trans *); int (*port_fdb_del)(struct dsa_switch *, int, const struct switchdev_obj_port_fdb *); int (*port_fdb_dump)(struct dsa_switch *, int, struct switchdev_obj_port_fdb *, int (*)(struct switchdev_obj *)); } ;
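Aside: a hedged skeleton of a DSA switch driver for this structure layout, assuming the register_switch_driver() entry point that accompanied it; probe returns a model name on match, and all foo_* names are hypothetical.

static const char *foo_drv_probe(struct device *dsa_dev,
                                 struct device *host_dev, int sw_addr,
                                 void **priv)
{
        return "foo-switch";    /* non-NULL name means "switch recognized" */
}

static int foo_drv_setup(struct dsa_switch *ds)
{
        return 0;               /* program the switch registers here */
}

static int foo_drv_set_addr(struct dsa_switch *ds, u8 *addr)
{
        return 0;
}

static struct dsa_switch_driver foo_switch_driver = {
        .tag_protocol = DSA_TAG_PROTO_NONE,
        .probe        = foo_drv_probe,
        .setup        = foo_drv_setup,
        .set_addr     = foo_drv_set_addr,
};

/* module init: register_switch_driver(&foo_switch_driver); */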
389 struct ieee_ets { __u8 willing; __u8 ets_cap; __u8 cbs; __u8 tc_tx_bw[8U]; __u8 tc_rx_bw[8U]; __u8 tc_tsa[8U]; __u8 prio_tc[8U]; __u8 tc_reco_bw[8U]; __u8 tc_reco_tsa[8U]; __u8 reco_prio_tc[8U]; } ;
69 struct ieee_maxrate { __u64 tc_maxrate[8U]; } ;
87 struct ieee_qcn { __u8 rpg_enable[8U]; __u32 rppp_max_rps[8U]; __u32 rpg_time_reset[8U]; __u32 rpg_byte_reset[8U]; __u32 rpg_threshold[8U]; __u32 rpg_max_rate[8U]; __u32 rpg_ai_rate[8U]; __u32 rpg_hai_rate[8U]; __u32 rpg_gd[8U]; __u32 rpg_min_dec_fac[8U]; __u32 rpg_min_rate[8U]; __u32 cndd_state_machine[8U]; } ;
132 struct ieee_qcn_stats { __u64 rppp_rp_centiseconds[8U]; __u32 rppp_created_rps[8U]; } ;
144 struct ieee_pfc { __u8 pfc_cap; __u8 pfc_en; __u8 mbc; __u16 delay; __u64 requests[8U]; __u64 indications[8U]; } ;
164 struct cee_pg { __u8 willing; __u8 error; __u8 pg_en; __u8 tcs_supported; __u8 pg_bw[8U]; __u8 prio_pg[8U]; } ;
187 struct cee_pfc { __u8 willing; __u8 error; __u8 pfc_en; __u8 tcs_supported; } ;
202 struct dcb_app { __u8 selector; __u8 priority; __u16 protocol; } ;
236 struct dcb_peer_app_info { __u8 willing; __u8 error; } ;
40 struct dcbnl_rtnl_ops { int (*ieee_getets)(struct net_device *, struct ieee_ets *); int (*ieee_setets)(struct net_device *, struct ieee_ets *); int (*ieee_getmaxrate)(struct net_device *, struct ieee_maxrate *); int (*ieee_setmaxrate)(struct net_device *, struct ieee_maxrate *); int (*ieee_getqcn)(struct net_device *, struct ieee_qcn *); int (*ieee_setqcn)(struct net_device *, struct ieee_qcn *); int (*ieee_getqcnstats)(struct net_device *, struct ieee_qcn_stats *); int (*ieee_getpfc)(struct net_device *, struct ieee_pfc *); int (*ieee_setpfc)(struct net_device *, struct ieee_pfc *); int (*ieee_getapp)(struct net_device *, struct dcb_app *); int (*ieee_setapp)(struct net_device *, struct dcb_app *); int (*ieee_delapp)(struct net_device *, struct dcb_app *); int (*ieee_peer_getets)(struct net_device *, struct ieee_ets *); int (*ieee_peer_getpfc)(struct net_device *, struct ieee_pfc *); u8 (*getstate)(struct net_device *); u8 (*setstate)(struct net_device *, u8 ); void (*getpermhwaddr)(struct net_device *, u8 *); void (*setpgtccfgtx)(struct net_device *, int, u8 , u8 , u8 , u8 ); void (*setpgbwgcfgtx)(struct net_device *, int, u8 ); void (*setpgtccfgrx)(struct net_device *, int, u8 , u8 , u8 , u8 ); void (*setpgbwgcfgrx)(struct net_device *, int, u8 ); void (*getpgtccfgtx)(struct net_device *, int, u8 *, u8 *, u8 *, u8 *); void (*getpgbwgcfgtx)(struct net_device *, int, u8 *); void (*getpgtccfgrx)(struct net_device *, int, u8 *, u8 *, u8 *, u8 *); void (*getpgbwgcfgrx)(struct net_device *, int, u8 *); void (*setpfccfg)(struct net_device *, int, u8 ); void (*getpfccfg)(struct net_device *, int, u8 *); u8 (*setall)(struct net_device *); u8 (*getcap)(struct net_device *, int, u8 *); int (*getnumtcs)(struct net_device *, int, u8 *); int (*setnumtcs)(struct net_device *, int, u8 ); u8 (*getpfcstate)(struct net_device *); void (*setpfcstate)(struct net_device *, u8 ); void (*getbcncfg)(struct net_device *, int, u32 *); void (*setbcncfg)(struct net_device *, int, u32 ); void (*getbcnrp)(struct net_device *, int, u8 *); void (*setbcnrp)(struct net_device *, int, u8 ); int (*setapp)(struct net_device *, u8 , u16 , u8 ); int (*getapp)(struct net_device *, u8 , u16 ); u8 (*getfeatcfg)(struct net_device *, int, u8 *); u8 (*setfeatcfg)(struct net_device *, int, u8 ); u8 (*getdcbx)(struct net_device *); u8 (*setdcbx)(struct net_device *, u8 ); int (*peer_getappinfo)(struct net_device *, struct dcb_peer_app_info *, u16 *); int (*peer_getapptable)(struct net_device *, struct dcb_app *); int (*cee_peer_getpg)(struct net_device *, struct cee_pg *); int (*cee_peer_getpfc)(struct net_device *, struct cee_pfc *); } ;
105 struct taskstats { __u16 version; __u32 ac_exitcode; __u8 ac_flag; __u8 ac_nice; __u64 cpu_count; __u64 cpu_delay_total; __u64 blkio_count; __u64 blkio_delay_total; __u64 swapin_count; __u64 swapin_delay_total; __u64 cpu_run_real_total; __u64 cpu_run_virtual_total; char ac_comm[32U]; __u8 ac_sched; __u8 ac_pad[3U]; __u32 ac_uid; __u32 ac_gid; __u32 ac_pid; __u32 ac_ppid; __u32 ac_btime; __u64 ac_etime; __u64 ac_utime; __u64 ac_stime; __u64 ac_minflt; __u64 ac_majflt; __u64 coremem; __u64 virtmem; __u64 hiwater_rss; __u64 hiwater_vm; __u64 read_char; __u64 write_char; __u64 read_syscalls; __u64 write_syscalls; __u64 read_bytes; __u64 write_bytes; __u64 cancelled_write_bytes; __u64 nvcsw; __u64 nivcsw; __u64 ac_utimescaled; __u64 ac_stimescaled; __u64 cpu_scaled_run_real_total; __u64 freepages_count; __u64 freepages_delay_total; } ;
58 struct mnt_namespace ;
59 struct ipc_namespace ;
60 struct cgroup_namespace ;
61 struct nsproxy { atomic_t count; struct uts_namespace *uts_ns; struct ipc_namespace *ipc_ns; struct mnt_namespace *mnt_ns; struct pid_namespace *pid_ns_for_children; struct net *net_ns; struct cgroup_namespace *cgroup_ns; } ;
86 struct uid_gid_extent { u32 first; u32 lower_first; u32 count; } ;
19 struct uid_gid_map { u32 nr_extents; struct uid_gid_extent extent[5U]; } ;
20 struct user_namespace { struct uid_gid_map uid_map; struct uid_gid_map gid_map; struct uid_gid_map projid_map; atomic_t count; struct user_namespace *parent; int level; kuid_t owner; kgid_t group; struct ns_common ns; unsigned long flags; struct key *persistent_keyring_register; struct rw_semaphore persistent_keyring_register_sem; } ;
609 struct cgroup_namespace { atomic_t count; struct ns_common ns; struct user_namespace *user_ns; struct css_set *root_cset; } ;
663 struct netprio_map { struct callback_head rcu; u32 priomap_len; u32 priomap[]; } ;
99 struct xfrm_policy ;
100 struct xfrm_state ;
116 struct request_sock ;
41 struct nlmsghdr { __u32 nlmsg_len; __u16 nlmsg_type; __u16 nlmsg_flags; __u32 nlmsg_seq; __u32 nlmsg_pid; } ;
143 struct nlattr { __u16 nla_len; __u16 nla_type; } ;
105 struct netlink_callback { struct sk_buff *skb; const struct nlmsghdr *nlh; int (*start)(struct netlink_callback *); int (*dump)(struct sk_buff *, struct netlink_callback *); int (*done)(struct netlink_callback *); void *data; struct module *module; u16 family; u16 min_dump_alloc; unsigned int prev_seq; unsigned int seq; long args[6U]; } ;
183 struct ndmsg { __u8 ndm_family; __u8 ndm_pad1; __u16 ndm_pad2; __s32 ndm_ifindex; __u16 ndm_state; __u8 ndm_flags; __u8 ndm_type; } ;
41 struct rtnl_link_stats64 { __u64 rx_packets; __u64 tx_packets; __u64 rx_bytes; __u64 tx_bytes; __u64 rx_errors; __u64 tx_errors; __u64 rx_dropped; __u64 tx_dropped; __u64 multicast; __u64 collisions; __u64 rx_length_errors; __u64 rx_over_errors; __u64 rx_crc_errors; __u64 rx_frame_errors; __u64 rx_fifo_errors; __u64 rx_missed_errors; __u64 tx_aborted_errors; __u64 tx_carrier_errors; __u64 tx_fifo_errors; __u64 tx_heartbeat_errors; __u64 tx_window_errors; __u64 rx_compressed; __u64 tx_compressed; __u64 rx_nohandler; } ;
840 struct ifla_vf_stats { __u64 rx_packets; __u64 tx_packets; __u64 rx_bytes; __u64 tx_bytes; __u64 broadcast; __u64 multicast; } ;
16 struct ifla_vf_info { __u32 vf; __u8 mac[32U]; __u32 vlan; __u32 qos; __u32 spoofchk; __u32 linkstate; __u32 min_tx_rate; __u32 max_tx_rate; __u32 rss_query_en; __u32 trusted; } ;
118 struct tc_stats { __u64 bytes; __u32 packets; __u32 drops; __u32 overlimits; __u32 bps; __u32 pps; __u32 qlen; __u32 backlog; } ;
96 struct tc_sizespec { unsigned char cell_log; unsigned char size_log; short cell_align; int overhead; unsigned int linklayer; unsigned int mpu; unsigned int mtu; unsigned int tsize; } ;
486 struct netpoll_info ;
487 struct wireless_dev ;
488 struct wpan_dev ;
489 struct mpls_dev ;
490 struct udp_tunnel_info ;
491 struct bpf_prog ;
69 enum netdev_tx { __NETDEV_TX_MIN = -2147483648, NETDEV_TX_OK = 0, NETDEV_TX_BUSY = 16 } ;
112 typedef enum netdev_tx netdev_tx_t;
131 struct net_device_stats { unsigned long rx_packets; unsigned long tx_packets; unsigned long rx_bytes; unsigned long tx_bytes; unsigned long rx_errors; unsigned long tx_errors; unsigned long rx_dropped; unsigned long tx_dropped; unsigned long multicast; unsigned long collisions; unsigned long rx_length_errors; unsigned long rx_over_errors; unsigned long rx_crc_errors; unsigned long rx_frame_errors; unsigned long rx_fifo_errors; unsigned long rx_missed_errors; unsigned long tx_aborted_errors; unsigned long tx_carrier_errors; unsigned long tx_fifo_errors; unsigned long tx_heartbeat_errors; unsigned long tx_window_errors; unsigned long rx_compressed; unsigned long tx_compressed; } ;
194 struct neigh_parms ;
195 struct netdev_hw_addr { struct list_head list; unsigned char addr[32U]; unsigned char type; bool global_use; int sync_cnt; int refcount; int synced; struct callback_head callback_head; } ;
215 struct netdev_hw_addr_list { struct list_head list; int count; } ;
220 struct hh_cache { u16 hh_len; u16 __pad; seqlock_t hh_lock; unsigned long hh_data[16U]; } ;
249 struct header_ops { int (*create)(struct sk_buff *, struct net_device *, unsigned short, const void *, const void *, unsigned int); int (*parse)(const struct sk_buff *, unsigned char *); int (*cache)(const struct neighbour *, struct hh_cache *, __be16 ); void (*cache_update)(struct hh_cache *, const struct net_device *, const unsigned char *); bool (*validate)(const char *, unsigned int); } ;
300 struct napi_struct { struct list_head poll_list; unsigned long state; int weight; unsigned int gro_count; int (*poll)(struct napi_struct *, int); spinlock_t poll_lock; int poll_owner; struct net_device *dev; struct sk_buff *gro_list; struct sk_buff *skb; struct hrtimer timer; struct list_head dev_list; struct hlist_node napi_hash_node; unsigned int napi_id; } ;
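Aside: the poll slot above carries the NAPI contract — consume at most "budget" packets, and call napi_complete() plus re-enable interrupts only when the ring is drained. A hedged sketch; foo_clean_rx is a hypothetical stand-in for a driver's RX cleaner.

#include <linux/netdevice.h>

/* hypothetical: would pull up to "budget" packets off the RX ring */
static int foo_clean_rx(struct napi_struct *napi, int budget)
{
        return 0;
}

static int foo_poll(struct napi_struct *napi, int budget)
{
        int done = foo_clean_rx(napi, budget);

        if (done < budget) {
                napi_complete(napi);
                /* re-enable the device's RX interrupt here */
        }
        return done;    /* done == budget keeps us on the poll list */
}

/* probe: netif_napi_add(dev, &priv->napi, foo_poll, NAPI_POLL_WEIGHT);
 * IRQ handler: napi_schedule(&priv->napi); */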
337 enum gro_result { GRO_MERGED = 0, GRO_MERGED_FREE = 1, GRO_HELD = 2, GRO_NORMAL = 3, GRO_DROP = 4 } ;
345 typedef enum gro_result gro_result_t;
346 enum rx_handler_result { RX_HANDLER_CONSUMED = 0, RX_HANDLER_ANOTHER = 1, RX_HANDLER_EXACT = 2, RX_HANDLER_PASS = 3 } ;
394 typedef enum rx_handler_result rx_handler_result_t;
395 typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **);
540 struct Qdisc ;
540 struct netdev_queue { struct net_device *dev; struct Qdisc *qdisc; struct Qdisc *qdisc_sleeping; struct kobject kobj; int numa_node; unsigned long tx_maxrate; unsigned long trans_timeout; spinlock_t _xmit_lock; int xmit_lock_owner; unsigned long trans_start; unsigned long state; struct dql dql; } ;
611 struct rps_map { unsigned int len; struct callback_head rcu; u16 cpus[0U]; } ;
623 struct rps_dev_flow { u16 cpu; u16 filter; unsigned int last_qtail; } ;
635 struct rps_dev_flow_table { unsigned int mask; struct callback_head rcu; struct rps_dev_flow flows[0U]; } ;
687 struct netdev_rx_queue { struct rps_map *rps_map; struct rps_dev_flow_table *rps_flow_table; struct kobject kobj; struct net_device *dev; } ;
710 struct xps_map { unsigned int len; unsigned int alloc_len; struct callback_head rcu; u16 queues[0U]; } ;
723 struct xps_dev_maps { struct callback_head rcu; struct xps_map *cpu_map[0U]; } ;
734 struct netdev_tc_txq { u16 count; u16 offset; } ;
745 struct netdev_fcoe_hbainfo { char manufacturer[64U]; char serial_number[64U]; char hardware_version[64U]; char driver_version[64U]; char optionrom_version[64U]; char firmware_version[64U]; char model[256U]; char model_description[256U]; } ;
761 struct netdev_phys_item_id { unsigned char id[32U]; unsigned char id_len; } ;
788 struct tc_cls_u32_offload ;
789 struct tc_cls_flower_offload ;
789 struct tc_cls_matchall_offload ;
789 union __anonunion____missing_field_name_469 { u8 tc; struct tc_cls_u32_offload *cls_u32; struct tc_cls_flower_offload *cls_flower; struct tc_cls_matchall_offload *cls_mall; } ;
789 struct tc_to_netdev { unsigned int type; union __anonunion____missing_field_name_469 __annonCompField106; } ;
804 enum xdp_netdev_command { XDP_SETUP_PROG = 0, XDP_QUERY_PROG = 1 } ;
809 union __anonunion____missing_field_name_470 { struct bpf_prog *prog; bool prog_attached; } ;
809 struct netdev_xdp { enum xdp_netdev_command command; union __anonunion____missing_field_name_470 __annonCompField107; } ;
832 struct net_device_ops { int (*ndo_init)(struct net_device *); void (*ndo_uninit)(struct net_device *); int (*ndo_open)(struct net_device *); int (*ndo_stop)(struct net_device *); netdev_tx_t (*ndo_start_xmit)(struct sk_buff *, struct net_device *); netdev_features_t (*ndo_features_check)(struct sk_buff *, struct net_device *, netdev_features_t ); u16 (*ndo_select_queue)(struct net_device *, struct sk_buff *, void *, u16 (*)(struct net_device *, struct sk_buff *)); void (*ndo_change_rx_flags)(struct net_device *, int); void (*ndo_set_rx_mode)(struct net_device *); int (*ndo_set_mac_address)(struct net_device *, void *); int (*ndo_validate_addr)(struct net_device *); int (*ndo_do_ioctl)(struct net_device *, struct ifreq *, int); int (*ndo_set_config)(struct net_device *, struct ifmap *); int (*ndo_change_mtu)(struct net_device *, int); int (*ndo_neigh_setup)(struct net_device *, struct neigh_parms *); void (*ndo_tx_timeout)(struct net_device *); struct rtnl_link_stats64 * (*ndo_get_stats64)(struct net_device *, struct rtnl_link_stats64 *); struct net_device_stats * (*ndo_get_stats)(struct net_device *); int (*ndo_vlan_rx_add_vid)(struct net_device *, __be16 , u16 ); int (*ndo_vlan_rx_kill_vid)(struct net_device *, __be16 , u16 ); void (*ndo_poll_controller)(struct net_device *); int (*ndo_netpoll_setup)(struct net_device *, struct netpoll_info *); void (*ndo_netpoll_cleanup)(struct net_device *); int (*ndo_busy_poll)(struct napi_struct *); int (*ndo_set_vf_mac)(struct net_device *, int, u8 *); int (*ndo_set_vf_vlan)(struct net_device *, int, u16 , u8 ); int (*ndo_set_vf_rate)(struct net_device *, int, int, int); int (*ndo_set_vf_spoofchk)(struct net_device *, int, bool ); int (*ndo_set_vf_trust)(struct net_device *, int, bool ); int (*ndo_get_vf_config)(struct net_device *, int, struct ifla_vf_info *); int (*ndo_set_vf_link_state)(struct net_device *, int, int); int (*ndo_get_vf_stats)(struct net_device *, int, struct ifla_vf_stats *); int (*ndo_set_vf_port)(struct net_device *, int, struct nlattr **); int (*ndo_get_vf_port)(struct net_device *, int, struct sk_buff *); int (*ndo_set_vf_guid)(struct net_device *, int, u64 , int); int (*ndo_set_vf_rss_query_en)(struct net_device *, int, bool ); int (*ndo_setup_tc)(struct net_device *, u32 , __be16 , struct tc_to_netdev *); int (*ndo_fcoe_enable)(struct net_device *); int (*ndo_fcoe_disable)(struct net_device *); int (*ndo_fcoe_ddp_setup)(struct net_device *, u16 , struct scatterlist *, unsigned int); int (*ndo_fcoe_ddp_done)(struct net_device *, u16 ); int (*ndo_fcoe_ddp_target)(struct net_device *, u16 , struct scatterlist *, unsigned int); int (*ndo_fcoe_get_hbainfo)(struct net_device *, struct netdev_fcoe_hbainfo *); int (*ndo_fcoe_get_wwn)(struct net_device *, u64 *, int); int (*ndo_rx_flow_steer)(struct net_device *, const struct sk_buff *, u16 , u32 ); int (*ndo_add_slave)(struct net_device *, struct net_device *); int (*ndo_del_slave)(struct net_device *, struct net_device *); netdev_features_t (*ndo_fix_features)(struct net_device *, netdev_features_t ); int (*ndo_set_features)(struct net_device *, netdev_features_t ); int (*ndo_neigh_construct)(struct net_device *, struct neighbour *); void (*ndo_neigh_destroy)(struct net_device *, struct neighbour *); int (*ndo_fdb_add)(struct ndmsg *, struct nlattr **, struct net_device *, const unsigned char *, u16 , u16 ); int (*ndo_fdb_del)(struct ndmsg *, struct nlattr **, struct net_device *, const unsigned char *, u16 ); int (*ndo_fdb_dump)(struct sk_buff *, struct netlink_callback *, struct net_device *, struct net_device *, int); int (*ndo_bridge_setlink)(struct net_device *, struct nlmsghdr *, u16 ); int (*ndo_bridge_getlink)(struct sk_buff *, u32 , u32 , struct net_device *, u32 , int); int (*ndo_bridge_dellink)(struct net_device *, struct nlmsghdr *, u16 ); int (*ndo_change_carrier)(struct net_device *, bool ); int (*ndo_get_phys_port_id)(struct net_device *, struct netdev_phys_item_id *); int (*ndo_get_phys_port_name)(struct net_device *, char *, size_t ); void (*ndo_udp_tunnel_add)(struct net_device *, struct udp_tunnel_info *); void (*ndo_udp_tunnel_del)(struct net_device *, struct udp_tunnel_info *); void * (*ndo_dfwd_add_station)(struct net_device *, struct net_device *); void (*ndo_dfwd_del_station)(struct net_device *, void *); netdev_tx_t (*ndo_dfwd_start_xmit)(struct sk_buff *, struct net_device *, void *); int (*ndo_get_lock_subclass)(struct net_device *); int (*ndo_set_tx_maxrate)(struct net_device *, int, u32 ); int (*ndo_get_iflink)(const struct net_device *); int (*ndo_change_proto_down)(struct net_device *, bool ); int (*ndo_fill_metadata_dst)(struct net_device *, struct sk_buff *); void (*ndo_set_rx_headroom)(struct net_device *, int); int (*ndo_xdp)(struct net_device *, struct netdev_xdp *); } ;
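Aside: of the dozens of slots above, only a handful are needed for a functional driver. A hedged minimal sketch with hypothetical foo_* names:

#include <linux/netdevice.h>

static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *dev)
{
        /* hand the skb to hardware here */
        dev_kfree_skb_any(skb);
        return NETDEV_TX_OK;
}

static int foo_open(struct net_device *dev)
{
        netif_start_queue(dev);
        return 0;
}

static int foo_stop(struct net_device *dev)
{
        netif_stop_queue(dev);
        return 0;
}

static const struct net_device_ops foo_netdev_ops = {
        .ndo_open       = foo_open,
        .ndo_stop       = foo_stop,
        .ndo_start_xmit = foo_xmit,
};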
1354 struct __anonstruct_adj_list_471 { struct list_head upper; struct list_head lower; } ;
1354 struct __anonstruct_all_adj_list_472 { struct list_head upper; struct list_head lower; } ;
1354 struct iw_handler_def ;
1354 struct iw_public_data ;
1354 struct switchdev_ops ;
1354 struct l3mdev_ops ;
1354 struct ndisc_ops ;
1354 struct vlan_info ;
1354 struct tipc_bearer ;
1354 struct in_device ;
1354 struct dn_dev ;
1354 struct inet6_dev ;
1354 struct tcf_proto ;
1354 struct cpu_rmap ;
1354 struct pcpu_lstats ;
1354 struct pcpu_sw_netstats ;
1354 struct pcpu_dstats ;
1354 struct pcpu_vstats ;
1354 union __anonunion____missing_field_name_473 { void *ml_priv; struct pcpu_lstats *lstats; struct pcpu_sw_netstats *tstats; struct pcpu_dstats *dstats; struct pcpu_vstats *vstats; } ;
1354 struct garp_port ;
1354 struct mrp_port ;
1354 struct rtnl_link_ops ;
1354 struct net_device { char name[16U]; struct hlist_node name_hlist; char *ifalias; unsigned long mem_end; unsigned long mem_start; unsigned long base_addr; int irq; atomic_t carrier_changes; unsigned long state; struct list_head dev_list; struct list_head napi_list; struct list_head unreg_list; struct list_head close_list; struct list_head ptype_all; struct list_head ptype_specific; struct __anonstruct_adj_list_471 adj_list; struct __anonstruct_all_adj_list_472 all_adj_list; netdev_features_t features; netdev_features_t hw_features; netdev_features_t wanted_features; netdev_features_t vlan_features; netdev_features_t hw_enc_features; netdev_features_t mpls_features; netdev_features_t gso_partial_features; int ifindex; int group; struct net_device_stats stats; atomic_long_t rx_dropped; atomic_long_t tx_dropped; atomic_long_t rx_nohandler; const struct iw_handler_def *wireless_handlers; struct iw_public_data *wireless_data; const struct net_device_ops *netdev_ops; const struct ethtool_ops *ethtool_ops; const struct switchdev_ops *switchdev_ops; const struct l3mdev_ops *l3mdev_ops; const struct ndisc_ops *ndisc_ops; const struct header_ops *header_ops; unsigned int flags; unsigned int priv_flags; unsigned short gflags; unsigned short padded; unsigned char operstate; unsigned char link_mode; unsigned char if_port; unsigned char dma; unsigned int mtu; unsigned short type; unsigned short hard_header_len; unsigned short needed_headroom; unsigned short needed_tailroom; unsigned char perm_addr[32U]; unsigned char addr_assign_type; unsigned char addr_len; unsigned short neigh_priv_len; unsigned short dev_id; unsigned short dev_port; spinlock_t addr_list_lock; unsigned char name_assign_type; bool uc_promisc; struct netdev_hw_addr_list uc; struct netdev_hw_addr_list mc; struct netdev_hw_addr_list dev_addrs; struct kset *queues_kset; unsigned int promiscuity; unsigned int allmulti; struct vlan_info *vlan_info; struct dsa_switch_tree *dsa_ptr; struct tipc_bearer *tipc_ptr; void *atalk_ptr; struct in_device *ip_ptr; struct dn_dev *dn_ptr; struct inet6_dev *ip6_ptr; void *ax25_ptr; struct wireless_dev *ieee80211_ptr; struct wpan_dev *ieee802154_ptr; struct mpls_dev *mpls_ptr; unsigned long last_rx; unsigned char *dev_addr; struct netdev_rx_queue *_rx; unsigned int num_rx_queues; unsigned int real_num_rx_queues; unsigned long gro_flush_timeout; rx_handler_func_t *rx_handler; void *rx_handler_data; struct tcf_proto *ingress_cl_list; struct netdev_queue *ingress_queue; struct list_head nf_hooks_ingress; unsigned char broadcast[32U]; struct cpu_rmap *rx_cpu_rmap; struct hlist_node index_hlist; struct netdev_queue *_tx; unsigned int num_tx_queues; unsigned int real_num_tx_queues; struct Qdisc *qdisc; unsigned long tx_queue_len; spinlock_t tx_global_lock; int watchdog_timeo; struct xps_dev_maps *xps_maps; struct tcf_proto *egress_cl_list; u32 offload_fwd_mark; struct timer_list watchdog_timer; int *pcpu_refcnt; struct list_head todo_list; struct list_head link_watch_list; unsigned char reg_state; bool dismantle; unsigned short rtnl_link_state; void (*destructor)(struct net_device *); struct netpoll_info *npinfo; possible_net_t nd_net; union __anonunion____missing_field_name_473 __annonCompField108; struct garp_port *garp_port; struct mrp_port *mrp_port; struct device dev; const struct attribute_group *sysfs_groups[4U]; const struct attribute_group *sysfs_rx_queue_group; const struct rtnl_link_ops *rtnl_link_ops; unsigned int gso_max_size; u16 gso_max_segs; const struct dcbnl_rtnl_ops *dcbnl_ops; u8 num_tc; struct netdev_tc_txq tc_to_txq[16U]; u8 prio_tc_map[16U]; unsigned int fcoe_ddp_xid; struct netprio_map *priomap; struct phy_device *phydev; struct lock_class_key *qdisc_tx_busylock; struct lock_class_key *qdisc_running_key; bool proto_down; } ;
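Aside: drivers rarely fill this structure by hand; alloc_etherdev() sizes it (plus a trailing private area) and installs Ethernet defaults. A hedged sketch, with foo_priv and foo_netdev_ops (from the earlier ops sketch) hypothetical:

#include <linux/etherdevice.h>

struct foo_priv { struct napi_struct napi; };   /* hypothetical private state */

static int foo_register(void)
{
        struct net_device *dev;
        int err;

        dev = alloc_etherdev(sizeof(struct foo_priv));
        if (!dev)
                return -ENOMEM;
        dev->netdev_ops = &foo_netdev_ops;  /* ops table from the earlier sketch */
        eth_hw_addr_random(dev);            /* random locally administered MAC */
        err = register_netdev(dev);
        if (err)
                free_netdev(dev);
        return err;                          /* priv area via netdev_priv(dev) */
}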
2165 struct packet_type { __be16 type; struct net_device *dev; int (*func)(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *); bool (*id_match)(struct packet_type *, struct sock *); void *af_packet_priv; struct list_head list; } ;
2195 struct pcpu_sw_netstats { u64 rx_packets; u64 rx_bytes; u64 tx_packets; u64 tx_bytes; struct u64_stats_sync syncp; } ;
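Aside: these per-cpu counters are updated under u64_stats_update_begin/end so 32-bit readers see a consistent 64-bit pair (on this 64-bit configuration u64_stats_sync is empty, as shown earlier, and the calls compile away). A hedged sketch with a hypothetical foo_count_rx:

#include <linux/netdevice.h>
#include <linux/u64_stats_sync.h>

static void foo_count_rx(struct pcpu_sw_netstats *stats, unsigned int len)
{
        u64_stats_update_begin(&stats->syncp);
        stats->rx_packets++;
        stats->rx_bytes += len;
        u64_stats_update_end(&stats->syncp);
}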
3206 enum skb_free_reason { SKB_REASON_CONSUMED = 0, SKB_REASON_DROPPED = 1 } ;
103 struct page_counter { atomic_long_t count; unsigned long limit; struct page_counter *parent; unsigned long watermark; unsigned long failcnt; } ;
33 struct eventfd_ctx ;
41 struct vmpressure { unsigned long scanned; unsigned long reclaimed; unsigned long tree_scanned; unsigned long tree_reclaimed; struct spinlock sr_lock; struct list_head events; struct mutex events_lock; struct work_struct work; } ;
44 struct fprop_global { struct percpu_counter events; unsigned int period; seqcount_t sequence; } ;
72 struct fprop_local_percpu { struct percpu_counter events; unsigned int period; raw_spinlock_t lock; } ;
32 typedef int congested_fn(void *, int);
41 struct bdi_writeback_congested { unsigned long state; atomic_t refcnt; struct backing_dev_info *bdi; int blkcg_id; struct rb_node rb_node; } ;
60 union __anonunion____missing_field_name_478 { struct work_struct release_work; struct callback_head rcu; } ;
60 struct bdi_writeback { struct backing_dev_info *bdi; unsigned long state; unsigned long last_old_flush; struct list_head b_dirty; struct list_head b_io; struct list_head b_more_io; struct list_head b_dirty_time; spinlock_t list_lock; struct percpu_counter stat[4U]; struct bdi_writeback_congested *congested; unsigned long bw_time_stamp; unsigned long dirtied_stamp; unsigned long written_stamp; unsigned long write_bandwidth; unsigned long avg_write_bandwidth; unsigned long dirty_ratelimit; unsigned long balanced_dirty_ratelimit; struct fprop_local_percpu completions; int dirty_exceeded; spinlock_t work_lock; struct list_head work_list; struct delayed_work dwork; struct list_head bdi_node; struct percpu_ref refcnt; struct fprop_local_percpu memcg_completions; struct cgroup_subsys_state *memcg_css; struct cgroup_subsys_state *blkcg_css; struct list_head memcg_node; struct list_head blkcg_node; union __anonunion____missing_field_name_478 __annonCompField109; } ;
134 struct backing_dev_info { struct list_head bdi_list; unsigned long ra_pages; unsigned int capabilities; congested_fn *congested_fn; void *congested_data; char *name; unsigned int min_ratio; unsigned int max_ratio; unsigned int max_prop_frac; atomic_long_t tot_write_bandwidth; struct bdi_writeback wb; struct list_head wb_list; struct radix_tree_root cgwb_tree; struct rb_root cgwb_congested_tree; atomic_t usage_cnt; wait_queue_head_t wb_waitq; struct device *dev; struct device *owner; struct timer_list laptop_mode_wb_timer; struct dentry *debug_dir; struct dentry *debug_stats; } ;
14 enum writeback_sync_modes { WB_SYNC_NONE = 0, WB_SYNC_ALL = 1 } ;
31 struct writeback_control { long nr_to_write; long pages_skipped; loff_t range_start; loff_t range_end; enum writeback_sync_modes sync_mode; unsigned char for_kupdate; unsigned char for_background; unsigned char tagged_writepages; unsigned char for_reclaim; unsigned char range_cyclic; unsigned char for_sync; struct bdi_writeback *wb; struct inode *inode; int wb_id; int wb_lcand_id; int wb_tcand_id; size_t wb_bytes; size_t wb_lcand_bytes; size_t wb_tcand_bytes; } ;
101 struct wb_domain { spinlock_t lock; struct fprop_global completions; struct timer_list period_timer; unsigned long period_time; unsigned long dirty_limit_tstamp; unsigned long dirty_limit; } ;
12 typedef void * mempool_alloc_t(gfp_t , void *);
13 typedef void mempool_free_t(void *, void *);
14 struct mempool_s { spinlock_t lock; int min_nr; int curr_nr; void **elements; void *pool_data; mempool_alloc_t *alloc; mempool_free_t *free; wait_queue_head_t wait; } ;
25 typedef struct mempool_s mempool_t;
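Aside: a mempool_t wraps an allocator with min_nr preallocated elements so allocations can make forward progress under memory pressure. A hedged sketch using the kmalloc-backed constructor; foo_pool is hypothetical:

#include <linux/mempool.h>

static mempool_t *foo_pool;

static int foo_pool_init(void)
{
        foo_pool = mempool_create_kmalloc_pool(16, 256); /* 16 x 256-byte buffers */
        return foo_pool ? 0 : -ENOMEM;
}

/* use: buf = mempool_alloc(foo_pool, GFP_KERNEL);
 *      mempool_free(buf, foo_pool); */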
79 union __anonunion____missing_field_name_479 { struct list_head q_node; struct kmem_cache *__rcu_icq_cache; } ;
79 union __anonunion____missing_field_name_480 { struct hlist_node ioc_node; struct callback_head __rcu_head; } ;
79 struct io_cq { struct request_queue *q; struct io_context *ioc; union __anonunion____missing_field_name_479 __annonCompField110; union __anonunion____missing_field_name_480 __annonCompField111; unsigned int flags; } ;
92 struct io_context { atomic_long_t refcount; atomic_t active_ref; atomic_t nr_tasks; spinlock_t lock; unsigned short ioprio; int nr_batch_requests; unsigned long last_waited; struct radix_tree_root icq_tree; struct io_cq *icq_hint; struct hlist_head icq_list; struct work_struct release_work; } ;
295 struct bio_integrity_payload { struct bio *bip_bio; struct bvec_iter bip_iter; bio_end_io_t *bip_end_io; unsigned short bip_slab; unsigned short bip_vcnt; unsigned short bip_max_vcnt; unsigned short bip_flags; struct work_struct bip_work; struct bio_vec *bip_vec; struct bio_vec bip_inline_vecs[0U]; } ;
529 struct bio_list { struct bio *head; struct bio *tail; } ;
661 struct bio_set { struct kmem_cache *bio_slab; unsigned int front_pad; mempool_t *bio_pool; mempool_t *bvec_pool; mempool_t *bio_integrity_pool; mempool_t *bvec_integrity_pool; spinlock_t rescue_lock; struct bio_list rescue_list; struct work_struct rescue_work; struct workqueue_struct *rescue_workqueue; } ;
87 struct mem_cgroup_id { int id; atomic_t ref; } ;
104 struct mem_cgroup_stat_cpu { long count[11U]; unsigned long events[8U]; unsigned long nr_page_events; unsigned long targets[3U]; } ;
111 struct mem_cgroup_reclaim_iter { struct mem_cgroup *position; unsigned int generation; } ;
117 struct mem_cgroup_per_node { struct lruvec lruvec; unsigned long lru_size[5U]; struct mem_cgroup_reclaim_iter iter[13U]; struct rb_node tree_node; unsigned long usage_in_excess; bool on_tree; struct mem_cgroup *memcg; } ;
133 struct mem_cgroup_threshold { struct eventfd_ctx *eventfd; unsigned long threshold; } ;
139 struct mem_cgroup_threshold_ary { int current_threshold; unsigned int size; struct mem_cgroup_threshold entries[0U]; } ;
149 struct mem_cgroup_thresholds { struct mem_cgroup_threshold_ary *primary; struct mem_cgroup_threshold_ary *spare; } ;
160 enum memcg_kmem_state { KMEM_NONE = 0, KMEM_ALLOCATED = 1, KMEM_ONLINE = 2 } ;
166 struct mem_cgroup { struct cgroup_subsys_state css; struct mem_cgroup_id id; struct page_counter memory; struct page_counter swap; struct page_counter memsw; struct page_counter kmem; struct page_counter tcpmem; unsigned long low; unsigned long high; struct work_struct high_work; unsigned long soft_limit; struct vmpressure vmpressure; bool use_hierarchy; bool oom_lock; int under_oom; int swappiness; int oom_kill_disable; struct cgroup_file events_file; struct mutex thresholds_lock; struct mem_cgroup_thresholds thresholds; struct mem_cgroup_thresholds memsw_thresholds; struct list_head oom_notify; unsigned long move_charge_at_immigrate; atomic_t moving_account; spinlock_t move_lock; struct task_struct *move_lock_task; unsigned long move_lock_flags; struct mem_cgroup_stat_cpu *stat; unsigned long socket_pressure; bool tcpmem_active; int tcpmem_pressure; int kmemcg_id; enum memcg_kmem_state kmem_state; int last_scanned_node; nodemask_t scan_nodes; atomic_t numainfo_events; atomic_t numainfo_updating; struct list_head cgwb_list; struct wb_domain cgwb_domain; struct list_head event_list; spinlock_t event_list_lock; struct mem_cgroup_per_node *nodeinfo[0U]; } ;
27 struct gnet_stats_basic_packed { __u64 bytes; __u32 packets; } ;
41 struct gnet_stats_rate_est64 { __u64 bps; __u64 pps; } ;
51 struct gnet_stats_queue { __u32 qlen; __u32 backlog; __u32 drops; __u32 requeues; __u32 overlimits; } ;
519 struct tcmsg { unsigned char tcm_family; unsigned char tcm__pad1; unsigned short tcm__pad2; int tcm_ifindex; __u32 tcm_handle; __u32 tcm_parent; __u32 tcm_info; } ;
122 struct gnet_stats_basic_cpu { struct gnet_stats_basic_packed bstats; struct u64_stats_sync syncp; } ;
13 struct gnet_dump { spinlock_t *lock; struct sk_buff *skb; struct nlattr *tail; int compat_tc_stats; int compat_xstats; int padattr; void *xstats; int xstats_len; struct tc_stats tc_stats; } ;
87 struct nla_policy { u16 type; u16 len; } ;
25 struct rtnl_link_ops { struct list_head list; const char *kind; size_t priv_size; void (*setup)(struct net_device *); int maxtype; const struct nla_policy *policy; int (*validate)(struct nlattr **, struct nlattr **); int (*newlink)(struct net *, struct net_device *, struct nlattr **, struct nlattr **); int (*changelink)(struct net_device *, struct nlattr **, struct nlattr **); void (*dellink)(struct net_device *, struct list_head *); size_t (*get_size)(const struct net_device *); int (*fill_info)(struct sk_buff *, const struct net_device *); size_t (*get_xstats_size)(const struct net_device *); int (*fill_xstats)(struct sk_buff *, const struct net_device *); unsigned int (*get_num_tx_queues)(); unsigned int (*get_num_rx_queues)(); int slave_maxtype; const struct nla_policy *slave_policy; int (*slave_validate)(struct nlattr **, struct nlattr **); int (*slave_changelink)(struct net_device *, struct net_device *, struct nlattr **, struct nlattr **); size_t (*get_slave_size)(const struct net_device *, const struct net_device *); int (*fill_slave_info)(struct sk_buff *, const struct net_device *, const struct net_device *); struct net * (*get_link_net)(const struct net_device *); size_t (*get_linkxstats_size)(const struct net_device *, int); int (*fill_linkxstats)(struct sk_buff *, const struct net_device *, int *, int); } ;
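Aside: registering one of these makes "ip link add ... type foo" work; kind and setup are the essential slots. A hedged sketch with hypothetical foo_* names (foo_netdev_ops refers back to the earlier ops sketch):

#include <linux/etherdevice.h>
#include <net/rtnetlink.h>

static void foo_link_setup(struct net_device *dev)
{
        ether_setup(dev);                   /* Ethernet defaults */
        dev->netdev_ops = &foo_netdev_ops;  /* from the earlier sketch */
}

static struct rtnl_link_ops foo_link_ops = {
        .kind      = "foo",
        .setup     = foo_link_setup,
        .priv_size = 0,   /* a real driver passes sizeof its private struct */
};

/* module init: rtnl_link_register(&foo_link_ops); */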
158 struct Qdisc_ops ;
159 struct qdisc_walker ;
160 struct tcf_walker ;
30 struct qdisc_size_table { struct callback_head rcu; struct list_head list; struct tc_sizespec szopts; int refcnt; u16 data[]; } ;
38 struct Qdisc { int (*enqueue)(struct sk_buff *, struct Qdisc *, struct sk_buff **); struct sk_buff * (*dequeue)(struct Qdisc *); unsigned int flags; u32 limit; const struct Qdisc_ops *ops; struct qdisc_size_table *stab; struct list_head list; u32 handle; u32 parent; void *u32_node; struct netdev_queue *dev_queue; struct gnet_stats_rate_est64 rate_est; struct gnet_stats_basic_cpu *cpu_bstats; struct gnet_stats_queue *cpu_qstats; struct sk_buff *gso_skb; struct sk_buff_head q; struct gnet_stats_basic_packed bstats; seqcount_t running; struct gnet_stats_queue qstats; unsigned long state; struct Qdisc *next_sched; struct sk_buff *skb_bad_txq; struct callback_head callback_head; int padded; atomic_t refcnt; spinlock_t busylock; } ;
126 struct Qdisc_class_ops { struct netdev_queue * (*select_queue)(struct Qdisc *, struct tcmsg *); int (*graft)(struct Qdisc *, unsigned long, struct Qdisc *, struct Qdisc **); struct Qdisc * (*leaf)(struct Qdisc *, unsigned long); void (*qlen_notify)(struct Qdisc *, unsigned long); unsigned long int (*get)(struct Qdisc *, u32 ); void (*put)(struct Qdisc *, unsigned long); int (*change)(struct Qdisc *, u32 , u32 , struct nlattr **, unsigned long *); int (*delete)(struct Qdisc *, unsigned long); void (*walk)(struct Qdisc *, struct qdisc_walker *); struct tcf_proto ** (*tcf_chain)(struct Qdisc *, unsigned long); bool (*tcf_cl_offload)(u32 ); unsigned long int (*bind_tcf)(struct Qdisc *, unsigned long, u32 ); void (*unbind_tcf)(struct Qdisc *, unsigned long); int (*dump)(struct Qdisc *, unsigned long, struct sk_buff *, struct tcmsg *); int (*dump_stats)(struct Qdisc *, unsigned long, struct gnet_dump *); } ;
158 struct Qdisc_ops { struct Qdisc_ops *next; const struct Qdisc_class_ops *cl_ops; char id[16U]; int priv_size; int (*enqueue)(struct sk_buff *, struct Qdisc *, struct sk_buff **); struct sk_buff * (*dequeue)(struct Qdisc *); struct sk_buff * (*peek)(struct Qdisc *); int (*init)(struct Qdisc *, struct nlattr *); void (*reset)(struct Qdisc *); void (*destroy)(struct Qdisc *); int (*change)(struct Qdisc *, struct nlattr *); void (*attach)(struct Qdisc *); int (*dump)(struct Qdisc *, struct sk_buff *); int (*dump_stats)(struct Qdisc *, struct gnet_dump *); struct module *owner; } ;
183 struct tcf_result { unsigned long class; u32 classid; } ;
189 struct tcf_proto_ops { struct list_head head; char kind[16U]; int (*classify)(struct sk_buff *, const struct tcf_proto *, struct tcf_result *); int (*init)(struct tcf_proto *); bool (*destroy)(struct tcf_proto *, bool ); unsigned long int (*get)(struct tcf_proto *, u32 ); int (*change)(struct net *, struct sk_buff *, struct tcf_proto *, unsigned long, u32 , struct nlattr **, unsigned long *, bool ); int (*delete)(struct tcf_proto *, unsigned long); void (*walk)(struct tcf_proto *, struct tcf_walker *); int (*dump)(struct net *, struct tcf_proto *, unsigned long, struct sk_buff *, struct tcmsg *); struct module *owner; } ;
214 struct tcf_proto { struct tcf_proto *next; void *root; int (*classify)(struct sk_buff *, const struct tcf_proto *, struct tcf_result *); __be16 protocol; u32 prio; u32 classid; struct Qdisc *q; void *data; const struct tcf_proto_ops *ops; struct callback_head rcu; } ;
806 struct sock_filter { __u16 code; __u8 jt; __u8 jf; __u32 k; } ;
49 struct bpf_insn { __u8 code; unsigned char dst_reg; unsigned char src_reg; __s16 off; __s32 imm; } ;
88 enum bpf_prog_type { BPF_PROG_TYPE_UNSPEC = 0, BPF_PROG_TYPE_SOCKET_FILTER = 1, BPF_PROG_TYPE_KPROBE = 2, BPF_PROG_TYPE_SCHED_CLS = 3, BPF_PROG_TYPE_SCHED_ACT = 4, BPF_PROG_TYPE_TRACEPOINT = 5, BPF_PROG_TYPE_XDP = 6 } ;
472 struct bpf_prog_aux ;
323 struct sock_fprog_kern { u16 len; struct sock_filter *filter; } ;
334 union __anonunion____missing_field_name_504 { struct sock_filter insns[0U]; struct bpf_insn insnsi[0U]; } ;
334 struct bpf_prog { u16 pages; unsigned char jited; unsigned char gpl_compatible; unsigned char cb_access; unsigned char dst_needed; u32 len; enum bpf_prog_type type; struct bpf_prog_aux *aux; struct sock_fprog_kern *orig_prog; unsigned int (*bpf_func)(const struct sk_buff *, const struct bpf_insn *); union __anonunion____missing_field_name_504 __annonCompField118; } ;
355 struct sk_filter { atomic_t refcnt; struct callback_head rcu; struct bpf_prog *prog; } ;
138 struct pollfd { int fd; short events; short revents; } ;
32 struct poll_table_struct { void (*_qproc)(struct file *, wait_queue_head_t *, struct poll_table_struct *); unsigned long _key; } ;
187 struct neigh_table ;
187 struct neigh_parms { possible_net_t net; struct net_device *dev; struct list_head list; int (*neigh_setup)(struct neighbour *); void (*neigh_cleanup)(struct neighbour *); struct neigh_table *tbl; void *sysctl_table; int dead; atomic_t refcnt; struct callback_head callback_head; int reachable_time; int data[13U]; unsigned long data_state[1U]; } ;
110 struct neigh_statistics { unsigned long allocs; unsigned long destroys; unsigned long hash_grows; unsigned long res_failed; unsigned long lookups; unsigned long hits; unsigned long rcv_probes_mcast; unsigned long rcv_probes_ucast; unsigned long periodic_gc_runs; unsigned long forced_gc_runs; unsigned long unres_discards; unsigned long table_fulls; } ;
130 struct neigh_ops ;
130 struct neighbour { struct neighbour *next; struct neigh_table *tbl; struct neigh_parms *parms; unsigned long confirmed; unsigned long updated; rwlock_t lock; atomic_t refcnt; struct sk_buff_head arp_queue; unsigned int arp_queue_len_bytes; struct timer_list timer; unsigned long used; atomic_t probes; __u8 flags; __u8 nud_state; __u8 type; __u8 dead; seqlock_t ha_lock; unsigned char ha[32U]; struct hh_cache hh; int (*output)(struct neighbour *, struct sk_buff *); const struct neigh_ops *ops; struct callback_head rcu; struct net_device *dev; u8 primary_key[0U]; } ;
159 struct neigh_ops { int family; void (*solicit)(struct neighbour *, struct sk_buff *); void (*error_report)(struct neighbour *, struct sk_buff *); int (*output)(struct neighbour *, struct sk_buff *); int (*connected_output)(struct neighbour *, struct sk_buff *); } ;
167 struct pneigh_entry { struct pneigh_entry *next; possible_net_t net; struct net_device *dev; u8 flags; u8 key[0U]; } ;
175 struct neigh_hash_table { struct neighbour **hash_buckets; unsigned int hash_shift; __u32 hash_rnd[4U]; struct callback_head rcu; } ;
188 struct neigh_table { int family; int entry_size; int key_len; __be16 protocol; __u32 (*hash)(const void *, const struct net_device *, __u32 *); bool (*key_eq)(const struct neighbour *, const void *); int (*constructor)(struct neighbour *); int (*pconstructor)(struct pneigh_entry *); void (*pdestructor)(struct pneigh_entry *); void (*proxy_redo)(struct sk_buff *); char *id; struct neigh_parms parms; struct list_head parms_list; int gc_interval; int gc_thresh1; int gc_thresh2; int gc_thresh3; unsigned long last_flush; struct delayed_work gc_work; struct timer_list proxy_timer; struct sk_buff_head proxy_queue; atomic_t entries; rwlock_t lock; unsigned long last_rand; struct neigh_statistics *stats; struct neigh_hash_table *nht; struct pneigh_entry **phash_buckets; } ;
520 struct lwtunnel_state ;
520 struct dn_route ;
520 union __anonunion____missing_field_name_520 { struct dst_entry *next; struct rtable *rt_next; struct rt6_info *rt6_next; struct dn_route *dn_next; } ;
520 struct dst_entry { struct callback_head callback_head; struct dst_entry *child; struct net_device *dev; struct dst_ops *ops; unsigned long _metrics; unsigned long expires; struct dst_entry *path; struct dst_entry *from; struct xfrm_state *xfrm; int (*input)(struct sk_buff *); int (*output)(struct net *, struct sock *, struct sk_buff *); unsigned short flags; unsigned short pending_confirm; short error; short obsolete; unsigned short header_len; unsigned short trailer_len; __u32 tclassid; long __pad_to_align_refcnt[2U]; atomic_t __refcnt; int __use; unsigned long lastuse; struct lwtunnel_state *lwtstate; union __anonunion____missing_field_name_520 __annonCompField119; } ;
110 struct __anonstruct_socket_lock_t_521 { spinlock_t slock; int owned; wait_queue_head_t wq; struct lockdep_map dep_map; } ;
110 typedef struct __anonstruct_socket_lock_t_521 socket_lock_t;
110 struct proto ;
116 typedef __u32 __portpair;
117 typedef __u64 __addrpair;
118 struct __anonstruct____missing_field_name_523 { __be32 skc_daddr; __be32 skc_rcv_saddr; } ;
118 union __anonunion____missing_field_name_522 { __addrpair skc_addrpair; struct __anonstruct____missing_field_name_523 __annonCompField120; } ;
118 union __anonunion____missing_field_name_524 { unsigned int skc_hash; __u16 skc_u16hashes[2U]; } ;
118 struct __anonstruct____missing_field_name_526 { __be16 skc_dport; __u16 skc_num; } ;
118 union __anonunion____missing_field_name_525 { __portpair skc_portpair; struct __anonstruct____missing_field_name_526 __annonCompField123; } ;
118 union __anonunion____missing_field_name_527 { struct hlist_node skc_bind_node; struct hlist_node skc_portaddr_node; } ;
118 struct inet_timewait_death_row ;
118 union __anonunion____missing_field_name_528 { unsigned long skc_flags; struct sock *skc_listener; struct inet_timewait_death_row *skc_tw_dr; } ;
118 union __anonunion____missing_field_name_529 { struct hlist_node skc_node; struct hlist_nulls_node skc_nulls_node; } ;
118 union __anonunion____missing_field_name_530 { int skc_incoming_cpu; u32 skc_rcv_wnd; u32 skc_tw_rcv_nxt; } ;
118 union __anonunion____missing_field_name_531 { u32 skc_rxhash; u32 skc_window_clamp; u32 skc_tw_snd_nxt; } ;
118 struct sock_common { union __anonunion____missing_field_name_522 __annonCompField121; union __anonunion____missing_field_name_524 __annonCompField122; union __anonunion____missing_field_name_525 __annonCompField124; unsigned short skc_family; volatile unsigned char skc_state; unsigned char skc_reuse; unsigned char skc_reuseport; unsigned char skc_ipv6only; unsigned char skc_net_refcnt; int skc_bound_dev_if; union __anonunion____missing_field_name_527 __annonCompField125; struct proto *skc_prot; possible_net_t skc_net; struct in6_addr skc_v6_daddr; struct in6_addr skc_v6_rcv_saddr; atomic64_t skc_cookie; union __anonunion____missing_field_name_528 __annonCompField126; int skc_dontcopy_begin[0U]; union __anonunion____missing_field_name_529 __annonCompField127; int skc_tx_queue_mapping; union __anonunion____missing_field_name_530 __annonCompField128; atomic_t skc_refcnt; int skc_dontcopy_end[0U]; union __anonunion____missing_field_name_531 __annonCompField129; } ;
230 struct __anonstruct_sk_backlog_532 { atomic_t rmem_alloc; int len; struct sk_buff *head; struct sk_buff *tail; } ;
230 union __anonunion____missing_field_name_533 { struct socket_wq *sk_wq; struct socket_wq *sk_wq_raw; } ;
230 struct sock_reuseport ;
230 struct sock { struct sock_common __sk_common; socket_lock_t sk_lock; struct sk_buff_head sk_receive_queue; struct __anonstruct_sk_backlog_532 sk_backlog; int sk_forward_alloc; __u32 sk_txhash; unsigned int sk_napi_id; unsigned int sk_ll_usec; atomic_t sk_drops; int sk_rcvbuf; struct sk_filter *sk_filter; union __anonunion____missing_field_name_533 __annonCompField130; struct xfrm_policy *sk_policy[2U]; struct dst_entry *sk_rx_dst; struct dst_entry *sk_dst_cache; atomic_t sk_wmem_alloc; atomic_t sk_omem_alloc; int sk_sndbuf; struct sk_buff_head sk_write_queue; unsigned char sk_padding; unsigned char sk_no_check_tx; unsigned char sk_no_check_rx; unsigned char sk_userlocks; unsigned char sk_protocol; unsigned short sk_type; int sk_wmem_queued; gfp_t sk_allocation; u32 sk_pacing_rate; u32 sk_max_pacing_rate; netdev_features_t sk_route_caps; netdev_features_t sk_route_nocaps; int sk_gso_type; unsigned int sk_gso_max_size; u16 sk_gso_max_segs; int sk_rcvlowat; unsigned long sk_lingertime; struct sk_buff_head sk_error_queue; struct proto *sk_prot_creator; rwlock_t sk_callback_lock; int sk_err; int sk_err_soft; u32 sk_ack_backlog; u32 sk_max_ack_backlog; __u32 sk_priority; __u32 sk_mark; struct pid *sk_peer_pid; const struct cred *sk_peer_cred; long sk_rcvtimeo; long sk_sndtimeo; struct timer_list sk_timer; ktime_t sk_stamp; u16 sk_tsflags; u8 sk_shutdown; u32 sk_tskey; struct socket *sk_socket; void *sk_user_data; struct page_frag sk_frag; struct sk_buff *sk_send_head; __s32 sk_peek_off; int sk_write_pending; void *sk_security; struct sock_cgroup_data sk_cgrp_data; struct mem_cgroup *sk_memcg; void (*sk_state_change)(struct sock *); void (*sk_data_ready)(struct sock *); void (*sk_write_space)(struct sock *); void (*sk_error_report)(struct sock *); int (*sk_backlog_rcv)(struct sock *, struct sk_buff *); void (*sk_destruct)(struct sock *); struct sock_reuseport *sk_reuseport_cb; struct callback_head sk_rcu; } ;
948 struct request_sock_ops ;
949 struct timewait_sock_ops ;
950 struct inet_hashinfo ;
951 struct raw_hashinfo ;
965 struct udp_table ;
965 union __anonunion_h_544 { struct inet_hashinfo *hashinfo; struct udp_table *udp_table; struct raw_hashinfo *raw_hash; } ;
965 struct proto { void (*close)(struct sock *, long); int (*connect)(struct sock *, struct sockaddr *, int); int (*disconnect)(struct sock *, int); struct sock * (*accept)(struct sock *, int, int *); int (*ioctl)(struct sock *, int, unsigned long); int (*init)(struct sock *); void (*destroy)(struct sock *); void (*shutdown)(struct sock *, int); int (*setsockopt)(struct sock *, int, int, char *, unsigned int); int (*getsockopt)(struct sock *, int, int, char *, int *); int (*compat_setsockopt)(struct sock *, int, int, char *, unsigned int); int (*compat_getsockopt)(struct sock *, int, int, char *, int *); int (*compat_ioctl)(struct sock *, unsigned int, unsigned long); int (*sendmsg)(struct sock *, struct msghdr *, size_t ); int (*recvmsg)(struct sock *, struct msghdr *, size_t , int, int, int *); int (*sendpage)(struct sock *, struct page *, int, size_t , int); int (*bind)(struct sock *, struct sockaddr *, int); int (*backlog_rcv)(struct sock *, struct sk_buff *); void (*release_cb)(struct sock *); int (*hash)(struct sock *); void (*unhash)(struct sock *); void (*rehash)(struct sock *); int (*get_port)(struct sock *, unsigned short); void (*clear_sk)(struct sock *, int); unsigned int inuse_idx; bool (*stream_memory_free)(const struct sock *); void (*enter_memory_pressure)(struct sock *); atomic_long_t *memory_allocated; struct percpu_counter *sockets_allocated; int *memory_pressure; long *sysctl_mem; int *sysctl_wmem; int *sysctl_rmem; int max_header; bool no_autobind; struct kmem_cache *slab; unsigned int obj_size; int slab_flags; struct percpu_counter *orphan_count; struct request_sock_ops *rsk_prot; struct timewait_sock_ops *twsk_prot; union __anonunion_h_544 h; struct module *owner; char name[32U]; struct list_head node; int (*diag_destroy)(struct sock *, int); } ;
2266 struct request_sock_ops { int family; int obj_size; struct kmem_cache *slab; char *slab_name; int (*rtx_syn_ack)(const struct sock *, struct request_sock *); void (*send_ack)(const struct sock *, struct sk_buff *, struct request_sock *); void (*send_reset)(const struct sock *, struct sk_buff *); void (*destructor)(struct request_sock *); void (*syn_ack_timeout)(const struct request_sock *); } ;
46 struct request_sock { struct sock_common __req_common; struct request_sock *dl_next; u16 mss; u8 num_retrans; unsigned char cookie_ts; unsigned char num_timeout; u32 ts_recent; struct timer_list rsk_timer; const struct request_sock_ops *rsk_ops; struct sock *sk; u32 *saved_syn; u32 secid; u32 peer_secid; } ;
18 struct fib_rule_hdr { __u8 family; __u8 dst_len; __u8 src_len; __u8 tos; __u8 table; __u8 res1; __u8 res2; __u8 action; __u32 flags; } ;
68 struct fib_rule { struct list_head list; int iifindex; int oifindex; u32 mark; u32 mark_mask; u32 flags; u32 table; u8 action; u8 l3mdev; u32 target; __be64 tun_id; struct fib_rule *ctarget; struct net *fr_net; atomic_t refcnt; u32 pref; int suppress_ifgroup; int suppress_prefixlen; char iifname[16U]; char oifname[16U]; struct callback_head rcu; } ;
35 struct fib_lookup_arg { void *lookup_ptr; void *result; struct fib_rule *rule; u32 table; int flags; } ;
43 struct fib_rules_ops { int family; struct list_head list; int rule_size; int addr_size; int unresolved_rules; int nr_goto_rules; int (*action)(struct fib_rule *, struct flowi *, int, struct fib_lookup_arg *); bool (*suppress)(struct fib_rule *, struct fib_lookup_arg *); int (*match)(struct fib_rule *, struct flowi *, int); int (*configure)(struct fib_rule *, struct sk_buff *, struct fib_rule_hdr *, struct nlattr **); int (*delete)(struct fib_rule *); int (*compare)(struct fib_rule *, struct fib_rule_hdr *, struct nlattr **); int (*fill)(struct fib_rule *, struct sk_buff *, struct fib_rule_hdr *); size_t (*nlmsg_payload)(struct fib_rule *); void (*flush_cache)(struct fib_rules_ops *); int nlgroup; const struct nla_policy *policy; struct list_head rules_list; struct module *owner; struct net *fro_net; struct callback_head rcu; } ;
140 struct l3mdev_ops { u32 (*l3mdev_fib_table)(const struct net_device *); struct sk_buff * (*l3mdev_l3_rcv)(struct net_device *, struct sk_buff *, u16 ); struct rtable * (*l3mdev_get_rtable)(const struct net_device *, const struct flowi4 *); int (*l3mdev_get_saddr)(struct net_device *, struct flowi4 *); struct dst_entry * (*l3mdev_get_rt6_dst)(const struct net_device *, struct flowi6 *); int (*l3mdev_get_saddr6)(struct net_device *, const struct sock *, struct flowi6 *); } ;
106 struct ipv6hdr { unsigned char priority; unsigned char version; __u8 flow_lbl[3U]; __be16 payload_len; __u8 nexthdr; __u8 hop_limit; struct in6_addr saddr; struct in6_addr daddr; } ;
180 struct ipv6_stable_secret { bool initialized; struct in6_addr secret; } ;
64 struct ipv6_devconf { __s32 forwarding; __s32 hop_limit; __s32 mtu6; __s32 accept_ra; __s32 accept_redirects; __s32 autoconf; __s32 dad_transmits; __s32 rtr_solicits; __s32 rtr_solicit_interval; __s32 rtr_solicit_delay; __s32 force_mld_version; __s32 mldv1_unsolicited_report_interval; __s32 mldv2_unsolicited_report_interval; __s32 use_tempaddr; __s32 temp_valid_lft; __s32 temp_prefered_lft; __s32 regen_max_retry; __s32 max_desync_factor; __s32 max_addresses; __s32 accept_ra_defrtr; __s32 accept_ra_min_hop_limit; __s32 accept_ra_pinfo; __s32 ignore_routes_with_linkdown; __s32 accept_ra_rtr_pref; __s32 rtr_probe_interval; __s32 accept_ra_rt_info_max_plen; __s32 proxy_ndp; __s32 accept_source_route; __s32 accept_ra_from_local; __s32 optimistic_dad; __s32 use_optimistic; __s32 mc_forwarding; __s32 disable_ipv6; __s32 drop_unicast_in_l2_multicast; __s32 accept_dad; __s32 force_tllao; __s32 ndisc_notify; __s32 suppress_frag_ndisc; __s32 accept_ra_mtu; __s32 drop_unsolicited_na; struct ipv6_stable_secret stable_secret; __s32 use_oif_addrs_only; __s32 keep_addr_on_down; struct ctl_table_header *sysctl_header; } ;
328 struct timewait_sock_ops { struct kmem_cache *twsk_slab; char *twsk_slab_name; unsigned int twsk_obj_size; int (*twsk_unique)(struct sock *, struct sock *, void *); void (*twsk_destructor)(struct sock *); } ;
39 struct inet_timewait_death_row { atomic_t tw_count; struct inet_hashinfo *hashinfo; int sysctl_tw_recycle; int sysctl_max_tw_buckets; } ;
142 struct tcphdr { __be16 source; __be16 dest; __be32 seq; __be32 ack_seq; unsigned char res1; unsigned char doff; unsigned char fin; unsigned char syn; unsigned char rst; unsigned char psh; unsigned char ack; unsigned char urg; unsigned char ece; unsigned char cwr; __be16 window; __sum16 check; __be16 urg_ptr; } ;
100 struct ip6_sf_list { struct ip6_sf_list *sf_next; struct in6_addr sf_addr; unsigned long sf_count[2U]; unsigned char sf_gsresp; unsigned char sf_oldin; unsigned char sf_crcount; } ;
109 struct ifmcaddr6 { struct in6_addr mca_addr; struct inet6_dev *idev; struct ifmcaddr6 *next; struct ip6_sf_list *mca_sources; struct ip6_sf_list *mca_tomb; unsigned int mca_sfmode; unsigned char mca_crcount; unsigned long mca_sfcount[2U]; struct timer_list mca_timer; unsigned int mca_flags; int mca_users; atomic_t mca_refcnt; spinlock_t mca_lock; unsigned long mca_cstamp; unsigned long mca_tstamp; } ;
141 struct ifacaddr6 { struct in6_addr aca_addr; struct inet6_dev *aca_idev; struct rt6_info *aca_rt; struct ifacaddr6 *aca_next; int aca_users; atomic_t aca_refcnt; unsigned long aca_cstamp; unsigned long aca_tstamp; } ;
152 struct ipv6_devstat { struct proc_dir_entry *proc_dir_entry; struct ipstats_mib *ipv6; struct icmpv6_mib_device *icmpv6dev; struct icmpv6msg_mib_device *icmpv6msgdev; } ;
163 struct inet6_dev { struct net_device *dev; struct list_head addr_list; struct ifmcaddr6 *mc_list; struct ifmcaddr6 *mc_tomb; spinlock_t mc_lock; unsigned char mc_qrv; unsigned char mc_gq_running; unsigned char mc_ifc_count; unsigned char mc_dad_count; unsigned long mc_v1_seen; unsigned long mc_qi; unsigned long mc_qri; unsigned long mc_maxdelay; struct timer_list mc_gq_timer; struct timer_list mc_ifc_timer; struct timer_list mc_dad_timer; struct ifacaddr6 *ac_list; rwlock_t lock; atomic_t refcnt; __u32 if_flags; int dead; u8 rndid[8U]; struct timer_list regen_timer; struct list_head tempaddr_list; struct in6_addr token; struct neigh_parms *nd_parms; struct ipv6_devconf cnf; struct ipv6_devstat stats; struct timer_list rs_timer; __u8 rs_probes; __u8 addr_gen_mode; unsigned long tstamp; struct callback_head rcu; } ;
127 struct arphdr { __be16 ar_hrd; __be16 ar_pro; unsigned char ar_hln; unsigned char ar_pln; __be16 ar_op; } ;
47 struct prefix_info ;
98 struct nd_opt_hdr { __u8 nd_opt_type; __u8 nd_opt_len; } ;
103 struct ndisc_options { struct nd_opt_hdr *nd_opt_array[6U]; struct nd_opt_hdr *nd_opts_ri; struct nd_opt_hdr *nd_opts_ri_end; struct nd_opt_hdr *nd_useropts; struct nd_opt_hdr *nd_useropts_end; struct nd_opt_hdr *nd_802154_opt_array[3U]; } ;
134 struct ndisc_ops { int (*is_useropt)(u8 ); int (*parse_options)(const struct net_device *, struct nd_opt_hdr *, struct ndisc_options *); void (*update)(const struct net_device *, struct neighbour *, u32 , u8 , const struct ndisc_options *); int (*opt_addr_space)(const struct net_device *, u8 , struct neighbour *, u8 *, u8 **); void (*fill_addr_option)(const struct net_device *, struct sk_buff *, u8 , const u8 *); void (*prefix_rcv_add_addr)(struct net *, struct net_device *, const struct prefix_info *, struct inet6_dev *, struct in6_addr *, int, u32 , bool , bool , __u32 , u32 , bool ); } ;
1047 struct ipv4_addr_key { __be32 addr; int vif; } ;
23 union __anonunion____missing_field_name_583 { struct ipv4_addr_key a4; struct in6_addr a6; u32 key[4U]; } ;
23 struct inetpeer_addr { union __anonunion____missing_field_name_583 __annonCompField133; __u16 family; } ;
34 union __anonunion____missing_field_name_584 { struct list_head gc_list; struct callback_head gc_rcu; } ;
34 struct __anonstruct____missing_field_name_586 { atomic_t rid; } ;
34 union __anonunion____missing_field_name_585 { struct __anonstruct____missing_field_name_586 __annonCompField135; struct callback_head rcu; struct inet_peer *gc_next; } ;
34 struct inet_peer { struct inet_peer *avl_left; struct inet_peer *avl_right; struct inetpeer_addr daddr; __u32 avl_height; u32 metrics[16U]; u32 rate_tokens; unsigned long rate_last; union __anonunion____missing_field_name_584 __annonCompField134; union __anonunion____missing_field_name_585 __annonCompField136; __u32 dtime; atomic_t refcnt; } ;
65 struct inet_peer_base { struct inet_peer *root; seqlock_t lock; int total; } ;
174 struct fib_table { struct hlist_node tb_hlist; u32 tb_id; int tb_num_default; struct callback_head rcu; unsigned long *tb_data; unsigned long __data[0U]; } ;
48 struct uncached_list ;
49 struct rtable { struct dst_entry dst; int rt_genid; unsigned int rt_flags; __u16 rt_type; __u8 rt_is_input; __u8 rt_uses_gateway; int rt_iif; __be32 rt_gateway; u32 rt_pmtu; u32 rt_table_id; struct list_head rt_uncached; struct uncached_list *rt_uncached_list; } ;
213 struct in_ifaddr ;
70 struct hotplug_slot ;
70 struct pci_slot { struct pci_bus *bus; struct list_head list; struct hotplug_slot *hotplug; unsigned char number; struct kobject kobj; } ;
108 typedef int pci_power_t;
135 typedef unsigned int pci_channel_state_t;
136 enum pci_channel_state { pci_channel_io_normal = 1, pci_channel_io_frozen = 2, pci_channel_io_perm_failure = 3 } ;
161 typedef unsigned short pci_dev_flags_t;
188 typedef unsigned short pci_bus_flags_t;
245 struct pcie_link_state ;
246 struct pci_vpd ;
247 struct pci_sriov ;
249 struct pci_driver ;
249 union __anonunion____missing_field_name_594 { struct pci_sriov *sriov; struct pci_dev *physfn; } ;
249 struct pci_dev { struct list_head bus_list; struct pci_bus *bus; struct pci_bus *subordinate; void *sysdata; struct proc_dir_entry *procent; struct pci_slot *slot; unsigned int devfn; unsigned short vendor; unsigned short device; unsigned short subsystem_vendor; unsigned short subsystem_device; unsigned int class; u8 revision; u8 hdr_type; u8 pcie_cap; u8 msi_cap; u8 msix_cap; unsigned char pcie_mpss; u8 rom_base_reg; u8 pin; u16 pcie_flags_reg; unsigned long *dma_alias_mask; struct pci_driver *driver; u64 dma_mask; struct device_dma_parameters dma_parms; pci_power_t current_state; u8 pm_cap; unsigned char pme_support; unsigned char pme_interrupt; unsigned char pme_poll; unsigned char d1_support; unsigned char d2_support; unsigned char no_d1d2; unsigned char no_d3cold; unsigned char bridge_d3; unsigned char d3cold_allowed; unsigned char mmio_always_on; unsigned char wakeup_prepared; unsigned char runtime_d3cold; unsigned char ignore_hotplug; unsigned int d3_delay; unsigned int d3cold_delay; struct pcie_link_state *link_state; pci_channel_state_t error_state; struct device dev; int cfg_size; unsigned int irq; struct cpumask *irq_affinity; struct resource resource[17U]; bool match_driver; unsigned char transparent; unsigned char multifunction; unsigned char is_added; unsigned char is_busmaster; unsigned char no_msi; unsigned char no_64bit_msi; unsigned char block_cfg_access; unsigned char broken_parity_status; unsigned char irq_reroute_variant; unsigned char msi_enabled; unsigned char msix_enabled; unsigned char ari_enabled; unsigned char ats_enabled; unsigned char is_managed; unsigned char needs_freset; unsigned char state_saved; unsigned char is_physfn; unsigned char is_virtfn; unsigned char reset_fn; unsigned char is_hotplug_bridge; unsigned char __aer_firmware_first_valid; unsigned char __aer_firmware_first; unsigned char broken_intx_masking; unsigned char io_window_1k; unsigned char irq_managed; unsigned char has_secondary_link; unsigned char non_compliant_bars; pci_dev_flags_t dev_flags; atomic_t enable_cnt; u32 saved_config_space[16U]; struct hlist_head saved_cap_space; struct bin_attribute *rom_attr; int rom_attr_enabled; struct bin_attribute *res_attr[17U]; struct bin_attribute *res_attr_wc[17U]; const struct attribute_group **msi_irq_groups; struct pci_vpd *vpd; union __anonunion____missing_field_name_594 __annonCompField138; u16 ats_cap; u8 ats_stu; atomic_t ats_ref_cnt; phys_addr_t rom; size_t romlen; char *driver_override; } ;
452 struct pci_ops ;
452 struct msi_controller ;
452 struct pci_bus { struct list_head node; struct pci_bus *parent; struct list_head children; struct list_head devices; struct pci_dev *self; struct list_head slots; struct resource *resource[4U]; struct list_head resources; struct resource busn_res; struct pci_ops *ops; struct msi_controller *msi; void *sysdata; struct proc_dir_entry *procdir; unsigned char number; unsigned char primary; unsigned char max_bus_speed; unsigned char cur_bus_speed; char name[48U]; unsigned short bridge_ctl; pci_bus_flags_t bus_flags; struct device *bridge; struct device dev; struct bin_attribute *legacy_io; struct bin_attribute *legacy_mem; unsigned char is_added; } ;
576 struct pci_ops { int (*add_bus)(struct pci_bus *); void (*remove_bus)(struct pci_bus *); void * (*map_bus)(struct pci_bus *, unsigned int, int); int (*read)(struct pci_bus *, unsigned int, int, int, u32 *); int (*write)(struct pci_bus *, unsigned int, int, int, u32 ); } ;
606 struct pci_dynids { spinlock_t lock; struct list_head list; } ;
620 typedef unsigned int pci_ers_result_t;
630 struct pci_error_handlers { pci_ers_result_t (*error_detected)(struct pci_dev *, enum pci_channel_state ); pci_ers_result_t (*mmio_enabled)(struct pci_dev *); pci_ers_result_t (*link_reset)(struct pci_dev *); pci_ers_result_t (*slot_reset)(struct pci_dev *); void (*reset_notify)(struct pci_dev *, bool ); void (*resume)(struct pci_dev *); } ;
663 struct pci_driver { struct list_head node; const char *name; const struct pci_device_id *id_table; int (*probe)(struct pci_dev *, const struct pci_device_id *); void (*remove)(struct pci_dev *); int (*suspend)(struct pci_dev *, pm_message_t ); int (*suspend_late)(struct pci_dev *, pm_message_t ); int (*resume_early)(struct pci_dev *); int (*resume)(struct pci_dev *); void (*shutdown)(struct pci_dev *); int (*sriov_configure)(struct pci_dev *, int); const struct pci_error_handlers *err_handler; struct device_driver driver; struct pci_dynids dynids; } ;
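struct pci_driver and a pci_device_id table (the vmxnet3 one is initialized further below) form the standard PCI binding mechanism: the core matches devices against id_table and invokes probe()/remove(). A minimal registration sketch; my_probe and my_remove are hypothetical placeholders, not symbols from this trace:

    static const struct pci_device_id my_ids[] = {
            { PCI_DEVICE(0x15ad, 0x07b0) },  /* the 5549U/1968U pair in the table below */
            { 0, }
    };

    static struct pci_driver my_driver = {
            .name     = "my_driver",
            .id_table = my_ids,
            .probe    = my_probe,    /* int (*)(struct pci_dev *, const struct pci_device_id *) */
            .remove   = my_remove,   /* void (*)(struct pci_dev *) */
    };

    /* Registered via __pci_register_driver() and torn down with
     * pci_unregister_driver(), both declared later in this trace. */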
41 struct msix_entry { u32 vector; u16 entry; } ;
63 struct vlan_hdr { __be16 h_vlan_TCI; __be16 h_vlan_encapsulated_proto; } ;
646 struct ipv4_devconf { void *sysctl; int data[31U]; unsigned long state[1U]; } ;
20 struct ip_mc_list ;
20 struct in_device { struct net_device *dev; atomic_t refcnt; int dead; struct in_ifaddr *ifa_list; struct ip_mc_list *mc_list; struct ip_mc_list **mc_hash; int mc_count; spinlock_t mc_tomb_lock; struct ip_mc_list *mc_tomb; unsigned long mr_v1_seen; unsigned long mr_v2_seen; unsigned long mr_maxdelay; unsigned char mr_qrv; unsigned char mr_gq_running; unsigned char mr_ifc_count; struct timer_list mr_gq_timer; struct timer_list mr_ifc_timer; struct neigh_parms *arp_parms; struct ipv4_devconf cnf; struct callback_head callback_head; } ;
71 struct in_ifaddr { struct hlist_node hash; struct in_ifaddr *ifa_next; struct in_device *ifa_dev; struct callback_head callback_head; __be32 ifa_local; __be32 ifa_address; __be32 ifa_mask; __be32 ifa_broadcast; unsigned char ifa_scope; unsigned char ifa_prefixlen; __u32 ifa_flags; char ifa_label[16U]; __u32 ifa_valid_lft; __u32 ifa_preferred_lft; unsigned long ifa_cstamp; unsigned long ifa_tstamp; } ;
205 union __anonunion___u_601 { struct in_device *__val; char __c[1U]; } ;
259 struct UPT1_TxStats { u64 TSOPktsTxOK; u64 TSOBytesTxOK; u64 ucastPktsTxOK; u64 ucastBytesTxOK; u64 mcastPktsTxOK; u64 mcastBytesTxOK; u64 bcastPktsTxOK; u64 bcastBytesTxOK; u64 pktsTxError; u64 pktsTxDiscard; } ;
42 struct UPT1_RxStats { u64 LROPktsRxOK; u64 LROBytesRxOK; u64 ucastPktsRxOK; u64 ucastBytesRxOK; u64 mcastPktsRxOK; u64 mcastBytesRxOK; u64 bcastPktsRxOK; u64 bcastBytesRxOK; u64 pktsRxOutOfBuf; u64 pktsRxError; } ;
75 struct UPT1_RSSConf { u16 hashType; u16 hashFunc; u16 hashKeySize; u16 indTableSize; u8 hashKey[40U]; u8 indTable[128U]; } ;
147 struct Vmxnet3_TxDesc { __le64 addr; unsigned short len; unsigned char gen; unsigned char rsvd; unsigned char dtype; unsigned char ext1; unsigned short msscof; unsigned short hlen; unsigned char om; unsigned char eop; unsigned char cq; unsigned char ext2; unsigned char ti; unsigned short tci; } ;
155 struct Vmxnet3_TxDataDesc { u8 data[128U]; } ;
180 typedef u8 Vmxnet3_RxDataDesc;
181 struct Vmxnet3_TxCompDesc { unsigned short txdIdx; unsigned int ext1; __le32 ext2; __le32 ext3; unsigned int rsvd; unsigned char type; unsigned char gen; } ;
199 struct Vmxnet3_RxDesc { __le64 addr; unsigned short len; unsigned char btype; unsigned char dtype; unsigned short rsvd; unsigned char gen; u32 ext1; } ;
218 struct Vmxnet3_RxCompDesc { unsigned short rxdIdx; unsigned char ext1; unsigned char eop; unsigned char sop; unsigned short rqID; unsigned char rssType; unsigned char cnc; unsigned char ext2; __le32 rssHash; unsigned short len; unsigned char err; unsigned char ts; unsigned short tci; unsigned short csum; unsigned char tuc; unsigned char udp; unsigned char tcp; unsigned char ipc; unsigned char v6; unsigned char v4; unsigned char frg; unsigned char fcs; unsigned char type; unsigned char gen; } ;
288 struct Vmxnet3_RxCompDescExt { __le32 dword1; u8 segCnt; u8 dupAckCnt; __le16 tsDelta; __le32 dword2; unsigned short mss; unsigned char tuc; unsigned char udp; unsigned char tcp; unsigned char ipc; unsigned char v6; unsigned char v4; unsigned char frg; unsigned char fcs; unsigned char type; unsigned char gen; } ;
329 union Vmxnet3_GenericDesc { __le64 qword[2U]; __le32 dword[4U]; __le16 word[8U]; struct Vmxnet3_TxDesc txd; struct Vmxnet3_RxDesc rxd; struct Vmxnet3_TxCompDesc tcd; struct Vmxnet3_RxCompDesc rcd; struct Vmxnet3_RxCompDescExt rcdExt; } ;
376 struct Vmxnet3_GOSInfo { unsigned char gosBits; unsigned char gosType; unsigned short gosVer; unsigned short gosMisc; } ;
445 struct Vmxnet3_DriverInfo { __le32 version; struct Vmxnet3_GOSInfo gos; __le32 vmxnet3RevSpt; __le32 uptVerSpt; } ;
453 struct Vmxnet3_MiscConf { struct Vmxnet3_DriverInfo driverInfo; __le64 uptFeatures; __le64 ddPA; __le64 queueDescPA; __le32 ddLen; __le32 queueDescLen; __le32 mtu; __le16 maxNumRxSG; u8 numTxQueues; u8 numRxQueues; __le32 reserved[4U]; } ;
479 struct Vmxnet3_TxQueueConf { __le64 txRingBasePA; __le64 dataRingBasePA; __le64 compRingBasePA; __le64 ddPA; __le64 reserved; __le32 txRingSize; __le32 dataRingSize; __le32 compRingSize; __le32 ddLen; u8 intrIdx; u8 _pad1[1U]; __le16 txDataRingDescSize; u8 _pad2[4U]; } ;
496 struct Vmxnet3_RxQueueConf { __le64 rxRingBasePA[2U]; __le64 compRingBasePA; __le64 ddPA; __le64 rxDataRingBasePA; __le32 rxRingSize[2U]; __le32 compRingSize; __le32 ddLen; u8 intrIdx; u8 _pad1[1U]; __le16 rxDataRingDescSize; u8 _pad2[4U]; } ;
511 enum vmxnet3_intr_mask_mode { VMXNET3_IMM_AUTO = 0, VMXNET3_IMM_ACTIVE = 1, VMXNET3_IMM_LAZY = 2 } ;
517 enum vmxnet3_intr_type { VMXNET3_IT_AUTO = 0, VMXNET3_IT_INTX = 1, VMXNET3_IT_MSI = 2, VMXNET3_IT_MSIX = 3 } ;
524 struct Vmxnet3_IntrConf { bool autoMask; u8 numIntrs; u8 eventIntrIdx; u8 modLevels[25U]; __le32 intrCtrl; __le32 reserved[2U]; } ;
544 struct Vmxnet3_QueueStatus { bool stopped; u8 _pad[3U]; __le32 error; } ;
554 struct Vmxnet3_TxQueueCtrl { __le32 txNumDeferred; __le32 txThreshold; __le64 reserved; } ;
561 struct Vmxnet3_RxQueueCtrl { bool updateRxProd; u8 _pad[7U]; __le64 reserved; } ;
576 struct Vmxnet3_RxFilterConf { __le32 rxMode; __le16 mfTableLen; __le16 _pad1; __le64 mfTablePA; __le32 vfTable[128U]; } ;
584 struct Vmxnet3_PM_PktFilter { u8 maskSize; u8 patternSize; u8 mask[16U]; u8 pattern[128U]; u8 pad[6U]; } ;
602 struct Vmxnet3_PMConf { __le16 wakeUpEvents; u8 numFilters; u8 pad[5U]; struct Vmxnet3_PM_PktFilter filters[6U]; } ;
610 struct Vmxnet3_VariableLenConfDesc { __le32 confVer; __le32 confLen; __le64 confPA; } ;
617 struct Vmxnet3_TxQueueDesc { struct Vmxnet3_TxQueueCtrl ctrl; struct Vmxnet3_TxQueueConf conf; struct Vmxnet3_QueueStatus status; struct UPT1_TxStats stats; u8 _pad[88U]; } ;
628 struct Vmxnet3_RxQueueDesc { struct Vmxnet3_RxQueueCtrl ctrl; struct Vmxnet3_RxQueueConf conf; struct Vmxnet3_QueueStatus status; struct UPT1_RxStats stats; u8 __pad[88U]; } ;
638 struct Vmxnet3_SetPolling { u8 enablePolling; } ;
642 enum Vmxnet3_CoalesceMode { VMXNET3_COALESCE_DISABLED = 0, VMXNET3_COALESCE_ADAPT = 1, VMXNET3_COALESCE_STATIC = 2, VMXNET3_COALESCE_RBC = 3 } ;
649 struct Vmxnet3_CoalesceRbc { u32 rbc_rate; } ;
657 struct Vmxnet3_CoalesceStatic { u32 tx_depth; u32 tx_comp_depth; u32 rx_depth; } ;
663 union __anonunion_coalPara_602 { struct Vmxnet3_CoalesceRbc coalRbc; struct Vmxnet3_CoalesceStatic coalStatic; } ;
663 struct Vmxnet3_CoalesceScheme { enum Vmxnet3_CoalesceMode coalMode; union __anonunion_coalPara_602 coalPara; } ;
687 union Vmxnet3_CmdInfo { struct Vmxnet3_VariableLenConfDesc varConf; struct Vmxnet3_SetPolling setPolling; __le64 data[2U]; } ;
696 struct Vmxnet3_DSDevRead { struct Vmxnet3_MiscConf misc; struct Vmxnet3_IntrConf intrConf; struct Vmxnet3_RxFilterConf rxFilterConf; struct Vmxnet3_VariableLenConfDesc rssConfDesc; struct Vmxnet3_VariableLenConfDesc pmConfDesc; struct Vmxnet3_VariableLenConfDesc pluginConfDesc; } ;
706 union __anonunion_cu_603 { __le32 reserved1[4U]; union Vmxnet3_CmdInfo cmdInfo; } ;
706 struct Vmxnet3_DriverShared { __le32 magic; __le32 pad; struct Vmxnet3_DSDevRead devRead; __le32 ecr; __le32 reserved; union __anonunion_cu_603 cu; } ;
749 struct vmxnet3_cmd_ring { union Vmxnet3_GenericDesc *base; u32 size; u32 next2fill; u32 next2comp; u8 gen; dma_addr_t basePA; } ;
159 struct vmxnet3_comp_ring { union Vmxnet3_GenericDesc *base; u32 size; u32 next2proc; u8 gen; u8 intr_idx; dma_addr_t basePA; } ;
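The two ring structures implement the driver/device handshake through the gen field: a descriptor belongs to the consumer only while its gen bit matches the ring's current gen, and the ring flips its gen on every wrap. A hedged sketch of consuming completion descriptors under that discipline (the real driver additionally reads descriptor fields through little-endian accessors):

    union Vmxnet3_GenericDesc *gdesc = ring->base + ring->next2proc;

    while (gdesc->tcd.gen == ring->gen) {        /* descriptor is ours */
            /* ... handle the completed descriptor ... */
            vmxnet3_comp_ring_adv_next2proc(ring);   /* wraps, flips ring->gen */
            gdesc = ring->base + ring->next2proc;
    }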
178 struct vmxnet3_tx_data_ring { struct Vmxnet3_TxDataDesc *base; u32 size; dma_addr_t basePA; } ;
191 struct vmxnet3_tx_buf_info { u32 map_type; u16 len; u16 sop_idx; dma_addr_t dma_addr; struct sk_buff *skb; } ;
199 struct vmxnet3_tq_driver_stats { u64 drop_total; u64 drop_too_many_frags; u64 drop_oversized_hdr; u64 drop_hdr_inspect_err; u64 drop_tso; u64 tx_ring_full; u64 linearized; u64 copy_skb_header; u64 oversized_hdr; } ;
215 struct vmxnet3_tx_ctx { bool ipv4; bool ipv6; u16 mss; u32 eth_ip_hdr_size; u32 l4_hdr_size; u32 copy_size; union Vmxnet3_GenericDesc *sop_txd; union Vmxnet3_GenericDesc *eop_txd; } ;
228 struct vmxnet3_adapter ;
228 struct vmxnet3_tx_queue { char name[24U]; struct vmxnet3_adapter *adapter; spinlock_t tx_lock; struct vmxnet3_cmd_ring tx_ring; struct vmxnet3_tx_buf_info *buf_info; dma_addr_t buf_info_pa; struct vmxnet3_tx_data_ring data_ring; struct vmxnet3_comp_ring comp_ring; struct Vmxnet3_TxQueueCtrl *shared; struct vmxnet3_tq_driver_stats stats; bool stopped; int num_stop; int qid; u16 txdata_desc_size; } ;
246 enum vmxnet3_rx_buf_type { VMXNET3_RX_BUF_NONE = 0, VMXNET3_RX_BUF_SKB = 1, VMXNET3_RX_BUF_PAGE = 2 } ;
252 union __anonunion____missing_field_name_604 { struct sk_buff *skb; struct page *page; } ;
252 struct vmxnet3_rx_buf_info { enum vmxnet3_rx_buf_type buf_type; u16 len; union __anonunion____missing_field_name_604 __annonCompField139; dma_addr_t dma_addr; } ;
262 struct vmxnet3_rx_ctx { struct sk_buff *skb; u32 sop_idx; } ;
267 struct vmxnet3_rq_driver_stats { u64 drop_total; u64 drop_err; u64 drop_fcs; u64 rx_buf_alloc_failure; } ;
274 struct vmxnet3_rx_data_ring { Vmxnet3_RxDataDesc *base; dma_addr_t basePA; u16 desc_size; } ;
280 struct vmxnet3_rx_queue { char name[24U]; struct vmxnet3_adapter *adapter; struct napi_struct napi; struct vmxnet3_cmd_ring rx_ring[2U]; struct vmxnet3_rx_data_ring data_ring; struct vmxnet3_comp_ring comp_ring; struct vmxnet3_rx_ctx rx_ctx; u32 qid; u32 qid2; u32 dataRingQid; struct vmxnet3_rx_buf_info *buf_info[2U]; dma_addr_t buf_info_pa; struct Vmxnet3_RxQueueCtrl *shared; struct vmxnet3_rq_driver_stats stats; } ;
297 struct vmxnet3_intr { enum vmxnet3_intr_mask_mode mask_mode; enum vmxnet3_intr_type type; u8 num_intrs; u8 event_intr_idx; u8 mod_levels[17U]; char event_msi_vector_name[27U]; struct msix_entry msix_entries[17U]; } ;
319 struct vmxnet3_adapter { struct vmxnet3_tx_queue tx_queue[8U]; struct vmxnet3_rx_queue rx_queue[8U]; unsigned long active_vlans[64U]; struct vmxnet3_intr intr; spinlock_t cmd_lock; struct Vmxnet3_DriverShared *shared; struct Vmxnet3_PMConf *pm_conf; struct Vmxnet3_TxQueueDesc *tqd_start; struct Vmxnet3_RxQueueDesc *rqd_start; struct net_device *netdev; struct pci_dev *pdev; u8 *hw_addr0; u8 *hw_addr1; u8 version; bool rxcsum; bool lro; struct UPT1_RSSConf *rss_conf; bool rss; u32 num_rx_queues; u32 num_tx_queues; unsigned int skb_buf_size; int rx_buf_per_pkt; dma_addr_t shared_pa; dma_addr_t queue_desc_pa; dma_addr_t coal_conf_pa; u32 wol; u32 link_speed; u64 tx_timeout_count; u32 tx_ring_size; u32 rx_ring_size; u32 rx_ring2_size; u16 txdata_desc_size; u16 rxdata_desc_size; bool rxdataring_enabled; struct work_struct work; unsigned long state; int share_intr; struct Vmxnet3_CoalesceScheme *coal_conf; bool default_coal_mode; dma_addr_t adapter_pa; dma_addr_t pm_conf_pa; dma_addr_t rss_conf_pa; } ;
1220 union __anonunion_hdr_606 { void *ptr; struct ethhdr *eth; struct iphdr *ipv4; struct ipv6hdr *ipv6; struct tcphdr *tcp; } ;
473 struct vmxnet3_stat_desc { char desc[32U]; int offset; } ;
1 void * __builtin_memcpy(void *, const void *, unsigned long);
1 long int __builtin_expect(long, long);
218 void __read_once_size(const volatile void *p, void *res, int size);
243 void __write_once_size(volatile void *p, void *res, int size);
33 extern struct module __this_module;
72 void set_bit(long nr, volatile unsigned long *addr);
110 void clear_bit(long nr, volatile unsigned long *addr);
204 bool test_and_set_bit(long nr, volatile unsigned long *addr);
308 bool constant_test_bit(long nr, const volatile unsigned long *addr);
479 int fls64(__u64 x);
14 unsigned long int find_next_bit(const unsigned long *, unsigned long, unsigned long);
42 unsigned long int find_first_bit(const unsigned long *, unsigned long);
46 __u16 __fswab16(__u16 val);
148 void le32_add_cpu(__le32 *var, u32 val);
187 unsigned int fls_long(unsigned long l);
70 unsigned long int __rounddown_pow_of_two(unsigned long n);
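fls_long() and __rounddown_pow_of_two() are normally defined together; the latter keeps only the most significant set bit. A minimal sketch, assuming the standard kernel one-liner:

    /* Round n down to a power of two: fls_long(n) is the 1-based index of
     * the highest set bit, so shifting 1UL by one less isolates that bit.
     * As in the kernel, the result is undefined for n == 0. */
    unsigned long __rounddown_pow_of_two(unsigned long n)
    {
            return 1UL << (fls_long(n) - 1);
    }

For example, n = 3000 gives fls_long(n) = 12 and a result of 2048.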
278 void __pr_info(const char *, ...);
63 void __dynamic_dev_dbg(struct _ddebug *, const struct device *, const char *, ...);
69 void __dynamic_netdev_dbg(struct _ddebug *, const struct net_device *, const char *, ...);
411 int sprintf(char *, const char *, ...);
8 void ldv_dma_map_page();
9 void ldv_dma_mapping_error();
25 void INIT_LIST_HEAD(struct list_head *list);
87 void __bad_percpu_size();
295 void __bad_size_call_parameter();
71 void warn_slowpath_null(const char *, const int);
7 extern unsigned long page_offset_base;
23 unsigned long int __phys_addr(unsigned long);
31 void * __memcpy(void *, const void *, size_t );
56 void * __memset(void *, int, size_t );
114 int __bitmap_weight(const unsigned long *, unsigned int);
311 int bitmap_weight(const unsigned long *src, unsigned int nbits);
37 extern int nr_cpu_ids;
89 extern struct cpumask __cpu_online_mask;
478 unsigned int cpumask_weight(const struct cpumask *srcp);
24 int atomic_read(const atomic_t *v);
89 void atomic_inc(atomic_t *v);
115 bool atomic_dec_and_test(atomic_t *v);
170 int static_key_count(struct static_key *key);
180 bool static_key_false(struct static_key *key);
8 extern int __preempt_count;
67 void __preempt_count_add(int val);
72 void __preempt_count_sub(int val);
7 void __local_bh_disable_ip(unsigned long, unsigned int);
16 void local_bh_disable();
22 void __local_bh_enable_ip(unsigned long, unsigned int);
29 void local_bh_enable();
281 void lockdep_init_map(struct lockdep_map *, const char *, struct lock_class_key *, int);
334 void lock_acquire(struct lockdep_map *, unsigned int, int, int, int, struct lockdep_map *, unsigned long);
338 void lock_release(struct lockdep_map *, int, unsigned long);
571 void lockdep_rcu_suspicious(const char *, const int, const char *);
93 void __raw_spin_lock_init(raw_spinlock_t *, const char *, struct lock_class_key *);
22 void _raw_spin_lock(raw_spinlock_t *);
34 unsigned long int _raw_spin_lock_irqsave(raw_spinlock_t *);
41 void _raw_spin_unlock(raw_spinlock_t *);
45 void _raw_spin_unlock_irqrestore(raw_spinlock_t *, unsigned long);
289 raw_spinlock_t * spinlock_check(spinlock_t *lock);
300 void spin_lock(spinlock_t *lock);
345 void spin_unlock(spinlock_t *lock);
360 void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags);
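These spinlock declarations compose in the usual way: spin_lock_irqsave() is a macro over spinlock_check() and _raw_spin_lock_irqsave(), pairing with spin_unlock_irqrestore(). A generic usage sketch for a lock such as the adapter's cmd_lock, which is taken from both process and interrupt context:

    unsigned long flags;

    spin_lock_irqsave(&adapter->cmd_lock, flags);   /* disables local IRQs */
    /* ... issue a device command, e.g. via writel() ... */
    spin_unlock_irqrestore(&adapter->cmd_lock, flags);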
11 void dump_page(struct page *, const char *);
307 void __rcu_read_lock();
313 void __rcu_read_unlock();
110 bool rcu_is_watching();
486 void rcu_lock_acquire(struct lockdep_map *map);
491 void rcu_lock_release(struct lockdep_map *map);
496 extern struct lockdep_map rcu_lock_map;
500 int debug_lockdep_rcu_enabled();
502 int rcu_read_lock_held();
866 void rcu_read_lock();
920 void rcu_read_unlock();
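rcu_read_lock()/rcu_read_unlock() delimit read-side critical sections around lockless pointer reads; __in_dev_get_rcu(), declared later in this trace, is a typical consumer. A sketch of the canonical reader pattern, using fields from the in_device/in_ifaddr definitions above:

    struct in_device *in_dev;
    __be32 addr = 0;

    rcu_read_lock();
    in_dev = __in_dev_get_rcu(netdev);   /* pointer only valid inside the section */
    if (in_dev && in_dev->ifa_list)
            addr = in_dev->ifa_list->ifa_local;
    rcu_read_unlock();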
181 void __init_work(struct work_struct *, int);
353 extern struct workqueue_struct *system_wq;
430 bool queue_work_on(int, struct workqueue_struct *, struct work_struct *);
445 bool cancel_work_sync(struct work_struct *);
470 bool queue_work(struct workqueue_struct *wq, struct work_struct *work);
529 bool schedule_work(struct work_struct *work);
58 unsigned int readl(const volatile void *addr);
66 void writel(unsigned int val, volatile void *addr);
181 void * ioremap_nocache(resource_size_t , unsigned long);
192 void * ioremap(resource_size_t offset, unsigned long size);
197 void iounmap(volatile void *);
28 extern int cpu_number;
464 struct page * alloc_pages_current(gfp_t , unsigned int);
467 struct page * alloc_pages(gfp_t gfp_mask, unsigned int order);
131 void kmemcheck_mark_initialized(void *address, unsigned int n);
154 void kfree(const void *);
318 void * __kmalloc(size_t , gfp_t );
466 void * kmalloc(size_t size, gfp_t flags);
22 __sum16 csum_fold(__wsum sum);
87 __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len, __u8 proto, __wsum sum);
112 __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len, __u8 proto, __wsum sum);
179 __sum16 csum_ipv6_magic(const struct in6_addr *, const struct in6_addr *, __u32 , __u8 , __wsum );
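csum_tcpudp_magic() is conventionally a thin wrapper that folds the 32-bit pseudo-header sum produced by csum_tcpudp_nofold() into the final 16-bit checksum; a sketch matching the declarations above:

    __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len,
                              __u8 proto, __wsum sum)
    {
            /* csum_fold() collapses the 32-bit running sum to 16 bits
             * with end-around carry, then complements it. */
            return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
    }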
912 void * dev_get_drvdata(const struct device *dev);
917 void dev_set_drvdata(struct device *dev, void *data);
1135 void dev_err(const struct device *, const char *, ...);
1141 void _dev_info(const struct device *, const char *, ...);
37 void debug_dma_map_page(struct device *, struct page *, size_t , size_t , int, dma_addr_t , bool );
42 void debug_dma_mapping_error(struct device *, dma_addr_t );
44 void debug_dma_unmap_page(struct device *, dma_addr_t , size_t , int, bool );
53 void debug_dma_alloc_coherent(struct device *, size_t , dma_addr_t , void *);
141 struct page * compound_head(struct page *page);
11 extern struct tracepoint __tracepoint_page_ref_mod_and_test;
30 void __page_ref_mod_and_test(struct page *, int, int);
64 int page_ref_count(struct page *page);
136 int page_ref_dec_and_test(struct page *page);
443 int put_page_testzero(struct page *page);
560 void __put_page(struct page *);
752 void put_zone_device_page(struct page *page);
755 bool is_zone_device_page(const struct page *page);
775 void put_page(struct page *page);
1003 void * lowmem_page_address(const struct page *page);
125 int valid_dma_direction(int dma_direction);
28 extern struct dma_map_ops *dma_ops;
30 struct dma_map_ops * get_dma_ops(struct device *dev);
42 bool arch_dma_alloc_attrs(struct device **, gfp_t *);
46 int dma_supported(struct device *, u64 );
169 dma_addr_t ldv_dma_map_single_attrs_5(struct device *dev, void *ptr, size_t size, enum dma_data_direction dir, unsigned long attrs);
169 dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr, size_t size, enum dma_data_direction dir, unsigned long attrs);
192 void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir, unsigned long attrs);
239 dma_addr_t ldv_dma_map_page_6(struct device *dev, struct page *page, size_t offset, size_t size, enum dma_data_direction dir);
239 dma_addr_t dma_map_page(struct device *dev, struct page *page, size_t offset, size_t size, enum dma_data_direction dir);
258 void dma_unmap_page(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir);
404 void * dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs);
445 void * dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag);
451 void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_handle);
471 int ldv_dma_mapping_error_7(struct device *dev, dma_addr_t dma_addr);
471 int dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
503 int dma_set_mask(struct device *dev, u64 mask);
527 int dma_set_coherent_mask(struct device *dev, u64 mask);
608 void * dma_zalloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag);
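The ldv_dma_map_page()/ldv_dma_mapping_error() model functions near the top of this trace, together with the ldv_dma_map_*_N wrappers here, suggest the rule under verification: every streaming mapping created by dma_map_page()/dma_map_single_attrs() must be checked with dma_mapping_error() before the address is used or the function returns. A sketch of the compliant pattern:

    dma_addr_t dma;

    dma = dma_map_page(&pdev->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
    if (dma_mapping_error(&pdev->dev, dma)) {
            /* mapping failed; the cookie must not reach the device */
            put_page(page);
            return -EFAULT;
    }
    /* ... hand dma to the hardware; later dma_unmap_page(&pdev->dev,
     *     dma, PAGE_SIZE, DMA_FROM_DEVICE); ... */

A path that stores dma without the dma_mapping_error() check is exactly what the model functions flag.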
325 unsigned int skb_frag_size(const skb_frag_t *frag);
330 void skb_frag_size_set(skb_frag_t *frag, unsigned int size);
897 void consume_skb(struct sk_buff *);
974 int pskb_expand_head(struct sk_buff *, int, int, gfp_t );
1061 void __skb_set_hash(struct sk_buff *skb, __u32 hash, bool is_sw, bool is_l4);
1069 void skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type);
1183 unsigned char * skb_end_pointer(const struct sk_buff *skb);
1334 int skb_header_cloned(const struct sk_buff *skb);
1784 bool skb_is_nonlinear(const struct sk_buff *skb);
1789 unsigned int skb_headlen(const struct sk_buff *skb);
1905 unsigned char * skb_put(struct sk_buff *, unsigned int);
1936 unsigned char * __pskb_pull_tail(struct sk_buff *, int);
1952 int pskb_may_pull(struct sk_buff *skb, unsigned int len);
1967 unsigned int skb_headroom(const struct sk_buff *skb);
2126 unsigned char * skb_transport_header(const struct sk_buff *skb);
2143 unsigned char * skb_network_header(const struct sk_buff *skb);
2159 unsigned char * skb_mac_header(const struct sk_buff *skb);
2208 int skb_checksum_start_offset(const struct sk_buff *skb);
2218 int skb_transport_offset(const struct sk_buff *skb);
2391 struct sk_buff * __netdev_alloc_skb(struct net_device *, unsigned int, gfp_t );
2427 struct sk_buff * __netdev_alloc_skb_ip_align(struct net_device *dev, unsigned int length, gfp_t gfp);
2437 struct sk_buff * netdev_alloc_skb_ip_align(struct net_device *dev, unsigned int length);
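netdev_alloc_skb_ip_align() is the usual allocator for receive buffers; vmxnet3_rq_alloc_rx_buf(), declared below, refills VMXNET3_RX_BUF_SKB slots this way. A hedged sketch of the refill step (field names from vmxnet3_rx_buf_info above; error handling simplified):

    skb = __netdev_alloc_skb_ip_align(adapter->netdev, rbi->len, GFP_ATOMIC);
    if (skb == NULL) {
            rq->stats.rx_buf_alloc_failure++;
            break;                     /* retry on a later refill pass */
    }
    rbi->skb = skb;
    rbi->dma_addr = dma_map_single_attrs(&adapter->pdev->dev, skb->data,
                                         rbi->len, DMA_FROM_DEVICE, 0);
    /* per the rule above, rbi->dma_addr must now pass dma_mapping_error() */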
2527 struct page * skb_frag_page(const skb_frag_t *frag);
2613 void __skb_frag_set_page(skb_frag_t *frag, struct page *page);
2645 dma_addr_t skb_frag_dma_map(struct device *dev, const skb_frag_t *frag, size_t offset, size_t size, enum dma_data_direction dir);
2807 int __skb_linearize(struct sk_buff *skb);
2819 int skb_linearize(struct sk_buff *skb);
3746 void skb_checksum_none_assert(const struct sk_buff *skb);
23 struct iphdr * ip_hdr(const struct sk_buff *skb);
46 void msleep(unsigned int);
26 struct ethhdr * eth_hdr(const struct sk_buff *skb);
93 u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings);
139 int request_threaded_irq(unsigned int, irqreturn_t (*)(int, void *), irqreturn_t (*)(int, void *), unsigned long, const char *, void *);
144 int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *), unsigned long flags, const char *name, void *dev);
158 void free_irq(unsigned int, void *);
397 void __napi_schedule(struct napi_struct *);
400 bool napi_disable_pending(struct napi_struct *n);
414 bool napi_schedule_prep(struct napi_struct *n);
427 void napi_schedule(struct napi_struct *n);
464 void napi_complete(struct napi_struct *n);
501 void napi_disable(struct napi_struct *);
510 void napi_enable(struct napi_struct *n);
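The NAPI declarations pair into the standard interrupt-mitigation loop used by the vmxnet3 handlers declared below: the hard IRQ handler masks the vector and schedules polling; the poll callback processes at most budget packets and unmasks once it finishes early. A sketch of that shape:

    /* in the MSI-X Rx interrupt handler */
    if (napi_schedule_prep(&rq->napi)) {
            vmxnet3_disable_intr(adapter, rq->comp_ring.intr_idx);
            __napi_schedule(&rq->napi);
    }

    /* in the poll callback */
    done = vmxnet3_rq_rx_complete(rq, adapter, budget);
    if (done < budget) {
            napi_complete(napi);
            vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx);
    }
    return done;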
1946 struct netdev_queue * netdev_get_tx_queue(const struct net_device *dev, unsigned int index);
2041 void * netdev_priv(const struct net_device *dev);
2072 void netif_napi_add(struct net_device *, struct napi_struct *, int (*)(struct napi_struct *, int), int);
2392 int dev_close(struct net_device *);
2407 void free_netdev(struct net_device *);
2828 void netif_tx_start_queue(struct netdev_queue *dev_queue);
2854 void netif_tx_wake_queue(struct netdev_queue *);
2863 void netif_wake_queue(struct net_device *dev);
2878 void netif_tx_stop_queue(struct netdev_queue *dev_queue);
2895 void netif_tx_stop_all_queues(struct net_device *);
3082 bool netif_running(const struct net_device *dev);
3101 void netif_start_subqueue(struct net_device *dev, u16 queue_index);
3115 void netif_stop_subqueue(struct net_device *dev, u16 queue_index);
3142 void netif_wake_subqueue(struct net_device *, u16 );
3180 int netif_set_real_num_tx_queues(struct net_device *, unsigned int);
3183 int netif_set_real_num_rx_queues(struct net_device *, unsigned int);
3212 void __dev_kfree_skb_irq(struct sk_buff *, enum skb_free_reason );
3213 void __dev_kfree_skb_any(struct sk_buff *, enum skb_free_reason );
3234 void dev_kfree_skb_irq(struct sk_buff *skb);
3244 void dev_kfree_skb_any(struct sk_buff *skb);
3256 int netif_receive_skb(struct sk_buff *);
3257 gro_result_t napi_gro_receive(struct napi_struct *, struct sk_buff *);
3352 bool netif_carrier_ok(const struct net_device *dev);
3361 void netif_carrier_on(struct net_device *);
3363 void netif_carrier_off(struct net_device *);
3430 void netif_device_detach(struct net_device *);
3432 void netif_device_attach(struct net_device *);
3483 void __netif_tx_lock(struct netdev_queue *txq, int cpu);
3503 void __netif_tx_unlock(struct netdev_queue *txq);
3604 void netif_tx_disable(struct net_device *dev);
3674 int register_netdev(struct net_device *);
3675 void unregister_netdev(struct net_device *);
3892 void netdev_rss_key_fill(void *, size_t );
4259 void netdev_err(const struct net_device *, const char *, ...);
4263 void netdev_notice(const struct net_device *, const char *, ...);
4265 void netdev_info(const struct net_device *, const char *, ...);
27 void rtnl_lock();
28 void rtnl_unlock();
27 struct tcphdr * tcp_hdr(const struct sk_buff *skb);
32 unsigned int __tcp_hdrlen(const struct tcphdr *th);
37 unsigned int tcp_hdrlen(const struct sk_buff *skb);
81 struct ipv6hdr * ipv6_hdr(const struct sk_buff *skb);
994 int pci_enable_device(struct pci_dev *);
996 int pci_enable_device_mem(struct pci_dev *);
1011 void pci_disable_device(struct pci_dev *);
1014 void pci_set_master(struct pci_dev *);
1067 int pci_save_state(struct pci_dev *);
1068 void pci_restore_state(struct pci_dev *);
1081 int pci_set_power_state(struct pci_dev *, pci_power_t );
1082 pci_power_t pci_choose_state(struct pci_dev *, pm_message_t );
1085 int __pci_enable_wake(struct pci_dev *, pci_power_t , bool , bool );
1096 int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable);
1145 int pci_request_selected_regions(struct pci_dev *, int, const char *);
1147 void pci_release_selected_regions(struct pci_dev *, int);
1194 int __pci_register_driver(struct pci_driver *, struct module *, const char *);
1203 void pci_unregister_driver(struct pci_driver *);
1281 void pci_disable_msi(struct pci_dev *);
1285 void pci_disable_msix(struct pci_dev *);
1287 int pci_msi_enabled();
1288 int pci_enable_msi_range(struct pci_dev *, int, int);
1289 int pci_enable_msi_exact(struct pci_dev *dev, int nvec);
1296 int pci_enable_msix_range(struct pci_dev *, struct msix_entry *, int, int);
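pci_enable_msix_range() takes the msix_entry array (struct declared earlier) plus minimum and maximum vector counts, and returns the number actually granted or a negative errno; vmxnet3_acquire_msix_vectors(), declared below, is essentially a wrapper around it. Sketch:

    int i, ret;

    for (i = 0; i < nvec; i++)
            adapter->intr.msix_entries[i].entry = i;

    ret = pci_enable_msix_range(adapter->pdev,
                                adapter->intr.msix_entries, nvec, nvec);
    if (ret < 0)
            return ret;    /* min == max: all-or-nothing allocation */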
1606 void * pci_get_drvdata(struct pci_dev *pdev);
1611 void pci_set_drvdata(struct pci_dev *pdev, void *data);
113 int pci_set_dma_mask(struct pci_dev *dev, u64 mask);
118 int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask);
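These two wrappers support the usual probe-time mask negotiation behind vmxnet3_alloc_pci_resources(adapter, &dma64): try 64-bit DMA first, fall back to 32-bit, and record the outcome. A minimal sketch:

    if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
            if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
                    return -EIO;
            *dma64 = true;
    } else {
            if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
                    return -EIO;
            *dma64 = false;
    }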
36 __be16 eth_type_trans(struct sk_buff *, struct net_device *);
52 struct net_device * alloc_etherdev_mqs(int, unsigned int, unsigned int);
409 void __vlan_hwaccel_put_tag(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
481 __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type, int *depth);
525 __be16 vlan_get_protocol(struct sk_buff *skb);
203 struct in_device * __in_dev_get_rcu(const struct net_device *dev);
208 struct in_device * in_dev_get(const struct net_device *dev);
232 void in_dev_finish_destroy(struct in_device *);
234 void in_dev_put(struct in_device *idev);
138 void vmxnet3_cmd_ring_adv_next2fill(struct vmxnet3_cmd_ring *ring);
148 void vmxnet3_cmd_ring_adv_next2comp(struct vmxnet3_cmd_ring *ring);
154 int vmxnet3_cmd_ring_desc_avail(struct vmxnet3_cmd_ring *ring);
170 void vmxnet3_comp_ring_adv_next2proc(struct vmxnet3_comp_ring *ring);
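These four helpers maintain the producer/consumer indices of the ring structures defined earlier. A sketch of their conventional bodies, assuming the gen bit flips on every wrap as described at the completion-ring definition:

    void vmxnet3_cmd_ring_adv_next2fill(struct vmxnet3_cmd_ring *ring)
    {
            if (++ring->next2fill == ring->size) {
                    ring->next2fill = 0;
                    ring->gen ^= 1;    /* new lap, new ownership marker */
            }
    }

    int vmxnet3_cmd_ring_desc_avail(struct vmxnet3_cmd_ring *ring)
    {
            /* one slot stays unused so that full and empty differ */
            return (ring->next2comp > ring->next2fill ? 0 : ring->size) +
                   ring->next2comp - ring->next2fill - 1;
    }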
442 int vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter);
445 int vmxnet3_activate_dev(struct vmxnet3_adapter *adapter);
448 void vmxnet3_force_close(struct vmxnet3_adapter *adapter);
451 void vmxnet3_reset_dev(struct vmxnet3_adapter *adapter);
454 void vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter);
457 void vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter);
460 int vmxnet3_set_features(struct net_device *netdev, netdev_features_t features);
463 int vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size, u32 rx_ring_size, u32 rx_ring2_size, u16 txdata_desc_size, u16 rxdata_desc_size);
467 void vmxnet3_set_ethtool_ops(struct net_device *netdev);
470 struct rtnl_link_stats64 * vmxnet3_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats);
472 char vmxnet3_driver_name[8U];
33 char vmxnet3_driver_name[8U] = { 'v', 'm', 'x', 'n', 'e', 't', '3', '\x0' };
40 const struct pci_device_id vmxnet3_pciid_table[2U] = { { 5549U, 1968U, 4294967295U, 4294967295U, 0U, 0U, 0UL }, { 0U, 0U, 0U, 0U, 0U, 0U, 0UL } };
45 const struct pci_device_id __mod_pci__vmxnet3_pciid_table_device_table[2U] = { };
47 int enable_mq = 1;
50 void vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac);
56 void vmxnet3_enable_intr(struct vmxnet3_adapter *adapter, unsigned int intr_idx);
63 void vmxnet3_disable_intr(struct vmxnet3_adapter *adapter, unsigned int intr_idx);
73 void vmxnet3_enable_all_intrs(struct vmxnet3_adapter *adapter);
85 void vmxnet3_disable_all_intrs(struct vmxnet3_adapter *adapter);
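The interrupt enable/disable helpers conventionally write a per-vector interrupt mask register through the BAR0 mapping kept in adapter->hw_addr0, using writel() from above; the concrete register offset is device-specific and not visible in this trace, so VMXNET3_REG_IMR_BASE below is a stand-in assumption:

    /* VMXNET3_REG_IMR_BASE is hypothetical; writing 0 unmasks the vector,
     * writing 1 masks it. */
    static void my_enable_intr(struct vmxnet3_adapter *adapter,
                               unsigned int intr_idx)
    {
            writel(0, adapter->hw_addr0 + VMXNET3_REG_IMR_BASE + intr_idx * 8);
    }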
97 void vmxnet3_ack_events(struct vmxnet3_adapter *adapter, u32 events);
104 bool vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter);
111 void vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter);
119 void vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter);
127 void vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter);
139 void vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue);
173 void vmxnet3_process_events(struct vmxnet3_adapter *adapter);
313 void vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi, struct pci_dev *pdev);
330 int vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq, struct pci_dev *pdev, struct vmxnet3_adapter *adapter);
365 int vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter);
396 void vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter);
429 void vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter);
471 void vmxnet3_tq_init(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter);
501 int vmxnet3_tq_create(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter);
547 void vmxnet3_tq_cleanup_all(struct vmxnet3_adapter *adapter);
562 int vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx, int num_to_alloc, struct vmxnet3_adapter *adapter);
653 void vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd, struct vmxnet3_rx_buf_info *rbi);
671 int vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx, struct vmxnet3_tx_queue *tq, struct pci_dev *pdev, struct vmxnet3_adapter *adapter);
808 void vmxnet3_tq_init_all(struct vmxnet3_adapter *adapter);
835 int vmxnet3_parse_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, struct vmxnet3_tx_ctx *ctx, struct vmxnet3_adapter *adapter);
912 void vmxnet3_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, struct vmxnet3_tx_ctx *ctx, struct vmxnet3_adapter *adapter);
928 void vmxnet3_prepare_tso(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx);
947 int txd_estimate(const struct sk_buff *skb);
974 int vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter, struct net_device *netdev);
1140 netdev_tx_t vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
1152 void vmxnet3_rx_csum(struct vmxnet3_adapter *adapter, struct sk_buff *skb, union Vmxnet3_GenericDesc *gdesc);
1183 void vmxnet3_rx_error(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd, struct vmxnet3_rx_ctx *ctx, struct vmxnet3_adapter *adapter);
1210 u32 vmxnet3_get_hdr_len(struct vmxnet3_adapter *adapter, struct sk_buff *skb, union Vmxnet3_GenericDesc *gdesc);
1256 int vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter, int quota);
1545 void vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter);
1585 void vmxnet3_rq_cleanup_all(struct vmxnet3_adapter *adapter);
1594 void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter);
1644 void vmxnet3_rq_destroy_all_rxdataring(struct vmxnet3_adapter *adapter);
1664 int vmxnet3_rq_init(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter);
1716 int vmxnet3_rq_init_all(struct vmxnet3_adapter *adapter);
1735 int vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter);
1799 int vmxnet3_rq_create_all(struct vmxnet3_adapter *adapter);
1828 int vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget);
1844 int vmxnet3_poll(struct napi_struct *napi, int budget);
1865 int vmxnet3_poll_rx_only(struct napi_struct *napi, int budget);
1899 irqreturn_t vmxnet3_msix_tx(int irq, void *data);
1929 irqreturn_t vmxnet3_msix_rx(int irq, void *data);
1956 irqreturn_t vmxnet3_msix_event(int irq, void *data);
1978 irqreturn_t vmxnet3_intr(int irq, void *dev_id);
2004 void vmxnet3_netpoll(struct net_device *netdev);
2027 int vmxnet3_request_irqs(struct vmxnet3_adapter *adapter);
2148 void vmxnet3_free_irqs(struct vmxnet3_adapter *adapter);
2192 void vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter);
2206 int vmxnet3_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid);
2228 int vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid);
2250 u8 * vmxnet3_copy_mc(struct net_device *netdev);
2273 void vmxnet3_set_mc(struct net_device *netdev);
2361 void vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter);
2496 void vmxnet3_init_coalesce(struct vmxnet3_adapter *adapter);
2659 int vmxnet3_set_mac_addr(struct net_device *netdev, void *p);
2674 int vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64);
2744 void vmxnet3_free_pci_resources(struct vmxnet3_adapter *adapter);
2756 void vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter);
2867 int vmxnet3_open(struct net_device *netdev);
2924 int vmxnet3_close(struct net_device *netdev);
2966 int vmxnet3_change_mtu(struct net_device *netdev, int new_mtu);
3017 void vmxnet3_declare_features(struct vmxnet3_adapter *adapter, bool dma64);
3035 void vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac);
3059 int vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter, int nvec);
3087 void vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter);
3169 void vmxnet3_free_intr_resources(struct vmxnet3_adapter *adapter);
3181 void vmxnet3_tx_timeout(struct net_device *netdev);
3193 void vmxnet3_reset_work(struct work_struct *data);
3220 int vmxnet3_probe_device(struct pci_dev *pdev, const struct pci_device_id *id);
3493 void vmxnet3_remove_device(struct pci_dev *pdev);
3539 void vmxnet3_shutdown_device(struct pci_dev *pdev);
3569 int vmxnet3_suspend(struct device *device);
3680 int vmxnet3_resume(struct device *device);
3728 const struct dev_pm_ops vmxnet3_pm_ops = { 0, 0, &vmxnet3_suspend, &vmxnet3_resume, &vmxnet3_suspend, 0, 0, &vmxnet3_resume, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
3736 struct pci_driver vmxnet3_driver = { { 0, 0 }, (const char *)(&vmxnet3_driver_name), (const struct pci_device_id *)(&vmxnet3_pciid_table), &vmxnet3_probe_device, &vmxnet3_remove_device, 0, 0, 0, 0, &vmxnet3_shutdown_device, 0, 0, { 0, 0, 0, 0, (_Bool)0, 0, 0, 0, 0, 0, 0, 0, 0, 0, &vmxnet3_pm_ops, 0 }, { { { { { { 0 } }, 0U, 0U, 0, { 0, { 0, 0 }, 0, 0, 0UL } } } }, { 0, 0 } } };
3749 int vmxnet3_init_module();
3760 void vmxnet3_exit_module();
3788 void ldv_check_final_state();
3791 void ldv_check_return_value(int);
3794 void ldv_check_return_value_probe(int);
3797 void ldv_initialize();
3800 void ldv_handler_precall();
3803 int nondet_int();
3806 int LDV_IN_INTERRUPT = 0;
3809 void ldv_main0_sequence_infinite_withcheck_stateful();
27 size_t strlcpy(char *, const char *, size_t );
87 const char * kobject_name(const struct kobject *kobj);
104 int device_set_wakeup_enable(struct device *, bool );
865 const char * dev_name(const struct device *dev);
119 void ethtool_cmd_speed_set(struct ethtool_cmd *ep, __u32 speed);
83 u32 ethtool_op_get_link(struct net_device *);
1619 const char * pci_name(const struct pci_dev *pdev);
39 const struct vmxnet3_stat_desc vmxnet3_tq_dev_stats[11U] = { { "Tx Queue#", 0 }, { "  TSO pkts tx", 0 }, { "  TSO bytes tx", 8 }, { "  ucast pkts tx", 16 }, { "  ucast bytes tx", 24 }, { "  mcast pkts tx", 32 }, { "  mcast bytes tx", 40 }, { "  bcast pkts tx", 48 }, { "  bcast bytes tx", 56 }, { "  pkts tx err", 64 }, { "  pkts tx discard", 72 } };
56 const struct vmxnet3_stat_desc vmxnet3_tq_driver_stats[9U] = { { "  drv dropped tx total", 0 }, { "     too many frags", 8 }, { "     giant hdr", 16 }, { "     hdr err", 24 }, { "     tso", 32 }, { "  ring full", 40 }, { "  pkts linearized", 48 }, { "  hdr cloned", 56 }, { "  giant hdr", 64 } };
80 const struct vmxnet3_stat_desc vmxnet3_rq_dev_stats[11U] = { { "Rx Queue#", 0 }, { "  LRO pkts rx", 0 }, { "  LRO byte rx", 8 }, { "  ucast pkts rx", 16 }, { "  ucast bytes rx", 24 }, { "  mcast pkts rx", 32 }, { "  mcast bytes rx", 40 }, { "  bcast pkts rx", 48 }, { "  bcast bytes rx", 56 }, { "  pkts rx OOB", 64 }, { "  pkts rx err", 72 } };
96 const struct vmxnet3_stat_desc vmxnet3_rq_driver_stats[4U] = { { "  drv dropped rx total", 0 }, { "     err", 8 }, { "     fcs", 16 }, { "  rx buf alloc fail", 24 } };
110 const struct vmxnet3_stat_desc vmxnet3_global_stats[1U] = { { "tx timeout count", 8080 } };
169 int vmxnet3_get_sset_count(struct net_device *netdev, int sset);
194 int vmxnet3_get_regs_len(struct net_device *netdev);
207 void vmxnet3_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo);
222 void vmxnet3_get_strings(struct net_device *netdev, u32 stringset, u8 *buf);
302 void vmxnet3_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *buf);
355 void vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p);
449 void vmxnet3_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol);
459 int vmxnet3_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol);
477 int vmxnet3_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd);
499 void vmxnet3_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *param);
519 int vmxnet3_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *param);
662 int vmxnet3_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info, u32 *rules);
676 u32 vmxnet3_get_rss_indir_size(struct net_device *netdev);
685 int vmxnet3_get_rss(struct net_device *netdev, u32 *p, u8 *key, u8 *hfunc);
702 int vmxnet3_set_rss(struct net_device *netdev, const u32 *p, const u8 *key, const u8 hfunc);
730 int vmxnet3_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec);
765 int vmxnet3_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec);
885 const struct ethtool_ops vmxnet3_ethtool_ops = { &vmxnet3_get_settings, 0, &vmxnet3_get_drvinfo, &vmxnet3_get_regs_len, &vmxnet3_get_regs, &vmxnet3_get_wol, &vmxnet3_set_wol, 0, 0, 0, &ethtool_op_get_link, 0, 0, 0, &vmxnet3_get_coalesce, &vmxnet3_set_coalesce, &vmxnet3_get_ringparam, &vmxnet3_set_ringparam, 0, 0, 0, &vmxnet3_get_strings, 0, &vmxnet3_get_ethtool_stats, 0, 0, 0, 0, &vmxnet3_get_sset_count, &vmxnet3_get_rxnfc, 0, 0, 0, 0, &vmxnet3_get_rss_indir_size, &vmxnet3_get_rss, &vmxnet3_set_rss, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
950 void ldv_main1_sequence_infinite_withcheck_stateful();
10 void ldv_error();
7 bool ldv_is_err(const void *ptr);
14 void * ldv_err_ptr(long error);
21 long int ldv_ptr_err(const void *ptr);
28 bool ldv_is_err_or_null(const void *ptr);
5 int LDV_DMA_MAP_CALLS = 0;
return ;
}
-entry_point
{
3811 struct net_device *var_group1;
3812 int res_vmxnet3_open_73;
3813 int res_vmxnet3_close_74;
3814 struct sk_buff *var_group2;
3815 void *var_vmxnet3_set_mac_addr_68_p1;
3816 int var_vmxnet3_change_mtu_76_p1;
3817 unsigned short var_vmxnet3_vlan_rx_add_vid_57_p1;
3818 unsigned short var_vmxnet3_vlan_rx_add_vid_57_p2;
3819 unsigned short var_vmxnet3_vlan_rx_kill_vid_58_p1;
3820 unsigned short var_vmxnet3_vlan_rx_kill_vid_58_p2;
3821 struct device *var_group3;
3822 struct pci_dev *var_group4;
3823 const struct pci_device_id *var_vmxnet3_probe_device_84_p1;
3824 int res_vmxnet3_probe_device_84;
3825 int var_vmxnet3_intr_52_p0;
3826 void *var_vmxnet3_intr_52_p1;
3827 int var_vmxnet3_msix_event_51_p0;
3828 void *var_vmxnet3_msix_event_51_p1;
3829 int var_vmxnet3_msix_rx_50_p0;
3830 void *var_vmxnet3_msix_rx_50_p1;
3831 int var_vmxnet3_msix_tx_49_p0;
3832 void *var_vmxnet3_msix_tx_49_p1;
3833 int ldv_s_vmxnet3_netdev_ops_net_device_ops;
3834 int ldv_s_vmxnet3_driver_pci_driver;
3835 int tmp;
3836 int tmp___0;
3837 int tmp___1;
5571 ldv_s_vmxnet3_netdev_ops_net_device_ops = 0;
5576 ldv_s_vmxnet3_driver_pci_driver = 0;
5472 LDV_IN_INTERRUPT = 1;
5481 ldv_initialize() { /* Function call is skipped due to function is undefined */}
5568 ldv_handler_precall() { /* Function call is skipped due to function is undefined */}
5569 -vmxnet3_init_module()
{
3751 int tmp;
3751 __pr_info("%s - version %s\n", (char *)"VMware vmxnet3 virtual NIC driver", (char *)"1.4.9.0-k-NAPI") { /* Function call is skipped due to function is undefined */}
3753 tmp = __pci_register_driver(&vmxnet3_driver, &__this_module, "vmxnet3") { /* Function call is skipped due to function is undefined */}
3753 return tmp;;
}
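The inlined body above is the driver's module init. A sketch reconstructed from the two calls shown (macro names such as VMXNET3_DRIVER_DESC are assumptions; pci_register_driver() expands to the __pci_register_driver(&vmxnet3_driver, &__this_module, "vmxnet3") call the trace records):

/* Sketch, not the verified source text. */
static int __init vmxnet3_init_module(void)
{
	pr_info("%s - version %s\n", VMXNET3_DRIVER_DESC,
		"1.4.9.0-k-NAPI");
	return pci_register_driver(&vmxnet3_driver);
}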
5569 assume(!(tmp != 0));
5582 goto ldv_61620;
5582 tmp___1 = nondet_int() { /* Function call is skipped due to function is undefined */}
5582 assume(tmp___1 != 0);
5586 goto ldv_61619;
5583 ldv_61619:;
5587 tmp___0 = nondet_int() { /* Function call is skipped due to function is undefined */}
5587 switch (tmp___0);
5588 assume(!(tmp___0 == 0));
5692 assume(!(tmp___0 == 1));
5795 assume(!(tmp___0 == 2));
5895 assume(!(tmp___0 == 3));
5995 assume(!(tmp___0 == 4));
6095 assume(!(tmp___0 == 5));
6195 assume(!(tmp___0 == 6));
6295 assume(!(tmp___0 == 7));
6395 assume(!(tmp___0 == 8));
6495 assume(!(tmp___0 == 9));
6593 assume(!(tmp___0 == 10));
6693 assume(tmp___0 == 11);
6779 ldv_handler_precall() { /* Function call is skipped due to function is undefined */}
6780 -vmxnet3_resume(var_group3)
{
3682 int err;
3683 unsigned long flags;
3684 struct pci_dev *pdev;
3685 const struct device *__mptr;
3686 struct net_device *netdev;
3687 void *tmp;
3688 struct vmxnet3_adapter *adapter;
3689 void *tmp___0;
3690 _Bool tmp___1;
3691 int tmp___2;
3692 raw_spinlock_t *tmp___3;
3684 __mptr = (const struct device *)device;
3684 pdev = ((struct pci_dev *)__mptr) + 18446744073709551456UL;
3685 -pci_get_drvdata(pdev)
{
1608 void *tmp;
1608 -dev_get_drvdata((const struct device *)(&(pdev->dev)))
{
914 void *__CPAchecker_TMP_0 = (void *)(dev->driver_data);
914 return __CPAchecker_TMP_0;;
}
1608 return tmp;;
}
3685 netdev = (struct net_device *)tmp;
3686 -netdev_priv((const struct net_device *)netdev)
{
2043 return ((void *)dev) + 3072U;;
}
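netdev_priv() is plain pointer arithmetic; a sketch of the kernel definition behind the "+ 3072U" above (3072 is the NETDEV_ALIGN-rounded size of struct net_device in this build):

static inline void *netdev_priv(const struct net_device *dev)
{
	/* driver-private area sits right behind the aligned net_device */
	return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
}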
3686 adapter = (struct vmxnet3_adapter *)tmp___0;
3688 -netif_running((const struct net_device *)netdev)
{
3084 _Bool tmp;
3084 -constant_test_bit(0L, (const volatile unsigned long *)(&(dev->state)))
{
310 return (((int)(((unsigned long)(*(addr + ((unsigned long)(nr >> 6))))) >> (((int)nr) & 63))) & 1) != 0;;
}
3084 return ((int)tmp) != 0;;
}
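netif_running() reduces to a test of bit 0 (__LINK_STATE_START) in dev->state, which is exactly what the constant_test_bit() expansion above computes:

static inline bool netif_running(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_START, &dev->state);	/* bit 0 */
}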
3688 assume(!(tmp___1 == 0));
3688 tmp___2 = 0;
3688 assume(tmp___2 == 0);
3691 pci_set_power_state(pdev, 0) { /* Function call is skipped due to function is undefined */}
3692 pci_restore_state(pdev) { /* Function call is skipped due to function is undefined */}
3693 err = pci_enable_device_mem(pdev) { /* Function call is skipped due to function is undefined */}
3694 assume(!(err != 0));
3697 -pci_enable_wake(pdev, 0, 0)
{
1098 int tmp;
1099 tmp = __pci_enable_wake(dev, state, 0, (int)enable) { /* Function call is skipped due to function is undefined */}
1099 return tmp;;
}
3699 -vmxnet3_alloc_intr_resources(adapter)
{
3089 unsigned int cfg;
3090 unsigned long flags;
3091 raw_spinlock_t *tmp;
3092 int i;
3093 int nvec;
3094 int tmp___0;
3093 -spinlock_check(&(adapter->cmd_lock))
{
291 return &(lock->__annonCompField20.rlock);;
}
3093 flags = _raw_spin_lock_irqsave(tmp) { /* Function call is skipped due to function is undefined */}
3094 volatile void *__CPAchecker_TMP_0 = (volatile void *)(adapter->hw_addr1);
3094 -writel(4027383816U, __CPAchecker_TMP_0 + 32U)
{
66 Ignored inline assembler code
67 return ;;
}
3096 const volatile void *__CPAchecker_TMP_1 = (const volatile void *)(adapter->hw_addr1);
3096 -readl(__CPAchecker_TMP_1 + 32U)
{
60 unsigned int ret;
58 Ignored inline assembler code
58 return ret;;
}
3097 -spin_unlock_irqrestore(&(adapter->cmd_lock), flags)
{
362 _raw_spin_unlock_irqrestore(&(lock->__annonCompField20.rlock), flags) { /* Function call is skipped due to function is undefined */}
363 return ;;
}
3098 adapter->intr.type = (enum vmxnet3_intr_type )(cfg & 3U);
3099 adapter->intr.mask_mode = (enum vmxnet3_intr_mask_mode )((cfg >> 2) & 3U);
3101 assume(!(((unsigned int)(adapter->intr.type)) == 0U));
3106 assume(!(((unsigned int)(adapter->intr.type)) == 3U));
3149 assume(!(((unsigned int)(adapter->intr.type)) == 2U));
3158 adapter->num_rx_queues = 1U;
3159 _dev_info((const struct device *)(&(adapter->netdev->dev)), "Using INTx interrupt, #Rx queues: 1.\n") { /* Function call is skipped due to function is undefined */}
3161 adapter->intr.type = 1;
3164 adapter->intr.num_intrs = 1U;
3165 return ;;
}
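The writel/readl pair above is the BAR1 command handshake: 4027383816U is 0xF00D0008, the GET_CONF_INTR command, written to the CMD register at offset 0x20 (the "+ 32U"), and the word read back packs the interrupt configuration. A sketch of the decode, with register/command names assumed from vmxnet3_defs.h:

spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,	/* offset 0x20 */
		       VMXNET3_CMD_GET_CONF_INTR);	/* 0xF00D0008 */
cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
spin_unlock_irqrestore(&adapter->cmd_lock, flags);

adapter->intr.type = cfg & 0x3;		/* 0=auto, 1=INTx, 2=MSI, 3=MSI-X */
adapter->intr.mask_mode = (cfg >> 2) & 0x3;

In this path the assumes rule out auto, MSI-X and MSI, so the INTx branch forces a single rx queue and a single interrupt vector.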
3708 -spinlock_check(&(adapter->cmd_lock))
{
291 return &(lock->__annonCompField20.rlock);;
}
3708 flags = _raw_spin_lock_irqsave(tmp___3) { /* Function call is skipped due to function is undefined */}
3709 volatile void *__CPAchecker_TMP_0 = (volatile void *)(adapter->hw_addr1);
3709 -writel(3405643777U, __CPAchecker_TMP_0 + 32U)
{
66 Ignored inline assembler code
67 return ;;
}
3711 -spin_unlock_irqrestore(&(adapter->cmd_lock), flags)
{
362 _raw_spin_unlock_irqrestore(&(lock->__annonCompField20.rlock), flags) { /* Function call is skipped due to function is undefined */}
363 return ;;
}
3712 -vmxnet3_tq_cleanup_all(adapter)
{
549 int i;
551 i = 0;
551 goto ldv_60715;
551 assume(!(((u32 )i) < (adapter->num_tx_queues)));
558 return ;;
}
3713 -vmxnet3_rq_cleanup_all(adapter)
{
1587 int i;
1589 i = 0;
1589 goto ldv_60958;
1589 assume(((u32 )i) < (adapter->num_rx_queues));
1591 goto ldv_60957;
1590 ldv_60957:;
1590 -vmxnet3_rq_cleanup(((struct vmxnet3_rx_queue *)(&(adapter->rx_queue))) + ((unsigned long)i), adapter)
{
1547 unsigned int i;
1548 unsigned int ring_idx;
1549 struct Vmxnet3_RxDesc *rxd;
1550 unsigned int tmp;
1551 ring_idx = 0U;
1551 goto ldv_60951;
1551 assume(ring_idx <= 1U);
1553 goto ldv_60950;
1552 ldv_60950:;
1552 i = 0U;
1552 goto ldv_60948;
1552 assume(!((((rq->rx_ring)[ring_idx]).size) > i));
1574 ((rq->rx_ring)[ring_idx]).gen = 1U;
1575 tmp = 0U;
1575 ((rq->rx_ring)[ring_idx]).next2comp = tmp;
1575 ((rq->rx_ring)[ring_idx]).next2fill = tmp;
1551 ring_idx = ring_idx + 1U;
1552 ldv_60951:;
1551 assume(ring_idx <= 1U);
1553 goto ldv_60950;
1552 ldv_60950:;
1552 i = 0U;
1552 goto ldv_60948;
1552 assume(!((((rq->rx_ring)[ring_idx]).size) > i));
1574 ((rq->rx_ring)[ring_idx]).gen = 1U;
1575 tmp = 0U;
1575 ((rq->rx_ring)[ring_idx]).next2comp = tmp;
1575 ((rq->rx_ring)[ring_idx]).next2fill = tmp;
1551 ring_idx = ring_idx + 1U;
1552 ldv_60951:;
1551 assume(!(ring_idx <= 1U));
1579 rq->comp_ring.gen = 1U;
1580 rq->comp_ring.next2proc = 0U;
1581 return ;;
}
1589 i = i + 1;
1590 ldv_60958:;
1589 assume(!(((u32 )i) < (adapter->num_rx_queues)));
1596 return ;;
}
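With no rx buffers left to free along this path, cleanup reduces to resetting ring state. A sketch of the per-queue reset the unrolled loop above performs (VMXNET3_INIT_GEN is 1, matching the "gen = 1U" assignments):

for (ring_idx = 0; ring_idx < 2; ring_idx++) {
	rq->rx_ring[ring_idx].gen = VMXNET3_INIT_GEN;	/* generation bit */
	rq->rx_ring[ring_idx].next2fill = 0;
	rq->rx_ring[ring_idx].next2comp = 0;
}
rq->comp_ring.gen = VMXNET3_INIT_GEN;
rq->comp_ring.next2proc = 0;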
3715 -vmxnet3_reset_dev(adapter)
{
2610 unsigned long flags;
2611 raw_spinlock_t *tmp;
2611 -spinlock_check(&(adapter->cmd_lock))
{
291 return &(lock->__annonCompField20.rlock);;
}
2611 flags = _raw_spin_lock_irqsave(tmp) { /* Function call is skipped due to function is undefined */}
2612 volatile void *__CPAchecker_TMP_0 = (volatile void *)(adapter->hw_addr1);
2612 -writel(3405643778U, __CPAchecker_TMP_0 + 32U)
{
66 Ignored inline assembler code
67 return ;;
}
2613 -spin_unlock_irqrestore(&(adapter->cmd_lock), flags)
{
362 _raw_spin_unlock_irqrestore(&(lock->__annonCompField20.rlock), flags) { /* Function call is skipped due to function is undefined */}
363 return ;;
}
2614 return ;;
}
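The resume path drives the device through quiesce, cleanup, reset and activate. The magic constants written to BAR1 offset 0x20 in this trace are device commands; by value these appear to be (names assumed from vmxnet3_defs.h):

/* 0xCAFE0000 (3405643776U)  VMXNET3_CMD_ACTIVATE_DEV
 * 0xCAFE0001 (3405643777U)  VMXNET3_CMD_QUIESCE_DEV  - written at 3709 above
 * 0xCAFE0002 (3405643778U)  VMXNET3_CMD_RESET_DEV    - written at 2612 above
 * so vmxnet3_reset_dev() is one command write under cmd_lock: */
spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
spin_unlock_irqrestore(&adapter->cmd_lock, flags);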
3716 -vmxnet3_activate_dev(adapter)
{
2525 int err;
2526 int i;
2527 unsigned int ret;
2528 unsigned long flags;
2529 struct _ddebug descriptor;
2530 long tmp;
2531 raw_spinlock_t *tmp___0;
2529 descriptor.modname = "vmxnet3";
2529 descriptor.function = "vmxnet3_activate_dev";
2529 descriptor.filename = "/home/ldvuser/ldv/ref_launches/work/current--X--drivers--X--defaultlinux-4.8-rc1.tar.xz--X--331_1a--X--cpachecker/linux-4.8-rc1.tar.xz/csd_deg_dscv/11688/dscv_tempdir/dscv/ri/331_1a/drivers/net/vmxnet3/vmxnet3_drv.c";
2529 descriptor.format = "%s: skb_buf_size %d, rx_buf_per_pkt %d, ring sizes %u %u %u\n";
2529 descriptor.lineno = 2534U;
2529 descriptor.flags = 0U;
2529 tmp = __builtin_expect(((long)(descriptor.flags)) & 1L, 0L) { /* Function call is skipped due to function is undefined */}
2529 assume(tmp != 0L);
2529 const struct net_device *__CPAchecker_TMP_0 = (const struct net_device *)(adapter->netdev);
2529 __dynamic_netdev_dbg(&descriptor, __CPAchecker_TMP_0, "%s: skb_buf_size %d, rx_buf_per_pkt %d, ring sizes %u %u %u\n", (char *)(&(adapter->netdev->name)), adapter->skb_buf_size, adapter->rx_buf_per_pkt, ((adapter->tx_queue)[0]).tx_ring.size, ((((adapter->rx_queue)[0]).rx_ring)[0]).size, ((((adapter->rx_queue)[0]).rx_ring)[1]).size) { /* Function call is skipped due to function is undefined */}
2536 -vmxnet3_tq_init_all(adapter)
{
810 int i;
812 i = 0;
812 goto ldv_60780;
812 assume(!(((u32 )i) < (adapter->num_tx_queues)));
819 return ;;
}
2537 -vmxnet3_rq_init_all(adapter)
{
1718 int i;
1719 int err;
1720 long tmp;
1718 err = 0;
1720 i = 0;
1720 goto ldv_61006;
1720 assume(!(((u32 )i) < (adapter->num_rx_queues)));
1726 ldv_61004:;
1729 return err;;
}
2538 assume(!(err != 0));
2544 -vmxnet3_request_irqs(adapter)
{
2029 struct vmxnet3_intr *intr;
2030 int err;
2031 int i;
2032 int vector;
2033 int tmp;
2034 int tmp___0;
2035 struct vmxnet3_rx_queue *rq;
2029 intr = &(adapter->intr);
2030 err = 0;
2031 vector = 0;
2034 assume(!(((unsigned int)(adapter->intr.type)) == 3U));
2101 unsigned int __CPAchecker_TMP_2 = (unsigned int)(intr->type);
2101 assume(!(__CPAchecker_TMP_2 == 2U));
2107 adapter->num_rx_queues = 1U;
2108 void *__CPAchecker_TMP_4 = (void *)(adapter->netdev);
2108 -request_irq(adapter->pdev->irq, &vmxnet3_intr, 128UL, (const char *)(&(adapter->netdev->name)), __CPAchecker_TMP_4)
{
147 int tmp;
147 tmp = request_threaded_irq(irq, handler, (irqreturn_t (*)(int, void *))0, flags, name, dev) { /* Function call is skipped due to function is undefined */}
147 return tmp;;
}
2114 intr->num_intrs = ((unsigned int)((u8 )vector)) + 1U;
2115 assume(!(err != 0));
2121 i = 0;
2121 goto ldv_61117;
2121 assume(((u32 )i) < (adapter->num_rx_queues));
2123 goto ldv_61116;
2122 ldv_61116:;
2122 rq = ((struct vmxnet3_rx_queue *)(&(adapter->rx_queue))) + ((unsigned long)i);
2123 rq->qid = (u32 )i;
2124 rq->qid2 = (adapter->num_rx_queues) + ((u32 )i);
2125 rq->dataRingQid = ((adapter->num_rx_queues) * 2U) + ((u32 )i);
2121 i = i + 1;
2122 ldv_61117:;
2121 assume(!(((u32 )i) < (adapter->num_rx_queues)));
2129 i = 0;
2129 goto ldv_61120;
2129 int __CPAchecker_TMP_7 = (int)(intr->num_intrs);
2129 assume(__CPAchecker_TMP_7 > i);
2131 goto ldv_61119;
2130 ldv_61119:;
2130 (intr->mod_levels)[i] = 8U;
2129 i = i + 1;
2130 ldv_61120:;
2129 int __CPAchecker_TMP_7 = (int)(intr->num_intrs);
2129 assume(!(__CPAchecker_TMP_7 > i));
2131 assume(((unsigned int)(adapter->intr.type)) != 3U);
2132 adapter->intr.event_intr_idx = 0U;
2133 i = 0;
2133 goto ldv_61123;
2133 assume(!(((u32 )i) < (adapter->num_tx_queues)));
2135 ((adapter->rx_queue)[0]).comp_ring.intr_idx = 0U;
2138 const struct net_device *__CPAchecker_TMP_8 = (const struct net_device *)(adapter->netdev);
2138 unsigned int __CPAchecker_TMP_9 = (unsigned int)(intr->type);
2138 unsigned int __CPAchecker_TMP_10 = (unsigned int)(intr->mask_mode);
2138 int __CPAchecker_TMP_11 = (int)(intr->num_intrs);
2138 netdev_info(__CPAchecker_TMP_8, "intr type %u, mode %u, %u vectors allocated\n", __CPAchecker_TMP_9, __CPAchecker_TMP_10, __CPAchecker_TMP_11) { /* Function call is skipped due to function is undefined */}
2143 return err;;
}
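Since neither MSI-X (type 3) nor MSI (type 2) was assumed, this is the legacy INTx fallback: one shared interrupt line services the whole device, so only one rx queue is usable and every completion ring is bound to vector 0. A sketch (the 128UL flag above is IRQF_SHARED):

adapter->num_rx_queues = 1;
err = request_irq(adapter->pdev->irq, vmxnet3_intr, IRQF_SHARED,
		  adapter->netdev->name, adapter->netdev);
intr->num_intrs = vector + 1;		/* a single vector */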
2545 assume(!(err != 0));
2551 -vmxnet3_setup_driver_shared(adapter)
{
2363 struct Vmxnet3_DriverShared *shared;
2364 struct Vmxnet3_DSDevRead *devRead;
2365 struct Vmxnet3_TxQueueConf *tqc;
2366 struct Vmxnet3_RxQueueConf *rqc;
2367 int i;
2368 struct vmxnet3_tx_queue *tq;
2369 long tmp;
2370 struct vmxnet3_rx_queue *rq;
2371 struct UPT1_RSSConf *rssConf;
2372 unsigned int tmp___0;
2363 shared = adapter->shared;
2364 devRead = &(shared->devRead);
2369 __memset((void *)shared, 0, 720UL) { /* Function call is skipped due to function is undefined */}
2372 shared->magic = 3133079265U;
2373 devRead->misc.driverInfo.version = 17041664U;
2375 devRead->misc.driverInfo.gos.gosBits = 2U;
2377 devRead->misc.driverInfo.gos.gosType = 1U;
2378 *((u32 *)(&(devRead->misc.driverInfo.gos))) = *((u32 *)(&(devRead->misc.driverInfo.gos)));
2380 devRead->misc.driverInfo.vmxnet3RevSpt = 1U;
2381 devRead->misc.driverInfo.uptVerSpt = 1U;
2383 devRead->misc.ddPA = adapter->adapter_pa;
2384 devRead->misc.ddLen = 8256U;
2387 assume(((adapter->netdev->features) & 137438953472ULL) != 0ULL);
2388 devRead->misc.uptFeatures = (devRead->misc.uptFeatures) | 1ULL;
2390 assume(((adapter->netdev->features) & 32768ULL) != 0ULL);
2391 devRead->misc.uptFeatures = (devRead->misc.uptFeatures) | 8ULL;
2392 devRead->misc.maxNumRxSG = 18U;
2394 assume(!(((adapter->netdev->features) & 256ULL) != 0ULL));
2397 devRead->misc.mtu = adapter->netdev->mtu;
2398 devRead->misc.queueDescPA = adapter->queue_desc_pa;
2399 devRead->misc.queueDescLen = ((adapter->num_tx_queues) + (adapter->num_rx_queues)) * 256U;
2404 u8 __CPAchecker_TMP_0 = (u8 )(adapter->num_tx_queues);
2404 devRead->misc.numTxQueues = __CPAchecker_TMP_0;
2405 i = 0;
2405 goto ldv_61221;
2405 assume(!(((u32 )i) < (adapter->num_tx_queues)));
2424 u8 __CPAchecker_TMP_1 = (u8 )(adapter->num_rx_queues);
2424 devRead->misc.numRxQueues = __CPAchecker_TMP_1;
2425 i = 0;
2425 goto ldv_61225;
2425 assume(!(((u32 )i) < (adapter->num_rx_queues)));
2449 void *__CPAchecker_TMP_3 = (void *)(adapter->rss_conf);
2449 __memset(__CPAchecker_TMP_3, 0, 176UL) { /* Function call is skipped due to function is undefined */}
2451 int __CPAchecker_TMP_4 = (int)(adapter->rss);
2451 assume(__CPAchecker_TMP_4 == 0);
2478 devRead->intrConf.autoMask = ((unsigned int)(adapter->intr.mask_mode)) == 0U;
2480 devRead->intrConf.numIntrs = adapter->intr.num_intrs;
2481 i = 0;
2481 goto ldv_61232;
2481 assume(!(((int)(adapter->intr.num_intrs)) > i));
2484 devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx;
2485 devRead->intrConf.intrCtrl = (devRead->intrConf.intrCtrl) | 1U;
2488 devRead->rxFilterConf.rxMode = 0U;
2489 -vmxnet3_restore_vlan(adapter)
{
2194 u32 *vfTable;
2195 unsigned short vid;
2196 unsigned long tmp;
2197 unsigned long tmp___0;
2194 vfTable = (u32 *)(&(adapter->shared->devRead.rxFilterConf.vfTable));
2198 *vfTable = (*vfTable) | 1U;
2200 tmp = find_first_bit((const unsigned long *)(&(adapter->active_vlans)), 4096UL) { /* Function call is skipped due to function is undefined */}
2200 vid = (u16 )tmp;
2200 goto ldv_61151;
2200 assume(!(((unsigned int)vid) <= 4095U));
2207 return ;;
}
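vmxnet3_restore_vlan() rebuilds the device's VLAN filter table from the driver's active_vlans bitmap; entry 0 is always set so untagged frames pass (the "*vfTable | 1U" above). A sketch matching the find_first_bit() loop, helper names assumed:

u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
u16 vid;

VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);		/* allow untagged pkts */
for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
	VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);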
2490 -vmxnet3_write_mac_addr(adapter, adapter->netdev->dev_addr)
{
2648 unsigned int tmp;
2650 tmp = *((u32 *)mac);
2651 volatile void *__CPAchecker_TMP_0 = (volatile void *)(adapter->hw_addr1);
2651 -writel(tmp, __CPAchecker_TMP_0 + 40U)
{
66 Ignored inline assembler code
67 return ;;
}
2653 tmp = (u32 )((((int)(*(mac + 5UL))) << 8) | ((int)(*(mac + 4UL))));
2654 volatile void *__CPAchecker_TMP_1 = (volatile void *)(adapter->hw_addr1);
2654 -writel(tmp, __CPAchecker_TMP_1 + 48U)
{
66 Ignored inline assembler code
67 return ;;
}
2655 return ;;
}
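The two writel() calls above split the MAC address across two BAR1 registers: the first four bytes go to MACL (offset 0x28 = 40) and the last two to MACH (offset 0x30 = 48). A sketch, register names assumed:

static void
vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
{
	u32 tmp;

	tmp = *(u32 *)mac;			/* mac[0..3] */
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACL, tmp);

	tmp = (mac[5] << 8) | mac[4];		/* mac[4..5] */
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACH, tmp);
}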
2491 return ;;
}
2553 unsigned int __CPAchecker_TMP_3 = (unsigned int)(adapter->shared_pa);
2553 volatile void *__CPAchecker_TMP_4 = (volatile void *)(adapter->hw_addr1);
2553 -writel(__CPAchecker_TMP_3, __CPAchecker_TMP_4 + 16U)
{
66 Ignored inline assembler code
67 return ;;
}
2555 volatile void *__CPAchecker_TMP_5 = (volatile void *)(adapter->hw_addr1);
2555 -writel((unsigned int)((adapter->shared_pa) >> 32), __CPAchecker_TMP_5 + 24U)
{
66 Ignored inline assembler code
67 return ;;
}
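The two writel() calls above hand the device the 64-bit physical address of the shared area in two 32-bit halves (DSAL at BAR1 offset 0x10 = 16, DSAH at 0x18 = 24); the command write that follows (0xCAFE0000) is ACTIVATE_DEV, and its readback must be 0 for activation to succeed. Sketch, names assumed from vmxnet3_defs.h:

VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL,
		       VMXNET3_GET_ADDR_LO(adapter->shared_pa));
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH,
		       VMXNET3_GET_ADDR_HI(adapter->shared_pa));
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_ACTIVATE_DEV);
ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);	/* 0 on success */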
2557 -spinlock_check(&(adapter->cmd_lock))
{
291 return &(lock->__annonCompField20.rlock);;
}
2557 flags = _raw_spin_lock_irqsave(tmp___0) { /* Function call is skipped due to function is undefined */}
2558 volatile void *__CPAchecker_TMP_6 = (volatile void *)(adapter->hw_addr1);
2558 -writel(3405643776U, __CPAchecker_TMP_6 + 32U)
{
66 Ignored inline assembler code
67 return ;;
}
2560 const volatile void *__CPAchecker_TMP_7 = (const volatile void *)(adapter->hw_addr1);
2560 -readl(__CPAchecker_TMP_7 + 32U)
{
60 unsigned int ret;
58 Ignored inline assembler code
58 return ret;;
}
2561 -spin_unlock_irqrestore(&(adapter->cmd_lock), flags)
{
362 _raw_spin_unlock_irqrestore(&(lock->__annonCompField20.rlock), flags) { /* Function call is skipped due to function is undefined */}
363 return ;;
}
2563 assume(!(ret != 0U));
2570 -vmxnet3_init_coalesce(adapter)
{
2498 struct Vmxnet3_DriverShared *shared;
2499 union Vmxnet3_CmdInfo *cmdInfo;
2500 unsigned long flags;
2501 raw_spinlock_t *tmp;
2498 shared = adapter->shared;
2499 cmdInfo = &(shared->cu.cmdInfo);
2502 unsigned int __CPAchecker_TMP_0 = (unsigned int)(adapter->version);
2502 assume(!(__CPAchecker_TMP_0 <= 2U));
2505 -spinlock_check(&(adapter->cmd_lock))
{
291 return &(lock->__annonCompField20.rlock);;
}
2505 flags = _raw_spin_lock_irqsave(tmp) { /* Function call is skipped due to function is undefined */}
2506 cmdInfo->varConf.confVer = 1U;
2507 cmdInfo->varConf.confLen = 16U;
2509 cmdInfo->varConf.confPA = adapter->coal_conf_pa;
2511 int __CPAchecker_TMP_1 = (int)(adapter->default_coal_mode);
2511 assume(!(__CPAchecker_TMP_1 == 0));
2512 volatile void *__CPAchecker_TMP_2 = (volatile void *)(adapter->hw_addr1);
2512 -writel(4027383819U, __CPAchecker_TMP_2 + 32U)
{
66 Ignored inline assembler code
67 return ;;
}
2519 -spin_unlock_irqrestore(&(adapter->cmd_lock), flags)
{
362 _raw_spin_unlock_irqrestore(&(lock->__annonCompField20.rlock), flags) { /* Function call is skipped due to function is undefined */}
363 return ;;
}
2520 return ;;
}
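On devices at version 3 or newer, coalescing setup is a variable-configuration handoff: the shared cmdInfo area is filled with a version, a length and the DMA address of the driver's coalescing buffer, then a command write tells the device to pick it up. 4027383819U is 0xF00D000B, presumably VMXNET3_CMD_GET_COALESCE since default_coal_mode is set in this path. Sketch:

cmdInfo->varConf.confVer = 1;
cmdInfo->varConf.confLen = cpu_to_le32(sizeof(*adapter->coal_conf));	/* 16 */
cmdInfo->varConf.confPA  = cpu_to_le64(adapter->coal_conf_pa);
if (adapter->default_coal_mode)
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_GET_COALESCE);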
2572 i = 0;
2572 goto ldv_61259;
2572 assume(!(((u32 )i) < (adapter->num_rx_queues)));
2582 -vmxnet3_set_mc(adapter->netdev)
{
2275 struct vmxnet3_adapter *adapter;
2276 void *tmp;
2277 unsigned long flags;
2278 struct Vmxnet3_RxFilterConf *rxConf;
2279 u8 *new_table;
2280 unsigned long long new_table_pa;
2281 unsigned int new_mode;
2282 u32 *vfTable;
2283 unsigned long sz;
2284 int tmp___0;
2285 raw_spinlock_t *tmp___1;
2275 -netdev_priv((const struct net_device *)netdev)
{
2043 return ((void *)dev) + 3072U;;
}
2275 adapter = (struct vmxnet3_adapter *)tmp;
2277 rxConf = &(adapter->shared->devRead.rxFilterConf);
2279 new_table = (u8 *)0U;
2280 new_table_pa = 0ULL;
2281 new_mode = 1U;
2283 assume(!(((netdev->flags) & 256U) != 0U));
2289 -vmxnet3_restore_vlan(adapter)
{
2194 u32 *vfTable;
2195 unsigned short vid;
2196 unsigned long tmp;
2197 unsigned long tmp___0;
2194 vfTable = (u32 *)(&(adapter->shared->devRead.rxFilterConf.vfTable));
2198 *vfTable = (*vfTable) | 1U;
2200 tmp = find_first_bit((const unsigned long *)(&(adapter->active_vlans)), 4096UL) { /* Function call is skipped due to function is undefined */}
2200 vid = (u16 )tmp;
2200 goto ldv_61151;
2200 assume(!(((unsigned int)vid) <= 4095U));
2207 return ;;
}
2292 assume(!(((netdev->flags) & 2U) != 0U));
2295 assume(!(((netdev->flags) & 512U) != 0U));
2298 assume((netdev->mc.count) != 0);
2299 -vmxnet3_copy_mc(netdev)
{
2252 u8 *buf;
2253 unsigned int sz;
2254 void *tmp;
2255 struct netdev_hw_addr *ha;
2256 int i;
2257 const struct list_head *__mptr;
2258 int tmp___0;
2259 const struct list_head *__mptr___0;
2252 buf = (u8 *)0U;
2253 sz = (u32 )((netdev->mc.count) * 6);
2256 assume(!(sz <= 65535U));
2268 return buf;;
}
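The multicast table copied to the device is netdev_mc_count() * ETH_ALEN bytes and must fit the 16-bit mfTableLen field; the trace assumes sz > 65535, so no buffer is allocated and NULL is returned. A sketch of the function:

static u8 *vmxnet3_copy_mc(struct net_device *netdev)
{
	u8 *buf = NULL;
	u32 sz = netdev_mc_count(netdev) * ETH_ALEN;

	/* Vmxnet3_RxFilterConf.mfTableLen is a u16 */
	if (sz <= 0xffff)
		buf = kmalloc(sz, GFP_ATOMIC);	/* then filled from the mc list */
	return buf;
}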
2300 assume(!(((unsigned long)new_table) != ((unsigned long)((u8 *)0U))));
2311 -dma_mapping_error(&(adapter->pdev->dev), new_table_pa)
{
53 int tmp;
54 -ldv_dma_mapping_error()
{
18 assume(LDV_DMA_MAP_CALLS == 0);
18 -ldv_error()
{
15 LDV_ERROR:;
}
}
}
}
}
}
}
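This is the reported defect: vmxnet3_copy_mc() returned NULL, so new_table_pa (still 0) was never produced by dma_map_single(), yet vmxnet3_set_mc() still passes it to dma_mapping_error(). The LDV model behind the final assume/ldv_error pair (rule 331_1a, as named in the descriptor path above) can be sketched as follows, reconstructed from the trace with the counter declared earlier:

int LDV_DMA_MAP_CALLS = 0;

/* Model of the DMA API: every successful dma_map_*() bumps the counter... */
void ldv_dma_map_page(void)
{
	LDV_DMA_MAP_CALLS++;
}

/* ...and dma_mapping_error() may only be asked about an address that was
 * actually mapped. With LDV_DMA_MAP_CALLS == 0 this is the violation. */
int ldv_dma_mapping_error(void)
{
	if (LDV_DMA_MAP_CALLS == 0)
		ldv_error();
	LDV_DMA_MAP_CALLS--;
	return 0;
}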
Source code
1 #ifndef _ASM_X86_BITOPS_H 2 #define _ASM_X86_BITOPS_H 3 4 /* 5 * Copyright 1992, Linus Torvalds. 6 * 7 * Note: inlines with more than a single statement should be marked 8 * __always_inline to avoid problems with older gcc's inlining heuristics. 9 */ 10 11 #ifndef _LINUX_BITOPS_H 12 #error only <linux/bitops.h> can be included directly 13 #endif 14 15 #include <linux/compiler.h> 16 #include <asm/alternative.h> 17 #include <asm/rmwcc.h> 18 #include <asm/barrier.h> 19 20 #if BITS_PER_LONG == 32 21 # define _BITOPS_LONG_SHIFT 5 22 #elif BITS_PER_LONG == 64 23 # define _BITOPS_LONG_SHIFT 6 24 #else 25 # error "Unexpected BITS_PER_LONG" 26 #endif 27 28 #define BIT_64(n) (U64_C(1) << (n)) 29 30 /* 31 * These have to be done with inline assembly: that way the bit-setting 32 * is guaranteed to be atomic. All bit operations return 0 if the bit 33 * was cleared before the operation and != 0 if it was not. 34 * 35 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). 36 */ 37 38 #if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1) 39 /* Technically wrong, but this avoids compilation errors on some gcc 40 versions. */ 41 #define BITOP_ADDR(x) "=m" (*(volatile long *) (x)) 42 #else 43 #define BITOP_ADDR(x) "+m" (*(volatile long *) (x)) 44 #endif 45 46 #define ADDR BITOP_ADDR(addr) 47 48 /* 49 * We do the locked ops that don't return the old value as 50 * a mask operation on a byte. 51 */ 52 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr)) 53 #define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3)) 54 #define CONST_MASK(nr) (1 << ((nr) & 7)) 55 56 /** 57 * set_bit - Atomically set a bit in memory 58 * @nr: the bit to set 59 * @addr: the address to start counting from 60 * 61 * This function is atomic and may not be reordered. See __set_bit() 62 * if you do not require the atomic guarantees. 63 * 64 * Note: there are no guarantees that this function will not be reordered 65 * on non x86 architectures, so if you are writing portable code, 66 * make sure not to rely on its reordering guarantees. 67 * 68 * Note that @nr may be almost arbitrarily large; this function is not 69 * restricted to acting on a single-word quantity. 70 */ 71 static __always_inline void 72 set_bit(long nr, volatile unsigned long *addr) 73 { 74 if (IS_IMMEDIATE(nr)) { 75 asm volatile(LOCK_PREFIX "orb %1,%0" 76 : CONST_MASK_ADDR(nr, addr) 77 : "iq" ((u8)CONST_MASK(nr)) 78 : "memory"); 79 } else { 80 asm volatile(LOCK_PREFIX "bts %1,%0" 81 : BITOP_ADDR(addr) : "Ir" (nr) : "memory"); 82 } 83 } 84 85 /** 86 * __set_bit - Set a bit in memory 87 * @nr: the bit to set 88 * @addr: the address to start counting from 89 * 90 * Unlike set_bit(), this function is non-atomic and may be reordered. 91 * If it's called on the same region of memory simultaneously, the effect 92 * may be that only one operation succeeds. 93 */ 94 static __always_inline void __set_bit(long nr, volatile unsigned long *addr) 95 { 96 asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory"); 97 } 98 99 /** 100 * clear_bit - Clears a bit in memory 101 * @nr: Bit to clear 102 * @addr: Address to start counting from 103 * 104 * clear_bit() is atomic and may not be reordered. However, it does 105 * not contain a memory barrier, so if it is used for locking purposes, 106 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic() 107 * in order to ensure changes are visible on other processors. 
108 */ 109 static __always_inline void 110 clear_bit(long nr, volatile unsigned long *addr) 111 { 112 if (IS_IMMEDIATE(nr)) { 113 asm volatile(LOCK_PREFIX "andb %1,%0" 114 : CONST_MASK_ADDR(nr, addr) 115 : "iq" ((u8)~CONST_MASK(nr))); 116 } else { 117 asm volatile(LOCK_PREFIX "btr %1,%0" 118 : BITOP_ADDR(addr) 119 : "Ir" (nr)); 120 } 121 } 122 123 /* 124 * clear_bit_unlock - Clears a bit in memory 125 * @nr: Bit to clear 126 * @addr: Address to start counting from 127 * 128 * clear_bit() is atomic and implies release semantics before the memory 129 * operation. It can be used for an unlock. 130 */ 131 static __always_inline void clear_bit_unlock(long nr, volatile unsigned long *addr) 132 { 133 barrier(); 134 clear_bit(nr, addr); 135 } 136 137 static __always_inline void __clear_bit(long nr, volatile unsigned long *addr) 138 { 139 asm volatile("btr %1,%0" : ADDR : "Ir" (nr)); 140 } 141 142 /* 143 * __clear_bit_unlock - Clears a bit in memory 144 * @nr: Bit to clear 145 * @addr: Address to start counting from 146 * 147 * __clear_bit() is non-atomic and implies release semantics before the memory 148 * operation. It can be used for an unlock if no other CPUs can concurrently 149 * modify other bits in the word. 150 * 151 * No memory barrier is required here, because x86 cannot reorder stores past 152 * older loads. Same principle as spin_unlock. 153 */ 154 static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long *addr) 155 { 156 barrier(); 157 __clear_bit(nr, addr); 158 } 159 160 /** 161 * __change_bit - Toggle a bit in memory 162 * @nr: the bit to change 163 * @addr: the address to start counting from 164 * 165 * Unlike change_bit(), this function is non-atomic and may be reordered. 166 * If it's called on the same region of memory simultaneously, the effect 167 * may be that only one operation succeeds. 168 */ 169 static __always_inline void __change_bit(long nr, volatile unsigned long *addr) 170 { 171 asm volatile("btc %1,%0" : ADDR : "Ir" (nr)); 172 } 173 174 /** 175 * change_bit - Toggle a bit in memory 176 * @nr: Bit to change 177 * @addr: Address to start counting from 178 * 179 * change_bit() is atomic and may not be reordered. 180 * Note that @nr may be almost arbitrarily large; this function is not 181 * restricted to acting on a single-word quantity. 182 */ 183 static __always_inline void change_bit(long nr, volatile unsigned long *addr) 184 { 185 if (IS_IMMEDIATE(nr)) { 186 asm volatile(LOCK_PREFIX "xorb %1,%0" 187 : CONST_MASK_ADDR(nr, addr) 188 : "iq" ((u8)CONST_MASK(nr))); 189 } else { 190 asm volatile(LOCK_PREFIX "btc %1,%0" 191 : BITOP_ADDR(addr) 192 : "Ir" (nr)); 193 } 194 } 195 196 /** 197 * test_and_set_bit - Set a bit and return its old value 198 * @nr: Bit to set 199 * @addr: Address to count from 200 * 201 * This operation is atomic and cannot be reordered. 202 * It also implies a memory barrier. 203 */ 204 static __always_inline bool test_and_set_bit(long nr, volatile unsigned long *addr) 205 { 206 GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", c); 207 } 208 209 /** 210 * test_and_set_bit_lock - Set a bit and return its old value for lock 211 * @nr: Bit to set 212 * @addr: Address to count from 213 * 214 * This is the same as test_and_set_bit on x86. 
215 */ 216 static __always_inline bool 217 test_and_set_bit_lock(long nr, volatile unsigned long *addr) 218 { 219 return test_and_set_bit(nr, addr); 220 } 221 222 /** 223 * __test_and_set_bit - Set a bit and return its old value 224 * @nr: Bit to set 225 * @addr: Address to count from 226 * 227 * This operation is non-atomic and can be reordered. 228 * If two examples of this operation race, one can appear to succeed 229 * but actually fail. You must protect multiple accesses with a lock. 230 */ 231 static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long *addr) 232 { 233 bool oldbit; 234 235 asm("bts %2,%1\n\t" 236 CC_SET(c) 237 : CC_OUT(c) (oldbit), ADDR 238 : "Ir" (nr)); 239 return oldbit; 240 } 241 242 /** 243 * test_and_clear_bit - Clear a bit and return its old value 244 * @nr: Bit to clear 245 * @addr: Address to count from 246 * 247 * This operation is atomic and cannot be reordered. 248 * It also implies a memory barrier. 249 */ 250 static __always_inline bool test_and_clear_bit(long nr, volatile unsigned long *addr) 251 { 252 GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", c); 253 } 254 255 /** 256 * __test_and_clear_bit - Clear a bit and return its old value 257 * @nr: Bit to clear 258 * @addr: Address to count from 259 * 260 * This operation is non-atomic and can be reordered. 261 * If two examples of this operation race, one can appear to succeed 262 * but actually fail. You must protect multiple accesses with a lock. 263 * 264 * Note: the operation is performed atomically with respect to 265 * the local CPU, but not other CPUs. Portable code should not 266 * rely on this behaviour. 267 * KVM relies on this behaviour on x86 for modifying memory that is also 268 * accessed from a hypervisor on the same CPU if running in a VM: don't change 269 * this without also updating arch/x86/kernel/kvm.c 270 */ 271 static __always_inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr) 272 { 273 bool oldbit; 274 275 asm volatile("btr %2,%1\n\t" 276 CC_SET(c) 277 : CC_OUT(c) (oldbit), ADDR 278 : "Ir" (nr)); 279 return oldbit; 280 } 281 282 /* WARNING: non atomic and it can be reordered! */ 283 static __always_inline bool __test_and_change_bit(long nr, volatile unsigned long *addr) 284 { 285 bool oldbit; 286 287 asm volatile("btc %2,%1\n\t" 288 CC_SET(c) 289 : CC_OUT(c) (oldbit), ADDR 290 : "Ir" (nr) : "memory"); 291 292 return oldbit; 293 } 294 295 /** 296 * test_and_change_bit - Change a bit and return its old value 297 * @nr: Bit to change 298 * @addr: Address to count from 299 * 300 * This operation is atomic and cannot be reordered. 301 * It also implies a memory barrier. 
302 */ 303 static __always_inline bool test_and_change_bit(long nr, volatile unsigned long *addr) 304 { 305 GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", c); 306 } 307 308 static __always_inline bool constant_test_bit(long nr, const volatile unsigned long *addr) 309 { 310 return ((1UL << (nr & (BITS_PER_LONG-1))) & 311 (addr[nr >> _BITOPS_LONG_SHIFT])) != 0; 312 } 313 314 static __always_inline bool variable_test_bit(long nr, volatile const unsigned long *addr) 315 { 316 bool oldbit; 317 318 asm volatile("bt %2,%1\n\t" 319 CC_SET(c) 320 : CC_OUT(c) (oldbit) 321 : "m" (*(unsigned long *)addr), "Ir" (nr)); 322 323 return oldbit; 324 } 325 326 #if 0 /* Fool kernel-doc since it doesn't do macros yet */ 327 /** 328 * test_bit - Determine whether a bit is set 329 * @nr: bit number to test 330 * @addr: Address to start counting from 331 */ 332 static bool test_bit(int nr, const volatile unsigned long *addr); 333 #endif 334 335 #define test_bit(nr, addr) \ 336 (__builtin_constant_p((nr)) \ 337 ? constant_test_bit((nr), (addr)) \ 338 : variable_test_bit((nr), (addr))) 339 340 /** 341 * __ffs - find first set bit in word 342 * @word: The word to search 343 * 344 * Undefined if no bit exists, so code should check against 0 first. 345 */ 346 static __always_inline unsigned long __ffs(unsigned long word) 347 { 348 asm("rep; bsf %1,%0" 349 : "=r" (word) 350 : "rm" (word)); 351 return word; 352 } 353 354 /** 355 * ffz - find first zero bit in word 356 * @word: The word to search 357 * 358 * Undefined if no zero exists, so code should check against ~0UL first. 359 */ 360 static __always_inline unsigned long ffz(unsigned long word) 361 { 362 asm("rep; bsf %1,%0" 363 : "=r" (word) 364 : "r" (~word)); 365 return word; 366 } 367 368 /* 369 * __fls: find last set bit in word 370 * @word: The word to search 371 * 372 * Undefined if no set bit exists, so code should check against 0 first. 373 */ 374 static __always_inline unsigned long __fls(unsigned long word) 375 { 376 asm("bsr %1,%0" 377 : "=r" (word) 378 : "rm" (word)); 379 return word; 380 } 381 382 #undef ADDR 383 384 #ifdef __KERNEL__ 385 /** 386 * ffs - find first set bit in word 387 * @x: the word to search 388 * 389 * This is defined the same way as the libc and compiler builtin ffs 390 * routines, therefore differs in spirit from the other bitops. 391 * 392 * ffs(value) returns 0 if value is 0 or the position of the first 393 * set bit if value is nonzero. The first (least significant) bit 394 * is at position 1. 395 */ 396 static __always_inline int ffs(int x) 397 { 398 int r; 399 400 #ifdef CONFIG_X86_64 401 /* 402 * AMD64 says BSFL won't clobber the dest reg if x==0; Intel64 says the 403 * dest reg is undefined if x==0, but their CPU architect says its 404 * value is written to set it to the same as before, except that the 405 * top 32 bits will be cleared. 406 * 407 * We cannot do this on 32 bits because at the very least some 408 * 486 CPUs did not behave this way. 409 */ 410 asm("bsfl %1,%0" 411 : "=r" (r) 412 : "rm" (x), "0" (-1)); 413 #elif defined(CONFIG_X86_CMOV) 414 asm("bsfl %1,%0\n\t" 415 "cmovzl %2,%0" 416 : "=&r" (r) : "rm" (x), "r" (-1)); 417 #else 418 asm("bsfl %1,%0\n\t" 419 "jnz 1f\n\t" 420 "movl $-1,%0\n" 421 "1:" : "=r" (r) : "rm" (x)); 422 #endif 423 return r + 1; 424 } 425 426 /** 427 * fls - find last set bit in word 428 * @x: the word to search 429 * 430 * This is defined in a similar way as the libc and compiler builtin 431 * ffs, but returns the position of the most significant set bit. 
432 * 433 * fls(value) returns 0 if value is 0 or the position of the last 434 * set bit if value is nonzero. The last (most significant) bit is 435 * at position 32. 436 */ 437 static __always_inline int fls(int x) 438 { 439 int r; 440 441 #ifdef CONFIG_X86_64 442 /* 443 * AMD64 says BSRL won't clobber the dest reg if x==0; Intel64 says the 444 * dest reg is undefined if x==0, but their CPU architect says its 445 * value is written to set it to the same as before, except that the 446 * top 32 bits will be cleared. 447 * 448 * We cannot do this on 32 bits because at the very least some 449 * 486 CPUs did not behave this way. 450 */ 451 asm("bsrl %1,%0" 452 : "=r" (r) 453 : "rm" (x), "0" (-1)); 454 #elif defined(CONFIG_X86_CMOV) 455 asm("bsrl %1,%0\n\t" 456 "cmovzl %2,%0" 457 : "=&r" (r) : "rm" (x), "rm" (-1)); 458 #else 459 asm("bsrl %1,%0\n\t" 460 "jnz 1f\n\t" 461 "movl $-1,%0\n" 462 "1:" : "=r" (r) : "rm" (x)); 463 #endif 464 return r + 1; 465 } 466 467 /** 468 * fls64 - find last set bit in a 64-bit word 469 * @x: the word to search 470 * 471 * This is defined in a similar way as the libc and compiler builtin 472 * ffsll, but returns the position of the most significant set bit. 473 * 474 * fls64(value) returns 0 if value is 0 or the position of the last 475 * set bit if value is nonzero. The last (most significant) bit is 476 * at position 64. 477 */ 478 #ifdef CONFIG_X86_64 479 static __always_inline int fls64(__u64 x) 480 { 481 int bitpos = -1; 482 /* 483 * AMD64 says BSRQ won't clobber the dest reg if x==0; Intel64 says the 484 * dest reg is undefined if x==0, but their CPU architect says its 485 * value is written to set it to the same as before. 486 */ 487 asm("bsrq %1,%q0" 488 : "+r" (bitpos) 489 : "rm" (x)); 490 return bitpos + 1; 491 } 492 #else 493 #include <asm-generic/bitops/fls64.h> 494 #endif 495 496 #include <asm-generic/bitops/find.h> 497 498 #include <asm-generic/bitops/sched.h> 499 500 #include <asm/arch_hweight.h> 501 502 #include <asm-generic/bitops/const_hweight.h> 503 504 #include <asm-generic/bitops/le.h> 505 506 #include <asm-generic/bitops/ext2-atomic-setbit.h> 507 508 #endif /* __KERNEL__ */ 509 #endif /* _ASM_X86_BITOPS_H */
1 #ifndef _ASM_X86_IO_H 2 #define _ASM_X86_IO_H 3 4 /* 5 * This file contains the definitions for the x86 IO instructions 6 * inb/inw/inl/outb/outw/outl and the "string versions" of the same 7 * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing" 8 * versions of the single-IO instructions (inb_p/inw_p/..). 9 * 10 * This file is not meant to be obfuscating: it's just complicated 11 * to (a) handle it all in a way that makes gcc able to optimize it 12 * as well as possible and (b) trying to avoid writing the same thing 13 * over and over again with slight variations and possibly making a 14 * mistake somewhere. 15 */ 16 17 /* 18 * Thanks to James van Artsdalen for a better timing-fix than 19 * the two short jumps: using outb's to a nonexistent port seems 20 * to guarantee better timings even on fast machines. 21 * 22 * On the other hand, I'd like to be sure of a non-existent port: 23 * I feel a bit unsafe about using 0x80 (should be safe, though) 24 * 25 * Linus 26 */ 27 28 /* 29 * Bit simplified and optimized by Jan Hubicka 30 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999. 31 * 32 * isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added, 33 * isa_read[wl] and isa_write[wl] fixed 34 * - Arnaldo Carvalho de Melo <acme@conectiva.com.br> 35 */ 36 37 #define ARCH_HAS_IOREMAP_WC 38 #define ARCH_HAS_IOREMAP_WT 39 40 #include <linux/string.h> 41 #include <linux/compiler.h> 42 #include <asm/page.h> 43 #include <asm/early_ioremap.h> 44 #include <asm/pgtable_types.h> 45 46 #define build_mmio_read(name, size, type, reg, barrier) \ 47 static inline type name(const volatile void __iomem *addr) \ 48 { type ret; asm volatile("mov" size " %1,%0":reg (ret) \ 49 :"m" (*(volatile type __force *)addr) barrier); return ret; } 50 51 #define build_mmio_write(name, size, type, reg, barrier) \ 52 static inline void name(type val, volatile void __iomem *addr) \ 53 { asm volatile("mov" size " %0,%1": :reg (val), \ 54 "m" (*(volatile type __force *)addr) barrier); } 55 56 build_mmio_read(readb, "b", unsigned char, "=q", :"memory") 57 build_mmio_read(readw, "w", unsigned short, "=r", :"memory") 58 build_mmio_read(readl, "l", unsigned int, "=r", :"memory") 59 60 build_mmio_read(__readb, "b", unsigned char, "=q", ) 61 build_mmio_read(__readw, "w", unsigned short, "=r", ) 62 build_mmio_read(__readl, "l", unsigned int, "=r", ) 63 64 build_mmio_write(writeb, "b", unsigned char, "q", :"memory") 65 build_mmio_write(writew, "w", unsigned short, "r", :"memory") 66 build_mmio_write(writel, "l", unsigned int, "r", :"memory") 67 68 build_mmio_write(__writeb, "b", unsigned char, "q", ) 69 build_mmio_write(__writew, "w", unsigned short, "r", ) 70 build_mmio_write(__writel, "l", unsigned int, "r", ) 71 72 #define readb_relaxed(a) __readb(a) 73 #define readw_relaxed(a) __readw(a) 74 #define readl_relaxed(a) __readl(a) 75 #define __raw_readb __readb 76 #define __raw_readw __readw 77 #define __raw_readl __readl 78 79 #define writeb_relaxed(v, a) __writeb(v, a) 80 #define writew_relaxed(v, a) __writew(v, a) 81 #define writel_relaxed(v, a) __writel(v, a) 82 #define __raw_writeb __writeb 83 #define __raw_writew __writew 84 #define __raw_writel __writel 85 86 #define mmiowb() barrier() 87 88 #ifdef CONFIG_X86_64 89 90 build_mmio_read(readq, "q", unsigned long, "=r", :"memory") 91 build_mmio_write(writeq, "q", unsigned long, "r", :"memory") 92 93 #define readq_relaxed(a) readq(a) 94 #define writeq_relaxed(v, a) writeq(v, a) 95 96 #define __raw_readq(a) readq(a) 97 #define __raw_writeq(val, addr) writeq(val, 
addr) 98 99 /* Let people know that we have them */ 100 #define readq readq 101 #define writeq writeq 102 103 #endif 104 105 /** 106 * virt_to_phys - map virtual addresses to physical 107 * @address: address to remap 108 * 109 * The returned physical address is the physical (CPU) mapping for 110 * the memory address given. It is only valid to use this function on 111 * addresses directly mapped or allocated via kmalloc. 112 * 113 * This function does not give bus mappings for DMA transfers. In 114 * almost all conceivable cases a device driver should not be using 115 * this function 116 */ 117 118 static inline phys_addr_t virt_to_phys(volatile void *address) 119 { 120 return __pa(address); 121 } 122 123 /** 124 * phys_to_virt - map physical address to virtual 125 * @address: address to remap 126 * 127 * The returned virtual address is a current CPU mapping for 128 * the memory address given. It is only valid to use this function on 129 * addresses that have a kernel mapping 130 * 131 * This function does not handle bus mappings for DMA transfers. In 132 * almost all conceivable cases a device driver should not be using 133 * this function 134 */ 135 136 static inline void *phys_to_virt(phys_addr_t address) 137 { 138 return __va(address); 139 } 140 141 /* 142 * Change "struct page" to physical address. 143 */ 144 #define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT) 145 146 /* 147 * ISA I/O bus memory addresses are 1:1 with the physical address. 148 * However, we truncate the address to unsigned int to avoid undesirable 149 * promitions in legacy drivers. 150 */ 151 static inline unsigned int isa_virt_to_bus(volatile void *address) 152 { 153 return (unsigned int)virt_to_phys(address); 154 } 155 #define isa_page_to_bus(page) ((unsigned int)page_to_phys(page)) 156 #define isa_bus_to_virt phys_to_virt 157 158 /* 159 * However PCI ones are not necessarily 1:1 and therefore these interfaces 160 * are forbidden in portable PCI drivers. 161 * 162 * Allow them on x86 for legacy drivers, though. 163 */ 164 #define virt_to_bus virt_to_phys 165 #define bus_to_virt phys_to_virt 166 167 /** 168 * ioremap - map bus memory into CPU space 169 * @offset: bus address of the memory 170 * @size: size of the resource to map 171 * 172 * ioremap performs a platform specific sequence of operations to 173 * make bus memory CPU accessible via the readb/readw/readl/writeb/ 174 * writew/writel functions and the other mmio helpers. The returned 175 * address is not guaranteed to be usable directly as a virtual 176 * address. 177 * 178 * If the area you are trying to map is a PCI BAR you should have a 179 * look at pci_iomap(). 
180 */ 181 extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size); 182 extern void __iomem *ioremap_uc(resource_size_t offset, unsigned long size); 183 #define ioremap_uc ioremap_uc 184 185 extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size); 186 extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size, 187 unsigned long prot_val); 188 189 /* 190 * The default ioremap() behavior is non-cached: 191 */ 192 static inline void __iomem *ioremap(resource_size_t offset, unsigned long size) 193 { 194 return ioremap_nocache(offset, size); 195 } 196 197 extern void iounmap(volatile void __iomem *addr); 198 199 extern void set_iounmap_nonlazy(void); 200 201 #ifdef __KERNEL__ 202 203 #include <asm-generic/iomap.h> 204 205 /* 206 * Convert a virtual cached pointer to an uncached pointer 207 */ 208 #define xlate_dev_kmem_ptr(p) p 209 210 static inline void 211 memset_io(volatile void __iomem *addr, unsigned char val, size_t count) 212 { 213 memset((void __force *)addr, val, count); 214 } 215 216 static inline void 217 memcpy_fromio(void *dst, const volatile void __iomem *src, size_t count) 218 { 219 memcpy(dst, (const void __force *)src, count); 220 } 221 222 static inline void 223 memcpy_toio(volatile void __iomem *dst, const void *src, size_t count) 224 { 225 memcpy((void __force *)dst, src, count); 226 } 227 228 /* 229 * ISA space is 'always mapped' on a typical x86 system, no need to 230 * explicitly ioremap() it. The fact that the ISA IO space is mapped 231 * to PAGE_OFFSET is pure coincidence - it does not mean ISA values 232 * are physical addresses. The following constant pointer can be 233 * used as the IO-area pointer (it can be iounmapped as well, so the 234 * analogy with PCI is quite large): 235 */ 236 #define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET)) 237 238 /* 239 * Cache management 240 * 241 * This needed for two cases 242 * 1. Out of order aware processors 243 * 2. 
Accidentally out of order processors (PPro errata #51) 244 */ 245 246 static inline void flush_write_buffers(void) 247 { 248 #if defined(CONFIG_X86_PPRO_FENCE) 249 asm volatile("lock; addl $0,0(%%esp)": : :"memory"); 250 #endif 251 } 252 253 #endif /* __KERNEL__ */ 254 255 extern void native_io_delay(void); 256 257 extern int io_delay_type; 258 extern void io_delay_init(void); 259 260 #if defined(CONFIG_PARAVIRT) 261 #include <asm/paravirt.h> 262 #else 263 264 static inline void slow_down_io(void) 265 { 266 native_io_delay(); 267 #ifdef REALLY_SLOW_IO 268 native_io_delay(); 269 native_io_delay(); 270 native_io_delay(); 271 #endif 272 } 273 274 #endif 275 276 #define BUILDIO(bwl, bw, type) \ 277 static inline void out##bwl(unsigned type value, int port) \ 278 { \ 279 asm volatile("out" #bwl " %" #bw "0, %w1" \ 280 : : "a"(value), "Nd"(port)); \ 281 } \ 282 \ 283 static inline unsigned type in##bwl(int port) \ 284 { \ 285 unsigned type value; \ 286 asm volatile("in" #bwl " %w1, %" #bw "0" \ 287 : "=a"(value) : "Nd"(port)); \ 288 return value; \ 289 } \ 290 \ 291 static inline void out##bwl##_p(unsigned type value, int port) \ 292 { \ 293 out##bwl(value, port); \ 294 slow_down_io(); \ 295 } \ 296 \ 297 static inline unsigned type in##bwl##_p(int port) \ 298 { \ 299 unsigned type value = in##bwl(port); \ 300 slow_down_io(); \ 301 return value; \ 302 } \ 303 \ 304 static inline void outs##bwl(int port, const void *addr, unsigned long count) \ 305 { \ 306 asm volatile("rep; outs" #bwl \ 307 : "+S"(addr), "+c"(count) : "d"(port)); \ 308 } \ 309 \ 310 static inline void ins##bwl(int port, void *addr, unsigned long count) \ 311 { \ 312 asm volatile("rep; ins" #bwl \ 313 : "+D"(addr), "+c"(count) : "d"(port)); \ 314 } 315 316 BUILDIO(b, b, char) 317 BUILDIO(w, w, short) 318 BUILDIO(l, , int) 319 320 extern void *xlate_dev_mem_ptr(phys_addr_t phys); 321 extern void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr); 322 323 extern int ioremap_change_attr(unsigned long vaddr, unsigned long size, 324 enum page_cache_mode pcm); 325 extern void __iomem *ioremap_wc(resource_size_t offset, unsigned long size); 326 extern void __iomem *ioremap_wt(resource_size_t offset, unsigned long size); 327 328 extern bool is_early_ioremap_ptep(pte_t *ptep); 329 330 #ifdef CONFIG_XEN 331 #include <xen/xen.h> 332 struct bio_vec; 333 334 extern bool xen_biovec_phys_mergeable(const struct bio_vec *vec1, 335 const struct bio_vec *vec2); 336 337 #define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \ 338 (__BIOVEC_PHYS_MERGEABLE(vec1, vec2) && \ 339 (!xen_domain() || xen_biovec_phys_mergeable(vec1, vec2))) 340 #endif /* CONFIG_XEN */ 341 342 #define IO_SPACE_LIMIT 0xffff 343 344 #ifdef CONFIG_MTRR 345 extern int __must_check arch_phys_wc_index(int handle); 346 #define arch_phys_wc_index arch_phys_wc_index 347 348 extern int __must_check arch_phys_wc_add(unsigned long base, 349 unsigned long size); 350 extern void arch_phys_wc_del(int handle); 351 #define arch_phys_wc_add arch_phys_wc_add 352 #endif 353 354 #endif /* _ASM_X86_IO_H */
1 2 /* 3 * Linux driver for VMware's vmxnet3 ethernet NIC. 4 * 5 * Copyright (C) 2008-2016, VMware, Inc. All Rights Reserved. 6 * 7 * This program is free software; you can redistribute it and/or modify it 8 * under the terms of the GNU General Public License as published by the 9 * Free Software Foundation; version 2 of the License and no later version. 10 * 11 * This program is distributed in the hope that it will be useful, but 12 * WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or 14 * NON INFRINGEMENT. See the GNU General Public License for more 15 * details. 16 * 17 * You should have received a copy of the GNU General Public License 18 * along with this program; if not, write to the Free Software 19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 20 * 21 * The full GNU General Public License is included in this distribution in 22 * the file called "COPYING". 23 * 24 * Maintained by: pv-drivers@vmware.com 25 * 26 */ 27 28 #include <linux/module.h> 29 #include <net/ip6_checksum.h> 30 31 #include "vmxnet3_int.h" 32 33 char vmxnet3_driver_name[] = "vmxnet3"; 34 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver" 35 36 /* 37 * PCI Device ID Table 38 * Last entry must be all 0s 39 */ 40 static const struct pci_device_id vmxnet3_pciid_table[] = { 41 {PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_VMXNET3)}, 42 {0} 43 }; 44 45 MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_table); 46 47 static int enable_mq = 1; 48 49 static void 50 vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac); 51 52 /* 53 * Enable/Disable the given intr 54 */ 55 static void 56 vmxnet3_enable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx) 57 { 58 VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 0); 59 } 60 61 62 static void 63 vmxnet3_disable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx) 64 { 65 VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 1); 66 } 67 68 69 /* 70 * Enable/Disable all intrs used by the device 71 */ 72 static void 73 vmxnet3_enable_all_intrs(struct vmxnet3_adapter *adapter) 74 { 75 int i; 76 77 for (i = 0; i < adapter->intr.num_intrs; i++) 78 vmxnet3_enable_intr(adapter, i); 79 adapter->shared->devRead.intrConf.intrCtrl &= 80 cpu_to_le32(~VMXNET3_IC_DISABLE_ALL); 81 } 82 83 84 static void 85 vmxnet3_disable_all_intrs(struct vmxnet3_adapter *adapter) 86 { 87 int i; 88 89 adapter->shared->devRead.intrConf.intrCtrl |= 90 cpu_to_le32(VMXNET3_IC_DISABLE_ALL); 91 for (i = 0; i < adapter->intr.num_intrs; i++) 92 vmxnet3_disable_intr(adapter, i); 93 } 94 95 96 static void 97 vmxnet3_ack_events(struct vmxnet3_adapter *adapter, u32 events) 98 { 99 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_ECR, events); 100 } 101 102 103 static bool 104 vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter) 105 { 106 return tq->stopped; 107 } 108 109 110 static void 111 vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter) 112 { 113 tq->stopped = false; 114 netif_start_subqueue(adapter->netdev, tq - adapter->tx_queue); 115 } 116 117 118 static void 119 vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter) 120 { 121 tq->stopped = false; 122 netif_wake_subqueue(adapter->netdev, (tq - adapter->tx_queue)); 123 } 124 125 126 static void 127 vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter) 128 { 129 tq->stopped = true; 130 tq->num_stop++; 131 
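/*
 * A note on the (tq - adapter->tx_queue) expression used just below and in
 * vmxnet3_tq_start()/vmxnet3_tq_wake() above: adapter->tx_queue is an
 * array, so the pointer subtraction yields the queue's index, which is
 * also its netdev subqueue number. A minimal standalone illustration
 * (hypothetical values, not part of the driver):
 *
 *	struct vmxnet3_tx_queue q[4];
 *	struct vmxnet3_tx_queue *tq = &q[2];
 *	ptrdiff_t idx = tq - q;		// idx == 2
 */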
netif_stop_subqueue(adapter->netdev, (tq - adapter->tx_queue)); 132 } 133 134 135 /* 136 * Check the link state. This may start or stop the tx queue. 137 */ 138 static void 139 vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue) 140 { 141 u32 ret; 142 int i; 143 unsigned long flags; 144 145 spin_lock_irqsave(&adapter->cmd_lock, flags); 146 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK); 147 ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); 148 spin_unlock_irqrestore(&adapter->cmd_lock, flags); 149 150 adapter->link_speed = ret >> 16; 151 if (ret & 1) { /* Link is up. */ 152 netdev_info(adapter->netdev, "NIC Link is Up %d Mbps\n", 153 adapter->link_speed); 154 netif_carrier_on(adapter->netdev); 155 156 if (affectTxQueue) { 157 for (i = 0; i < adapter->num_tx_queues; i++) 158 vmxnet3_tq_start(&adapter->tx_queue[i], 159 adapter); 160 } 161 } else { 162 netdev_info(adapter->netdev, "NIC Link is Down\n"); 163 netif_carrier_off(adapter->netdev); 164 165 if (affectTxQueue) { 166 for (i = 0; i < adapter->num_tx_queues; i++) 167 vmxnet3_tq_stop(&adapter->tx_queue[i], adapter); 168 } 169 } 170 } 171 172 static void 173 vmxnet3_process_events(struct vmxnet3_adapter *adapter) 174 { 175 int i; 176 unsigned long flags; 177 u32 events = le32_to_cpu(adapter->shared->ecr); 178 if (!events) 179 return; 180 181 vmxnet3_ack_events(adapter, events); 182 183 /* Check if link state has changed */ 184 if (events & VMXNET3_ECR_LINK) 185 vmxnet3_check_link(adapter, true); 186 187 /* Check if there is an error on xmit/recv queues */ 188 if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) { 189 spin_lock_irqsave(&adapter->cmd_lock, flags); 190 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 191 VMXNET3_CMD_GET_QUEUE_STATUS); 192 spin_unlock_irqrestore(&adapter->cmd_lock, flags); 193 194 for (i = 0; i < adapter->num_tx_queues; i++) 195 if (adapter->tqd_start[i].status.stopped) 196 dev_err(&adapter->netdev->dev, 197 "%s: tq[%d] error 0x%x\n", 198 adapter->netdev->name, i, le32_to_cpu( 199 adapter->tqd_start[i].status.error)); 200 for (i = 0; i < adapter->num_rx_queues; i++) 201 if (adapter->rqd_start[i].status.stopped) 202 dev_err(&adapter->netdev->dev, 203 "%s: rq[%d] error 0x%x\n", 204 adapter->netdev->name, i, 205 adapter->rqd_start[i].status.error); 206 207 schedule_work(&adapter->work); 208 } 209 } 210 211 #ifdef __BIG_ENDIAN_BITFIELD 212 /* 213 * The device expects the bitfields in shared structures to be written in 214 * little endian. When the CPU is big endian, the following routines are used to 215 * correctly read and write into the ABI. 216 * The general technique used here is: double word bitfields are defined in 217 * opposite order for big endian architecture. Then before reading them in the 218 * driver the complete double word is translated using le32_to_cpu. Similarly, 219 * after the driver writes into bitfields, cpu_to_le32 is used to translate the 220 * double words into the required format. 221 * In order to avoid touching bits in the shared structure more than once, temporary 222 * descriptors are used. These are passed as srcDesc to the following functions. 
223 */ 224 static void vmxnet3_RxDescToCPU(const struct Vmxnet3_RxDesc *srcDesc, 225 struct Vmxnet3_RxDesc *dstDesc) 226 { 227 u32 *src = (u32 *)srcDesc + 2; 228 u32 *dst = (u32 *)dstDesc + 2; 229 dstDesc->addr = le64_to_cpu(srcDesc->addr); 230 *dst = le32_to_cpu(*src); 231 dstDesc->ext1 = le32_to_cpu(srcDesc->ext1); 232 } 233 234 static void vmxnet3_TxDescToLe(const struct Vmxnet3_TxDesc *srcDesc, 235 struct Vmxnet3_TxDesc *dstDesc) 236 { 237 int i; 238 u32 *src = (u32 *)(srcDesc + 1); 239 u32 *dst = (u32 *)(dstDesc + 1); 240 241 /* Working backwards so that the gen bit is set at the end. */ 242 for (i = 2; i > 0; i--) { 243 src--; 244 dst--; 245 *dst = cpu_to_le32(*src); 246 } 247 } 248 249 250 static void vmxnet3_RxCompToCPU(const struct Vmxnet3_RxCompDesc *srcDesc, 251 struct Vmxnet3_RxCompDesc *dstDesc) 252 { 253 int i = 0; 254 u32 *src = (u32 *)srcDesc; 255 u32 *dst = (u32 *)dstDesc; 256 for (i = 0; i < sizeof(struct Vmxnet3_RxCompDesc) / sizeof(u32); i++) { 257 *dst = le32_to_cpu(*src); 258 src++; 259 dst++; 260 } 261 } 262 263 264 /* Used to read bitfield values from double words. */ 265 static u32 get_bitfield32(const __le32 *bitfield, u32 pos, u32 size) 266 { 267 u32 temp = le32_to_cpu(*bitfield); 268 u32 mask = ((1 << size) - 1) << pos; 269 temp &= mask; 270 temp >>= pos; 271 return temp; 272 } 273 274 275 276 #endif /* __BIG_ENDIAN_BITFIELD */ 277 278 #ifdef __BIG_ENDIAN_BITFIELD 279 280 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \ 281 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \ 282 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE) 283 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \ 284 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \ 285 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE) 286 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \ 287 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \ 288 VMXNET3_TCD_GEN_SIZE) 289 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \ 290 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE) 291 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \ 292 (dstrcd) = (tmp); \ 293 vmxnet3_RxCompToCPU((rcd), (tmp)); \ 294 } while (0) 295 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \ 296 (dstrxd) = (tmp); \ 297 vmxnet3_RxDescToCPU((rxd), (tmp)); \ 298 } while (0) 299 300 #else 301 302 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen) 303 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop) 304 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen) 305 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx) 306 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd) 307 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd) 308 309 #endif /* __BIG_ENDIAN_BITFIELD */ 310 311 312 static void 313 vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi, 314 struct pci_dev *pdev) 315 { 316 if (tbi->map_type == VMXNET3_MAP_SINGLE) 317 dma_unmap_single(&pdev->dev, tbi->dma_addr, tbi->len, 318 PCI_DMA_TODEVICE); 319 else if (tbi->map_type == VMXNET3_MAP_PAGE) 320 dma_unmap_page(&pdev->dev, tbi->dma_addr, tbi->len, 321 PCI_DMA_TODEVICE); 322 else 323 BUG_ON(tbi->map_type != VMXNET3_MAP_NONE); 324 325 tbi->map_type = VMXNET3_MAP_NONE; /* to help debugging */ 326 } 327 328 329 static int 330 vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq, 331 struct pci_dev *pdev, struct vmxnet3_adapter *adapter) 332 { 333 struct sk_buff *skb; 334 int entries = 0; 335 336 /* no out of order completion */ 337 BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp); 
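/*
 * On big endian builds the VMXNET3_TXDESC_GET_EOP()/GET_GEN() accessors
 * used below go through get_bitfield32() above. A worked example with
 * illustrative numbers -- a 1-bit field at bit position 14 of a dword
 * whose little endian value is 0x00004000:
 *
 *	mask = ((1 << 1) - 1) << 14		-> 0x00004000
 *	temp = le32_to_cpu(*bitfield) & mask	-> 0x00004000
 *	temp >>= 14				-> 1
 *
 * i.e. the helper byte-swaps the whole dword once, then isolates `size`
 * bits starting at `pos`, so one field layout serves both endiannesses.
 */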
338 BUG_ON(VMXNET3_TXDESC_GET_EOP(&(tq->tx_ring.base[eop_idx].txd)) != 1); 339 340 skb = tq->buf_info[eop_idx].skb; 341 BUG_ON(skb == NULL); 342 tq->buf_info[eop_idx].skb = NULL; 343 344 VMXNET3_INC_RING_IDX_ONLY(eop_idx, tq->tx_ring.size); 345 346 while (tq->tx_ring.next2comp != eop_idx) { 347 vmxnet3_unmap_tx_buf(tq->buf_info + tq->tx_ring.next2comp, 348 pdev); 349 350 /* update next2comp w/o tx_lock. Since we are marking more, 351 * instead of less, tx ring entries avail, the worst case is 352 * that the tx routine incorrectly re-queues a pkt due to 353 * insufficient tx ring entries. 354 */ 355 vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring); 356 entries++; 357 } 358 359 dev_kfree_skb_any(skb); 360 return entries; 361 } 362 363 364 static int 365 vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq, 366 struct vmxnet3_adapter *adapter) 367 { 368 int completed = 0; 369 union Vmxnet3_GenericDesc *gdesc; 370 371 gdesc = tq->comp_ring.base + tq->comp_ring.next2proc; 372 while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) { 373 completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX( 374 &gdesc->tcd), tq, adapter->pdev, 375 adapter); 376 377 vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring); 378 gdesc = tq->comp_ring.base + tq->comp_ring.next2proc; 379 } 380 381 if (completed) { 382 spin_lock(&tq->tx_lock); 383 if (unlikely(vmxnet3_tq_stopped(tq, adapter) && 384 vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) > 385 VMXNET3_WAKE_QUEUE_THRESHOLD(tq) && 386 netif_carrier_ok(adapter->netdev))) { 387 vmxnet3_tq_wake(tq, adapter); 388 } 389 spin_unlock(&tq->tx_lock); 390 } 391 return completed; 392 } 393 394 395 static void 396 vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq, 397 struct vmxnet3_adapter *adapter) 398 { 399 int i; 400 401 while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) { 402 struct vmxnet3_tx_buf_info *tbi; 403 404 tbi = tq->buf_info + tq->tx_ring.next2comp; 405 406 vmxnet3_unmap_tx_buf(tbi, adapter->pdev); 407 if (tbi->skb) { 408 dev_kfree_skb_any(tbi->skb); 409 tbi->skb = NULL; 410 } 411 vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring); 412 } 413 414 /* sanity check, verify all buffers are indeed unmapped and freed */ 415 for (i = 0; i < tq->tx_ring.size; i++) { 416 BUG_ON(tq->buf_info[i].skb != NULL || 417 tq->buf_info[i].map_type != VMXNET3_MAP_NONE); 418 } 419 420 tq->tx_ring.gen = VMXNET3_INIT_GEN; 421 tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0; 422 423 tq->comp_ring.gen = VMXNET3_INIT_GEN; 424 tq->comp_ring.next2proc = 0; 425 } 426 427 428 static void 429 vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq, 430 struct vmxnet3_adapter *adapter) 431 { 432 if (tq->tx_ring.base) { 433 dma_free_coherent(&adapter->pdev->dev, tq->tx_ring.size * 434 sizeof(struct Vmxnet3_TxDesc), 435 tq->tx_ring.base, tq->tx_ring.basePA); 436 tq->tx_ring.base = NULL; 437 } 438 if (tq->data_ring.base) { 439 dma_free_coherent(&adapter->pdev->dev, 440 tq->data_ring.size * tq->txdata_desc_size, 441 tq->data_ring.base, tq->data_ring.basePA); 442 tq->data_ring.base = NULL; 443 } 444 if (tq->comp_ring.base) { 445 dma_free_coherent(&adapter->pdev->dev, tq->comp_ring.size * 446 sizeof(struct Vmxnet3_TxCompDesc), 447 tq->comp_ring.base, tq->comp_ring.basePA); 448 tq->comp_ring.base = NULL; 449 } 450 if (tq->buf_info) { 451 dma_free_coherent(&adapter->pdev->dev, 452 tq->tx_ring.size * sizeof(tq->buf_info[0]), 453 tq->buf_info, tq->buf_info_pa); 454 tq->buf_info = NULL; 455 } 456 } 457 458 459 /* Destroy all tx queues */ 460 void 461 vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter) 462 { 463 int i; 464 465 
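/*
 * Aside on the ring-advance helpers used throughout this file
 * (vmxnet3_cmd_ring_adv_next2comp(), vmxnet3_comp_ring_adv_next2proc(),
 * ...): they live in vmxnet3_int.h, not in this trace, but behave as a
 * wrap-around increment that toggles the ring's gen bit on wrap -- a
 * sketch under that assumption:
 *
 *	if (++idx == ring->size) {
 *		idx = 0;
 *		ring->gen ^= 1;
 *	}
 *
 * A matching gen bit (as tested in the vmxnet3_tq_tx_complete() loop
 * above) therefore means "written during the current lap, ours to consume".
 */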
for (i = 0; i < adapter->num_tx_queues; i++) 466 vmxnet3_tq_destroy(&adapter->tx_queue[i], adapter); 467 } 468 469 470 static void 471 vmxnet3_tq_init(struct vmxnet3_tx_queue *tq, 472 struct vmxnet3_adapter *adapter) 473 { 474 int i; 475 476 /* reset the tx ring contents to 0 and reset the tx ring states */ 477 memset(tq->tx_ring.base, 0, tq->tx_ring.size * 478 sizeof(struct Vmxnet3_TxDesc)); 479 tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0; 480 tq->tx_ring.gen = VMXNET3_INIT_GEN; 481 482 memset(tq->data_ring.base, 0, 483 tq->data_ring.size * tq->txdata_desc_size); 484 485 /* reset the tx comp ring contents to 0 and reset comp ring states */ 486 memset(tq->comp_ring.base, 0, tq->comp_ring.size * 487 sizeof(struct Vmxnet3_TxCompDesc)); 488 tq->comp_ring.next2proc = 0; 489 tq->comp_ring.gen = VMXNET3_INIT_GEN; 490 491 /* reset the bookkeeping data */ 492 memset(tq->buf_info, 0, sizeof(tq->buf_info[0]) * tq->tx_ring.size); 493 for (i = 0; i < tq->tx_ring.size; i++) 494 tq->buf_info[i].map_type = VMXNET3_MAP_NONE; 495 496 /* stats are not reset */ 497 } 498 499 500 static int 501 vmxnet3_tq_create(struct vmxnet3_tx_queue *tq, 502 struct vmxnet3_adapter *adapter) 503 { 504 size_t sz; 505 506 BUG_ON(tq->tx_ring.base || tq->data_ring.base || 507 tq->comp_ring.base || tq->buf_info); 508 509 tq->tx_ring.base = dma_alloc_coherent(&adapter->pdev->dev, 510 tq->tx_ring.size * sizeof(struct Vmxnet3_TxDesc), 511 &tq->tx_ring.basePA, GFP_KERNEL); 512 if (!tq->tx_ring.base) { 513 netdev_err(adapter->netdev, "failed to allocate tx ring\n"); 514 goto err; 515 } 516 517 tq->data_ring.base = dma_alloc_coherent(&adapter->pdev->dev, 518 tq->data_ring.size * tq->txdata_desc_size, 519 &tq->data_ring.basePA, GFP_KERNEL); 520 if (!tq->data_ring.base) { 521 netdev_err(adapter->netdev, "failed to allocate tx data ring\n"); 522 goto err; 523 } 524 525 tq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev, 526 tq->comp_ring.size * sizeof(struct Vmxnet3_TxCompDesc), 527 &tq->comp_ring.basePA, GFP_KERNEL); 528 if (!tq->comp_ring.base) { 529 netdev_err(adapter->netdev, "failed to allocate tx comp ring\n"); 530 goto err; 531 } 532 533 sz = tq->tx_ring.size * sizeof(tq->buf_info[0]); 534 tq->buf_info = dma_zalloc_coherent(&adapter->pdev->dev, sz, 535 &tq->buf_info_pa, GFP_KERNEL); 536 if (!tq->buf_info) 537 goto err; 538 539 return 0; 540 541 err: 542 vmxnet3_tq_destroy(tq, adapter); 543 return -ENOMEM; 544 } 545 546 static void 547 vmxnet3_tq_cleanup_all(struct vmxnet3_adapter *adapter) 548 { 549 int i; 550 551 for (i = 0; i < adapter->num_tx_queues; i++) 552 vmxnet3_tq_cleanup(&adapter->tx_queue[i], adapter); 553 } 554 555 /* 556 * starting from ring->next2fill, allocate rx buffers for the given ring 557 * of the rx queue and update the rx desc. 
stop after @num_to_alloc buffers 558 * are allocated or allocation fails 559 */ 560 561 static int 562 vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx, 563 int num_to_alloc, struct vmxnet3_adapter *adapter) 564 { 565 int num_allocated = 0; 566 struct vmxnet3_rx_buf_info *rbi_base = rq->buf_info[ring_idx]; 567 struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx]; 568 u32 val; 569 570 while (num_allocated <= num_to_alloc) { 571 struct vmxnet3_rx_buf_info *rbi; 572 union Vmxnet3_GenericDesc *gd; 573 574 rbi = rbi_base + ring->next2fill; 575 gd = ring->base + ring->next2fill; 576 577 if (rbi->buf_type == VMXNET3_RX_BUF_SKB) { 578 if (rbi->skb == NULL) { 579 rbi->skb = __netdev_alloc_skb_ip_align(adapter->netdev, 580 rbi->len, 581 GFP_KERNEL); 582 if (unlikely(rbi->skb == NULL)) { 583 rq->stats.rx_buf_alloc_failure++; 584 break; 585 } 586 587 rbi->dma_addr = dma_map_single( 588 &adapter->pdev->dev, 589 rbi->skb->data, rbi->len, 590 PCI_DMA_FROMDEVICE); 591 if (dma_mapping_error(&adapter->pdev->dev, 592 rbi->dma_addr)) { 593 dev_kfree_skb_any(rbi->skb); 594 rq->stats.rx_buf_alloc_failure++; 595 break; 596 } 597 } else { 598 /* rx buffer skipped by the device */ 599 } 600 val = VMXNET3_RXD_BTYPE_HEAD << VMXNET3_RXD_BTYPE_SHIFT; 601 } else { 602 BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE || 603 rbi->len != PAGE_SIZE); 604 605 if (rbi->page == NULL) { 606 rbi->page = alloc_page(GFP_ATOMIC); 607 if (unlikely(rbi->page == NULL)) { 608 rq->stats.rx_buf_alloc_failure++; 609 break; 610 } 611 rbi->dma_addr = dma_map_page( 612 &adapter->pdev->dev, 613 rbi->page, 0, PAGE_SIZE, 614 PCI_DMA_FROMDEVICE); 615 if (dma_mapping_error(&adapter->pdev->dev, 616 rbi->dma_addr)) { 617 put_page(rbi->page); 618 rq->stats.rx_buf_alloc_failure++; 619 break; 620 } 621 } else { 622 /* rx buffers skipped by the device */ 623 } 624 val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT; 625 } 626 627 gd->rxd.addr = cpu_to_le64(rbi->dma_addr); 628 gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT) 629 | val | rbi->len); 630 631 /* Fill the last buffer but don't mark it ready, or else the 632 * device will think that the queue is full */ 633 if (num_allocated == num_to_alloc) 634 break; 635 636 gd->dword[2] |= cpu_to_le32(ring->gen << VMXNET3_RXD_GEN_SHIFT); 637 num_allocated++; 638 vmxnet3_cmd_ring_adv_next2fill(ring); 639 } 640 641 netdev_dbg(adapter->netdev, 642 "alloc_rx_buf: %d allocated, next2fill %u, next2comp %u\n", 643 num_allocated, ring->next2fill, ring->next2comp); 644 645 /* so that the device can distinguish a full ring and an empty ring */ 646 BUG_ON(num_allocated != 0 && ring->next2fill == ring->next2comp); 647 648 return num_allocated; 649 } 650 651 652 static void 653 vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd, 654 struct vmxnet3_rx_buf_info *rbi) 655 { 656 struct skb_frag_struct *frag = skb_shinfo(skb)->frags + 657 skb_shinfo(skb)->nr_frags; 658 659 BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS); 660 661 __skb_frag_set_page(frag, rbi->page); 662 frag->page_offset = 0; 663 skb_frag_size_set(frag, rcd->len); 664 skb->data_len += rcd->len; 665 skb->truesize += PAGE_SIZE; 666 skb_shinfo(skb)->nr_frags++; 667 } 668 669 670 static int 671 vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx, 672 struct vmxnet3_tx_queue *tq, struct pci_dev *pdev, 673 struct vmxnet3_adapter *adapter) 674 { 675 u32 dw2, len; 676 unsigned long buf_offset; 677 int i; 678 union Vmxnet3_GenericDesc *gdesc; 679 struct vmxnet3_tx_buf_info *tbi = NULL; 680 681 
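/*
 * Why the allocation loop in vmxnet3_rq_alloc_rx_buf() above leaves the
 * last buffer unpublished: its descriptor keeps the stale gen bit, so
 * next2fill never catches up to next2comp while buffers are outstanding,
 * and next2fill == next2comp can unambiguously mean "empty". A sketch of
 * the resulting free-slot count, assuming vmxnet3_cmd_ring_desc_avail()
 * uses the usual one-slot-reserved formula:
 *
 *	free = (next2comp > next2fill ? 0 : size)
 *		+ next2comp - next2fill - 1;
 *	// size 8, next2fill 5, next2comp 2: free = 8 + 2 - 5 - 1 = 4
 */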
BUG_ON(ctx->copy_size > skb_headlen(skb)); 682 683 /* use the previous gen bit for the SOP desc */ 684 dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT; 685 686 ctx->sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill; 687 gdesc = ctx->sop_txd; /* both loops below can be skipped */ 688 689 /* no need to map the buffer if headers are copied */ 690 if (ctx->copy_size) { 691 ctx->sop_txd->txd.addr = cpu_to_le64(tq->data_ring.basePA + 692 tq->tx_ring.next2fill * 693 tq->txdata_desc_size); 694 ctx->sop_txd->dword[2] = cpu_to_le32(dw2 | ctx->copy_size); 695 ctx->sop_txd->dword[3] = 0; 696 697 tbi = tq->buf_info + tq->tx_ring.next2fill; 698 tbi->map_type = VMXNET3_MAP_NONE; 699 700 netdev_dbg(adapter->netdev, 701 "txd[%u]: 0x%Lx 0x%x 0x%x\n", 702 tq->tx_ring.next2fill, 703 le64_to_cpu(ctx->sop_txd->txd.addr), 704 ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]); 705 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring); 706 707 /* use the right gen for non-SOP desc */ 708 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT; 709 } 710 711 /* linear part can use multiple tx desc if it's big */ 712 len = skb_headlen(skb) - ctx->copy_size; 713 buf_offset = ctx->copy_size; 714 while (len) { 715 u32 buf_size; 716 717 if (len < VMXNET3_MAX_TX_BUF_SIZE) { 718 buf_size = len; 719 dw2 |= len; 720 } else { 721 buf_size = VMXNET3_MAX_TX_BUF_SIZE; 722 /* spec says that for TxDesc.len, 0 == 2^14 */ 723 } 724 725 tbi = tq->buf_info + tq->tx_ring.next2fill; 726 tbi->map_type = VMXNET3_MAP_SINGLE; 727 tbi->dma_addr = dma_map_single(&adapter->pdev->dev, 728 skb->data + buf_offset, buf_size, 729 PCI_DMA_TODEVICE); 730 if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr)) 731 return -EFAULT; 732 733 tbi->len = buf_size; 734 735 gdesc = tq->tx_ring.base + tq->tx_ring.next2fill; 736 BUG_ON(gdesc->txd.gen == tq->tx_ring.gen); 737 738 gdesc->txd.addr = cpu_to_le64(tbi->dma_addr); 739 gdesc->dword[2] = cpu_to_le32(dw2); 740 gdesc->dword[3] = 0; 741 742 netdev_dbg(adapter->netdev, 743 "txd[%u]: 0x%Lx 0x%x 0x%x\n", 744 tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr), 745 le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]); 746 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring); 747 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT; 748 749 len -= buf_size; 750 buf_offset += buf_size; 751 } 752 753 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 754 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; 755 u32 buf_size; 756 757 buf_offset = 0; 758 len = skb_frag_size(frag); 759 while (len) { 760 tbi = tq->buf_info + tq->tx_ring.next2fill; 761 if (len < VMXNET3_MAX_TX_BUF_SIZE) { 762 buf_size = len; 763 dw2 |= len; 764 } else { 765 buf_size = VMXNET3_MAX_TX_BUF_SIZE; 766 /* spec says that for TxDesc.len, 0 == 2^14 */ 767 } 768 tbi->map_type = VMXNET3_MAP_PAGE; 769 tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag, 770 buf_offset, buf_size, 771 DMA_TO_DEVICE); 772 if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr)) 773 return -EFAULT; 774 775 tbi->len = buf_size; 776 777 gdesc = tq->tx_ring.base + tq->tx_ring.next2fill; 778 BUG_ON(gdesc->txd.gen == tq->tx_ring.gen); 779 780 gdesc->txd.addr = cpu_to_le64(tbi->dma_addr); 781 gdesc->dword[2] = cpu_to_le32(dw2); 782 gdesc->dword[3] = 0; 783 784 netdev_dbg(adapter->netdev, 785 "txd[%u]: 0x%llx %u %u\n", 786 tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr), 787 le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]); 788 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring); 789 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT; 790 791 len -= buf_size; 792 buf_offset += buf_size; 793 
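/*
 * Worked example of the VMXNET3_MAX_TX_BUF_SIZE (2^14 = 16384 byte) split
 * performed by both loops above, with illustrative numbers: a 40000-byte
 * region becomes three descriptors of 16384, 16384 and 7232 bytes. For the
 * two full-size chunks the 14-bit len field in dw2 stays 0, which the
 * device defines to mean 2^14; only the final 7232 is or-ed in literally.
 */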
} 794 } 795 796 ctx->eop_txd = gdesc; 797 798 /* set the last buf_info for the pkt */ 799 tbi->skb = skb; 800 tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base; 801 802 return 0; 803 } 804 805 806 /* Init all tx queues */ 807 static void 808 vmxnet3_tq_init_all(struct vmxnet3_adapter *adapter) 809 { 810 int i; 811 812 for (i = 0; i < adapter->num_tx_queues; i++) 813 vmxnet3_tq_init(&adapter->tx_queue[i], adapter); 814 } 815 816 817 /* 818 * parse relevant protocol headers: 819 * For a tso pkt, relevant headers are L2/3/4 including options 820 * For a pkt requesting csum offloading, they are L2/3 and may include L4 821 * if it's a TCP/UDP pkt 822 * 823 * Returns: 824 * -1: error happens during parsing 825 * 0: protocol headers parsed, but too big to be copied 826 * 1: protocol headers parsed and copied 827 * 828 * Other effects: 829 * 1. related *ctx fields are updated. 830 * 2. ctx->copy_size is # of bytes copied 831 * 3. the portion to be copied is guaranteed to be in the linear part 832 * 833 */ 834 static int 835 vmxnet3_parse_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, 836 struct vmxnet3_tx_ctx *ctx, 837 struct vmxnet3_adapter *adapter) 838 { 839 u8 protocol = 0; 840 841 if (ctx->mss) { /* TSO */ 842 ctx->eth_ip_hdr_size = skb_transport_offset(skb); 843 ctx->l4_hdr_size = tcp_hdrlen(skb); 844 ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size; 845 } else { 846 if (skb->ip_summed == CHECKSUM_PARTIAL) { 847 ctx->eth_ip_hdr_size = skb_checksum_start_offset(skb); 848 849 if (ctx->ipv4) { 850 const struct iphdr *iph = ip_hdr(skb); 851 852 protocol = iph->protocol; 853 } else if (ctx->ipv6) { 854 const struct ipv6hdr *ipv6h = ipv6_hdr(skb); 855 856 protocol = ipv6h->nexthdr; 857 } 858 859 switch (protocol) { 860 case IPPROTO_TCP: 861 ctx->l4_hdr_size = tcp_hdrlen(skb); 862 break; 863 case IPPROTO_UDP: 864 ctx->l4_hdr_size = sizeof(struct udphdr); 865 break; 866 default: 867 ctx->l4_hdr_size = 0; 868 break; 869 } 870 871 ctx->copy_size = min(ctx->eth_ip_hdr_size + 872 ctx->l4_hdr_size, skb->len); 873 } else { 874 ctx->eth_ip_hdr_size = 0; 875 ctx->l4_hdr_size = 0; 876 /* copy as much as allowed */ 877 ctx->copy_size = min_t(unsigned int, 878 tq->txdata_desc_size, 879 skb_headlen(skb)); 880 } 881 882 if (skb->len <= VMXNET3_HDR_COPY_SIZE) 883 ctx->copy_size = skb->len; 884 885 /* make sure headers are accessible directly */ 886 if (unlikely(!pskb_may_pull(skb, ctx->copy_size))) 887 goto err; 888 } 889 890 if (unlikely(ctx->copy_size > tq->txdata_desc_size)) { 891 tq->stats.oversized_hdr++; 892 ctx->copy_size = 0; 893 return 0; 894 } 895 896 return 1; 897 err: 898 return -1; 899 } 900 901 /* 902 * copy relevant protocol headers to the transmit ring: 903 * For a tso pkt, relevant headers are L2/3/4 including options 904 * For a pkt requesting csum offloading, they are L2/3 and may include L4 905 * if it's a TCP/UDP pkt 906 * 907 * 908 * Note that this requires that vmxnet3_parse_hdr be called first to set the 909 * appropriate bits in ctx 910 */ 911 static void 912 vmxnet3_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, 913 struct vmxnet3_tx_ctx *ctx, 914 struct vmxnet3_adapter *adapter) 915 { 916 struct Vmxnet3_TxDataDesc *tdd; 917 918 tdd = tq->data_ring.base + tq->tx_ring.next2fill; 919 920 memcpy(tdd->data, skb->data, ctx->copy_size); 921 netdev_dbg(adapter->netdev, 922 "copy %u bytes to dataRing[%u]\n", 923 ctx->copy_size, tq->tx_ring.next2fill); 924 } 925 926 927 static void 928 vmxnet3_prepare_tso(struct sk_buff *skb, 929 struct vmxnet3_tx_ctx *ctx) 930 { 931 
struct tcphdr *tcph = tcp_hdr(skb); 932 933 if (ctx->ipv4) { 934 struct iphdr *iph = ip_hdr(skb); 935 936 iph->check = 0; 937 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0, 938 IPPROTO_TCP, 0); 939 } else if (ctx->ipv6) { 940 struct ipv6hdr *iph = ipv6_hdr(skb); 941 942 tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0, 943 IPPROTO_TCP, 0); 944 } 945 } 946 947 static int txd_estimate(const struct sk_buff *skb) 948 { 949 int count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1; 950 int i; 951 952 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 953 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; 954 955 count += VMXNET3_TXD_NEEDED(skb_frag_size(frag)); 956 } 957 return count; 958 } 959 960 /* 961 * Transmits a pkt through a given tq 962 * Returns: 963 * NETDEV_TX_OK: descriptors are set up successfully 964 * NETDEV_TX_OK: error occurred, the pkt is dropped 965 * NETDEV_TX_BUSY: tx ring is full, queue is stopped 966 * 967 * Side-effects: 968 * 1. tx ring may be changed 969 * 2. tq stats may be updated accordingly 970 * 3. shared->txNumDeferred may be updated 971 */ 972 973 static int 974 vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, 975 struct vmxnet3_adapter *adapter, struct net_device *netdev) 976 { 977 int ret; 978 u32 count; 979 unsigned long flags; 980 struct vmxnet3_tx_ctx ctx; 981 union Vmxnet3_GenericDesc *gdesc; 982 #ifdef __BIG_ENDIAN_BITFIELD 983 /* Use temporary descriptor to avoid touching bits multiple times */ 984 union Vmxnet3_GenericDesc tempTxDesc; 985 #endif 986 987 count = txd_estimate(skb); 988 989 ctx.ipv4 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IP)); 990 ctx.ipv6 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IPV6)); 991 992 ctx.mss = skb_shinfo(skb)->gso_size; 993 if (ctx.mss) { 994 if (skb_header_cloned(skb)) { 995 if (unlikely(pskb_expand_head(skb, 0, 0, 996 GFP_ATOMIC) != 0)) { 997 tq->stats.drop_tso++; 998 goto drop_pkt; 999 } 1000 tq->stats.copy_skb_header++; 1001 } 1002 vmxnet3_prepare_tso(skb, &ctx); 1003 } else { 1004 if (unlikely(count > VMXNET3_MAX_TXD_PER_PKT)) { 1005 1006 /* non-tso pkts must not use more than 1007 * VMXNET3_MAX_TXD_PER_PKT entries 1008 */ 1009 if (skb_linearize(skb) != 0) { 1010 tq->stats.drop_too_many_frags++; 1011 goto drop_pkt; 1012 } 1013 tq->stats.linearized++; 1014 1015 /* recalculate the # of descriptors to use */ 1016 count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1; 1017 } 1018 } 1019 1020 ret = vmxnet3_parse_hdr(skb, tq, &ctx, adapter); 1021 if (ret >= 0) { 1022 BUG_ON(ret <= 0 && ctx.copy_size != 0); 1023 /* hdrs parsed, check against other limits */ 1024 if (ctx.mss) { 1025 if (unlikely(ctx.eth_ip_hdr_size + ctx.l4_hdr_size > 1026 VMXNET3_MAX_TX_BUF_SIZE)) { 1027 tq->stats.drop_oversized_hdr++; 1028 goto drop_pkt; 1029 } 1030 } else { 1031 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1032 if (unlikely(ctx.eth_ip_hdr_size + 1033 skb->csum_offset > 1034 VMXNET3_MAX_CSUM_OFFSET)) { 1035 tq->stats.drop_oversized_hdr++; 1036 goto drop_pkt; 1037 } 1038 } 1039 } 1040 } else { 1041 tq->stats.drop_hdr_inspect_err++; 1042 goto drop_pkt; 1043 } 1044 1045 spin_lock_irqsave(&tq->tx_lock, flags); 1046 1047 if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) { 1048 tq->stats.tx_ring_full++; 1049 netdev_dbg(adapter->netdev, 1050 "tx queue stopped on %s, next2comp %u" 1051 " next2fill %u\n", adapter->netdev->name, 1052 tq->tx_ring.next2comp, tq->tx_ring.next2fill); 1053 1054 vmxnet3_tq_stop(tq, adapter); 1055 spin_unlock_irqrestore(&tq->tx_lock, flags); 1056 return NETDEV_TX_BUSY; 1057 
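/*
 * On the estimate checked above: assuming VMXNET3_TXD_NEEDED() is the
 * usual ceil(size / 2^14) macro, a packet with 1000 linear bytes and two
 * 20000-byte frags needs 1 + 1 (header-copy desc) + 2 + 2 = 6 descriptors.
 * The same ceil-division shape appears below for TSO bookkeeping:
 * (skb->len - hlen + mss - 1) / mss is the segment count the device will
 * emit, e.g. (70000 - 54 + 1459) / 1460 = 48.
 */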
} 1058 1059 1060 vmxnet3_copy_hdr(skb, tq, &ctx, adapter); 1061 1062 /* fill tx descs related to addr & len */ 1063 if (vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter)) 1064 goto unlock_drop_pkt; 1065 1066 /* setup the EOP desc */ 1067 ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP); 1068 1069 /* setup the SOP desc */ 1070 #ifdef __BIG_ENDIAN_BITFIELD 1071 gdesc = &tempTxDesc; 1072 gdesc->dword[2] = ctx.sop_txd->dword[2]; 1073 gdesc->dword[3] = ctx.sop_txd->dword[3]; 1074 #else 1075 gdesc = ctx.sop_txd; 1076 #endif 1077 if (ctx.mss) { 1078 gdesc->txd.hlen = ctx.eth_ip_hdr_size + ctx.l4_hdr_size; 1079 gdesc->txd.om = VMXNET3_OM_TSO; 1080 gdesc->txd.msscof = ctx.mss; 1081 le32_add_cpu(&tq->shared->txNumDeferred, (skb->len - 1082 gdesc->txd.hlen + ctx.mss - 1) / ctx.mss); 1083 } else { 1084 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1085 gdesc->txd.hlen = ctx.eth_ip_hdr_size; 1086 gdesc->txd.om = VMXNET3_OM_CSUM; 1087 gdesc->txd.msscof = ctx.eth_ip_hdr_size + 1088 skb->csum_offset; 1089 } else { 1090 gdesc->txd.om = 0; 1091 gdesc->txd.msscof = 0; 1092 } 1093 le32_add_cpu(&tq->shared->txNumDeferred, 1); 1094 } 1095 1096 if (skb_vlan_tag_present(skb)) { 1097 gdesc->txd.ti = 1; 1098 gdesc->txd.tci = skb_vlan_tag_get(skb); 1099 } 1100 1101 /* finally flips the GEN bit of the SOP desc. */ 1102 gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^ 1103 VMXNET3_TXD_GEN); 1104 #ifdef __BIG_ENDIAN_BITFIELD 1105 /* Finished updating in bitfields of Tx Desc, so write them in original 1106 * place. 1107 */ 1108 vmxnet3_TxDescToLe((struct Vmxnet3_TxDesc *)gdesc, 1109 (struct Vmxnet3_TxDesc *)ctx.sop_txd); 1110 gdesc = ctx.sop_txd; 1111 #endif 1112 netdev_dbg(adapter->netdev, 1113 "txd[%u]: SOP 0x%Lx 0x%x 0x%x\n", 1114 (u32)(ctx.sop_txd - 1115 tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr), 1116 le32_to_cpu(gdesc->dword[2]), le32_to_cpu(gdesc->dword[3])); 1117 1118 spin_unlock_irqrestore(&tq->tx_lock, flags); 1119 1120 if (le32_to_cpu(tq->shared->txNumDeferred) >= 1121 le32_to_cpu(tq->shared->txThreshold)) { 1122 tq->shared->txNumDeferred = 0; 1123 VMXNET3_WRITE_BAR0_REG(adapter, 1124 VMXNET3_REG_TXPROD + tq->qid * 8, 1125 tq->tx_ring.next2fill); 1126 } 1127 1128 return NETDEV_TX_OK; 1129 1130 unlock_drop_pkt: 1131 spin_unlock_irqrestore(&tq->tx_lock, flags); 1132 drop_pkt: 1133 tq->stats.drop_total++; 1134 dev_kfree_skb_any(skb); 1135 return NETDEV_TX_OK; 1136 } 1137 1138 1139 static netdev_tx_t 1140 vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev) 1141 { 1142 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 1143 1144 BUG_ON(skb->queue_mapping > adapter->num_tx_queues); 1145 return vmxnet3_tq_xmit(skb, 1146 &adapter->tx_queue[skb->queue_mapping], 1147 adapter, netdev); 1148 } 1149 1150 1151 static void 1152 vmxnet3_rx_csum(struct vmxnet3_adapter *adapter, 1153 struct sk_buff *skb, 1154 union Vmxnet3_GenericDesc *gdesc) 1155 { 1156 if (!gdesc->rcd.cnc && adapter->netdev->features & NETIF_F_RXCSUM) { 1157 if (gdesc->rcd.v4 && 1158 (le32_to_cpu(gdesc->dword[3]) & 1159 VMXNET3_RCD_CSUM_OK) == VMXNET3_RCD_CSUM_OK) { 1160 skb->ip_summed = CHECKSUM_UNNECESSARY; 1161 BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp)); 1162 BUG_ON(gdesc->rcd.frg); 1163 } else if (gdesc->rcd.v6 && (le32_to_cpu(gdesc->dword[3]) & 1164 (1 << VMXNET3_RCD_TUC_SHIFT))) { 1165 skb->ip_summed = CHECKSUM_UNNECESSARY; 1166 BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp)); 1167 BUG_ON(gdesc->rcd.frg); 1168 } else { 1169 if (gdesc->rcd.csum) { 1170 skb->csum = htons(gdesc->rcd.csum); 1171 
skb->ip_summed = CHECKSUM_PARTIAL; 1172 } else { 1173 skb_checksum_none_assert(skb); 1174 } 1175 } 1176 } else { 1177 skb_checksum_none_assert(skb); 1178 } 1179 } 1180 1181 1182 static void 1183 vmxnet3_rx_error(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd, 1184 struct vmxnet3_rx_ctx *ctx, struct vmxnet3_adapter *adapter) 1185 { 1186 rq->stats.drop_err++; 1187 if (!rcd->fcs) 1188 rq->stats.drop_fcs++; 1189 1190 rq->stats.drop_total++; 1191 1192 /* 1193 * We do not unmap and chain the rx buffer to the skb. 1194 * We basically pretend this buffer is not used and will be recycled 1195 * by vmxnet3_rq_alloc_rx_buf() 1196 */ 1197 1198 /* 1199 * ctx->skb may be NULL if this is the first and the only one 1200 * desc for the pkt 1201 */ 1202 if (ctx->skb) 1203 dev_kfree_skb_irq(ctx->skb); 1204 1205 ctx->skb = NULL; 1206 } 1207 1208 1209 static u32 1210 vmxnet3_get_hdr_len(struct vmxnet3_adapter *adapter, struct sk_buff *skb, 1211 union Vmxnet3_GenericDesc *gdesc) 1212 { 1213 u32 hlen, maplen; 1214 union { 1215 void *ptr; 1216 struct ethhdr *eth; 1217 struct iphdr *ipv4; 1218 struct ipv6hdr *ipv6; 1219 struct tcphdr *tcp; 1220 } hdr; 1221 BUG_ON(gdesc->rcd.tcp == 0); 1222 1223 maplen = skb_headlen(skb); 1224 if (unlikely(sizeof(struct iphdr) + sizeof(struct tcphdr) > maplen)) 1225 return 0; 1226 1227 hdr.eth = eth_hdr(skb); 1228 if (gdesc->rcd.v4) { 1229 BUG_ON(hdr.eth->h_proto != htons(ETH_P_IP)); 1230 hdr.ptr += sizeof(struct ethhdr); 1231 BUG_ON(hdr.ipv4->protocol != IPPROTO_TCP); 1232 hlen = hdr.ipv4->ihl << 2; 1233 hdr.ptr += hdr.ipv4->ihl << 2; 1234 } else if (gdesc->rcd.v6) { 1235 BUG_ON(hdr.eth->h_proto != htons(ETH_P_IPV6)); 1236 hdr.ptr += sizeof(struct ethhdr); 1237 /* Use an estimated value, since we also need to handle 1238 * TSO case. 
1239 */ 1240 if (hdr.ipv6->nexthdr != IPPROTO_TCP) 1241 return sizeof(struct ipv6hdr) + sizeof(struct tcphdr); 1242 hlen = sizeof(struct ipv6hdr); 1243 hdr.ptr += sizeof(struct ipv6hdr); 1244 } else { 1245 /* Non-IP pkt, don't estimate header length */ 1246 return 0; 1247 } 1248 1249 if (hlen + sizeof(struct tcphdr) > maplen) 1250 return 0; 1251 1252 return (hlen + (hdr.tcp->doff << 2)); 1253 } 1254 1255 static int 1256 vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, 1257 struct vmxnet3_adapter *adapter, int quota) 1258 { 1259 static const u32 rxprod_reg[2] = { 1260 VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2 1261 }; 1262 u32 num_pkts = 0; 1263 bool skip_page_frags = false; 1264 struct Vmxnet3_RxCompDesc *rcd; 1265 struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx; 1266 u16 segCnt = 0, mss = 0; 1267 #ifdef __BIG_ENDIAN_BITFIELD 1268 struct Vmxnet3_RxDesc rxCmdDesc; 1269 struct Vmxnet3_RxCompDesc rxComp; 1270 #endif 1271 vmxnet3_getRxComp(rcd, &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, 1272 &rxComp); 1273 while (rcd->gen == rq->comp_ring.gen) { 1274 struct vmxnet3_rx_buf_info *rbi; 1275 struct sk_buff *skb, *new_skb = NULL; 1276 struct page *new_page = NULL; 1277 dma_addr_t new_dma_addr; 1278 int num_to_alloc; 1279 struct Vmxnet3_RxDesc *rxd; 1280 u32 idx, ring_idx; 1281 struct vmxnet3_cmd_ring *ring = NULL; 1282 if (num_pkts >= quota) { 1283 /* we may stop even before we see the EOP desc of 1284 * the current pkt 1285 */ 1286 break; 1287 } 1288 BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2 && 1289 rcd->rqID != rq->dataRingQid); 1290 idx = rcd->rxdIdx; 1291 ring_idx = VMXNET3_GET_RING_IDX(adapter, rcd->rqID); 1292 ring = rq->rx_ring + ring_idx; 1293 vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd, 1294 &rxCmdDesc); 1295 rbi = rq->buf_info[ring_idx] + idx; 1296 1297 BUG_ON(rxd->addr != rbi->dma_addr || 1298 rxd->len != rbi->len); 1299 1300 if (unlikely(rcd->eop && rcd->err)) { 1301 vmxnet3_rx_error(rq, rcd, ctx, adapter); 1302 goto rcd_done; 1303 } 1304 1305 if (rcd->sop) { /* first buf of the pkt */ 1306 bool rxDataRingUsed; 1307 u16 len; 1308 1309 BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_HEAD || 1310 (rcd->rqID != rq->qid && 1311 rcd->rqID != rq->dataRingQid)); 1312 1313 BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_SKB); 1314 BUG_ON(ctx->skb != NULL || rbi->skb == NULL); 1315 1316 if (unlikely(rcd->len == 0)) { 1317 /* Pretend the rx buffer is skipped. */ 1318 BUG_ON(!(rcd->sop && rcd->eop)); 1319 netdev_dbg(adapter->netdev, 1320 "rxRing[%u][%u] 0 length\n", 1321 ring_idx, idx); 1322 goto rcd_done; 1323 } 1324 1325 skip_page_frags = false; 1326 ctx->skb = rbi->skb; 1327 1328 rxDataRingUsed = 1329 VMXNET3_RX_DATA_RING(adapter, rcd->rqID); 1330 len = rxDataRingUsed ? rcd->len : rbi->len; 1331 new_skb = netdev_alloc_skb_ip_align(adapter->netdev, 1332 len); 1333 if (new_skb == NULL) { 1334 /* Skb allocation failed, do not handover this 1335 * skb to stack. Reuse it. 
Drop the existing pkt 1336 */ 1337 rq->stats.rx_buf_alloc_failure++; 1338 ctx->skb = NULL; 1339 rq->stats.drop_total++; 1340 skip_page_frags = true; 1341 goto rcd_done; 1342 } 1343 1344 if (rxDataRingUsed) { 1345 size_t sz; 1346 1347 BUG_ON(rcd->len > rq->data_ring.desc_size); 1348 1349 ctx->skb = new_skb; 1350 sz = rcd->rxdIdx * rq->data_ring.desc_size; 1351 memcpy(new_skb->data, 1352 &rq->data_ring.base[sz], rcd->len); 1353 } else { 1354 ctx->skb = rbi->skb; 1355 1356 new_dma_addr = 1357 dma_map_single(&adapter->pdev->dev, 1358 new_skb->data, rbi->len, 1359 PCI_DMA_FROMDEVICE); 1360 if (dma_mapping_error(&adapter->pdev->dev, 1361 new_dma_addr)) { 1362 dev_kfree_skb(new_skb); 1363 /* Skb allocation failed, do not 1364 * handover this skb to stack. Reuse 1365 * it. Drop the existing pkt. 1366 */ 1367 rq->stats.rx_buf_alloc_failure++; 1368 ctx->skb = NULL; 1369 rq->stats.drop_total++; 1370 skip_page_frags = true; 1371 goto rcd_done; 1372 } 1373 1374 dma_unmap_single(&adapter->pdev->dev, 1375 rbi->dma_addr, 1376 rbi->len, 1377 PCI_DMA_FROMDEVICE); 1378 1379 /* Immediate refill */ 1380 rbi->skb = new_skb; 1381 rbi->dma_addr = new_dma_addr; 1382 rxd->addr = cpu_to_le64(rbi->dma_addr); 1383 rxd->len = rbi->len; 1384 } 1385 1386 #ifdef VMXNET3_RSS 1387 if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE && 1388 (adapter->netdev->features & NETIF_F_RXHASH)) 1389 skb_set_hash(ctx->skb, 1390 le32_to_cpu(rcd->rssHash), 1391 PKT_HASH_TYPE_L3); 1392 #endif 1393 skb_put(ctx->skb, rcd->len); 1394 1395 if (VMXNET3_VERSION_GE_2(adapter) && 1396 rcd->type == VMXNET3_CDTYPE_RXCOMP_LRO) { 1397 struct Vmxnet3_RxCompDescExt *rcdlro; 1398 rcdlro = (struct Vmxnet3_RxCompDescExt *)rcd; 1399 1400 segCnt = rcdlro->segCnt; 1401 WARN_ON_ONCE(segCnt == 0); 1402 mss = rcdlro->mss; 1403 if (unlikely(segCnt <= 1)) 1404 segCnt = 0; 1405 } else { 1406 segCnt = 0; 1407 } 1408 } else { 1409 BUG_ON(ctx->skb == NULL && !skip_page_frags); 1410 1411 /* non SOP buffer must be type 1 in most cases */ 1412 BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE); 1413 BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY); 1414 1415 /* If an sop buffer was dropped, skip all 1416 * following non-sop fragments. They will be reused. 1417 */ 1418 if (skip_page_frags) 1419 goto rcd_done; 1420 1421 if (rcd->len) { 1422 new_page = alloc_page(GFP_ATOMIC); 1423 /* Replacement page frag could not be allocated. 1424 * Reuse this page. Drop the pkt and free the 1425 * skb which contained this page as a frag. Skip 1426 * processing all the following non-sop frags. 
1427 */ 1428 if (unlikely(!new_page)) { 1429 rq->stats.rx_buf_alloc_failure++; 1430 dev_kfree_skb(ctx->skb); 1431 ctx->skb = NULL; 1432 skip_page_frags = true; 1433 goto rcd_done; 1434 } 1435 new_dma_addr = dma_map_page(&adapter->pdev->dev, 1436 new_page, 1437 0, PAGE_SIZE, 1438 PCI_DMA_FROMDEVICE); 1439 if (dma_mapping_error(&adapter->pdev->dev, 1440 new_dma_addr)) { 1441 put_page(new_page); 1442 rq->stats.rx_buf_alloc_failure++; 1443 dev_kfree_skb(ctx->skb); 1444 ctx->skb = NULL; 1445 skip_page_frags = true; 1446 goto rcd_done; 1447 } 1448 1449 dma_unmap_page(&adapter->pdev->dev, 1450 rbi->dma_addr, rbi->len, 1451 PCI_DMA_FROMDEVICE); 1452 1453 vmxnet3_append_frag(ctx->skb, rcd, rbi); 1454 1455 /* Immediate refill */ 1456 rbi->page = new_page; 1457 rbi->dma_addr = new_dma_addr; 1458 rxd->addr = cpu_to_le64(rbi->dma_addr); 1459 rxd->len = rbi->len; 1460 } 1461 } 1462 1463 1464 skb = ctx->skb; 1465 if (rcd->eop) { 1466 u32 mtu = adapter->netdev->mtu; 1467 skb->len += skb->data_len; 1468 1469 vmxnet3_rx_csum(adapter, skb, 1470 (union Vmxnet3_GenericDesc *)rcd); 1471 skb->protocol = eth_type_trans(skb, adapter->netdev); 1472 if (!rcd->tcp || !adapter->lro) 1473 goto not_lro; 1474 1475 if (segCnt != 0 && mss != 0) { 1476 skb_shinfo(skb)->gso_type = rcd->v4 ? 1477 SKB_GSO_TCPV4 : SKB_GSO_TCPV6; 1478 skb_shinfo(skb)->gso_size = mss; 1479 skb_shinfo(skb)->gso_segs = segCnt; 1480 } else if (segCnt != 0 || skb->len > mtu) { 1481 u32 hlen; 1482 1483 hlen = vmxnet3_get_hdr_len(adapter, skb, 1484 (union Vmxnet3_GenericDesc *)rcd); 1485 if (hlen == 0) 1486 goto not_lro; 1487 1488 skb_shinfo(skb)->gso_type = 1489 rcd->v4 ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6; 1490 if (segCnt != 0) { 1491 skb_shinfo(skb)->gso_segs = segCnt; 1492 skb_shinfo(skb)->gso_size = 1493 DIV_ROUND_UP(skb->len - 1494 hlen, segCnt); 1495 } else { 1496 skb_shinfo(skb)->gso_size = mtu - hlen; 1497 } 1498 } 1499 not_lro: 1500 if (unlikely(rcd->ts)) 1501 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rcd->tci); 1502 1503 if (adapter->netdev->features & NETIF_F_LRO) 1504 netif_receive_skb(skb); 1505 else 1506 napi_gro_receive(&rq->napi, skb); 1507 1508 ctx->skb = NULL; 1509 num_pkts++; 1510 } 1511 1512 rcd_done: 1513 /* device may have skipped some rx descs */ 1514 ring->next2comp = idx; 1515 num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring); 1516 ring = rq->rx_ring + ring_idx; 1517 while (num_to_alloc) { 1518 vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd, 1519 &rxCmdDesc); 1520 BUG_ON(!rxd->addr); 1521 1522 /* Recv desc is ready to be used by the device */ 1523 rxd->gen = ring->gen; 1524 vmxnet3_cmd_ring_adv_next2fill(ring); 1525 num_to_alloc--; 1526 } 1527 1528 /* if needed, update the register */ 1529 if (unlikely(rq->shared->updateRxProd)) { 1530 VMXNET3_WRITE_BAR0_REG(adapter, 1531 rxprod_reg[ring_idx] + rq->qid * 8, 1532 ring->next2fill); 1533 } 1534 1535 vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring); 1536 vmxnet3_getRxComp(rcd, 1537 &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp); 1538 } 1539 1540 return num_pkts; 1541 } 1542 1543 1544 static void 1545 vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq, 1546 struct vmxnet3_adapter *adapter) 1547 { 1548 u32 i, ring_idx; 1549 struct Vmxnet3_RxDesc *rxd; 1550 1551 for (ring_idx = 0; ring_idx < 2; ring_idx++) { 1552 for (i = 0; i < rq->rx_ring[ring_idx].size; i++) { 1553 #ifdef __BIG_ENDIAN_BITFIELD 1554 struct Vmxnet3_RxDesc rxDesc; 1555 #endif 1556 vmxnet3_getRxDesc(rxd, 1557 &rq->rx_ring[ring_idx].base[i].rxd, &rxDesc); 1558 1559 if (rxd->btype == 
VMXNET3_RXD_BTYPE_HEAD && 1560 rq->buf_info[ring_idx][i].skb) { 1561 dma_unmap_single(&adapter->pdev->dev, rxd->addr, 1562 rxd->len, PCI_DMA_FROMDEVICE); 1563 dev_kfree_skb(rq->buf_info[ring_idx][i].skb); 1564 rq->buf_info[ring_idx][i].skb = NULL; 1565 } else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY && 1566 rq->buf_info[ring_idx][i].page) { 1567 dma_unmap_page(&adapter->pdev->dev, rxd->addr, 1568 rxd->len, PCI_DMA_FROMDEVICE); 1569 put_page(rq->buf_info[ring_idx][i].page); 1570 rq->buf_info[ring_idx][i].page = NULL; 1571 } 1572 } 1573 1574 rq->rx_ring[ring_idx].gen = VMXNET3_INIT_GEN; 1575 rq->rx_ring[ring_idx].next2fill = 1576 rq->rx_ring[ring_idx].next2comp = 0; 1577 } 1578 1579 rq->comp_ring.gen = VMXNET3_INIT_GEN; 1580 rq->comp_ring.next2proc = 0; 1581 } 1582 1583 1584 static void 1585 vmxnet3_rq_cleanup_all(struct vmxnet3_adapter *adapter) 1586 { 1587 int i; 1588 1589 for (i = 0; i < adapter->num_rx_queues; i++) 1590 vmxnet3_rq_cleanup(&adapter->rx_queue[i], adapter); 1591 } 1592 1593 1594 static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq, 1595 struct vmxnet3_adapter *adapter) 1596 { 1597 int i; 1598 int j; 1599 1600 /* all rx buffers must have already been freed */ 1601 for (i = 0; i < 2; i++) { 1602 if (rq->buf_info[i]) { 1603 for (j = 0; j < rq->rx_ring[i].size; j++) 1604 BUG_ON(rq->buf_info[i][j].page != NULL); 1605 } 1606 } 1607 1608 1609 for (i = 0; i < 2; i++) { 1610 if (rq->rx_ring[i].base) { 1611 dma_free_coherent(&adapter->pdev->dev, 1612 rq->rx_ring[i].size 1613 * sizeof(struct Vmxnet3_RxDesc), 1614 rq->rx_ring[i].base, 1615 rq->rx_ring[i].basePA); 1616 rq->rx_ring[i].base = NULL; 1617 } 1618 rq->buf_info[i] = NULL; 1619 } 1620 1621 if (rq->data_ring.base) { 1622 dma_free_coherent(&adapter->pdev->dev, 1623 rq->rx_ring[0].size * rq->data_ring.desc_size, 1624 rq->data_ring.base, rq->data_ring.basePA); 1625 rq->data_ring.base = NULL; 1626 } 1627 1628 if (rq->comp_ring.base) { 1629 dma_free_coherent(&adapter->pdev->dev, rq->comp_ring.size 1630 * sizeof(struct Vmxnet3_RxCompDesc), 1631 rq->comp_ring.base, rq->comp_ring.basePA); 1632 rq->comp_ring.base = NULL; 1633 } 1634 1635 if (rq->buf_info[0]) { 1636 size_t sz = sizeof(struct vmxnet3_rx_buf_info) * 1637 (rq->rx_ring[0].size + rq->rx_ring[1].size); 1638 dma_free_coherent(&adapter->pdev->dev, sz, rq->buf_info[0], 1639 rq->buf_info_pa); 1640 } 1641 } 1642 1643 void 1644 vmxnet3_rq_destroy_all_rxdataring(struct vmxnet3_adapter *adapter) 1645 { 1646 int i; 1647 1648 for (i = 0; i < adapter->num_rx_queues; i++) { 1649 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i]; 1650 1651 if (rq->data_ring.base) { 1652 dma_free_coherent(&adapter->pdev->dev, 1653 (rq->rx_ring[0].size * 1654 rq->data_ring.desc_size), 1655 rq->data_ring.base, 1656 rq->data_ring.basePA); 1657 rq->data_ring.base = NULL; 1658 rq->data_ring.desc_size = 0; 1659 } 1660 } 1661 } 1662 1663 static int 1664 vmxnet3_rq_init(struct vmxnet3_rx_queue *rq, 1665 struct vmxnet3_adapter *adapter) 1666 { 1667 int i; 1668 1669 /* initialize buf_info */ 1670 for (i = 0; i < rq->rx_ring[0].size; i++) { 1671 1672 /* 1st buf for a pkt is skbuff */ 1673 if (i % adapter->rx_buf_per_pkt == 0) { 1674 rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_SKB; 1675 rq->buf_info[0][i].len = adapter->skb_buf_size; 1676 } else { /* subsequent bufs for a pkt are frags */ 1677 rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_PAGE; 1678 rq->buf_info[0][i].len = PAGE_SIZE; 1679 } 1680 } 1681 for (i = 0; i < rq->rx_ring[1].size; i++) { 1682 rq->buf_info[1][i].buf_type = VMXNET3_RX_BUF_PAGE; 1683 
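/*
 * Layout produced by the ring-0 loop above: one skb buffer for each
 * packet's protocol headers followed by page buffers for the rest. With
 * rx_buf_per_pkt == 3, for example, buf_info[0] reads SKB, PAGE, PAGE,
 * SKB, PAGE, PAGE, ... while ring 1 (this loop) holds pages only.
 */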
rq->buf_info[1][i].len = PAGE_SIZE; 1684 } 1685 1686 /* reset internal state and allocate buffers for both rings */ 1687 for (i = 0; i < 2; i++) { 1688 rq->rx_ring[i].next2fill = rq->rx_ring[i].next2comp = 0; 1689 1690 memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size * 1691 sizeof(struct Vmxnet3_RxDesc)); 1692 rq->rx_ring[i].gen = VMXNET3_INIT_GEN; 1693 } 1694 if (vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1, 1695 adapter) == 0) { 1696 /* at least has 1 rx buffer for the 1st ring */ 1697 return -ENOMEM; 1698 } 1699 vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter); 1700 1701 /* reset the comp ring */ 1702 rq->comp_ring.next2proc = 0; 1703 memset(rq->comp_ring.base, 0, rq->comp_ring.size * 1704 sizeof(struct Vmxnet3_RxCompDesc)); 1705 rq->comp_ring.gen = VMXNET3_INIT_GEN; 1706 1707 /* reset rxctx */ 1708 rq->rx_ctx.skb = NULL; 1709 1710 /* stats are not reset */ 1711 return 0; 1712 } 1713 1714 1715 static int 1716 vmxnet3_rq_init_all(struct vmxnet3_adapter *adapter) 1717 { 1718 int i, err = 0; 1719 1720 for (i = 0; i < adapter->num_rx_queues; i++) { 1721 err = vmxnet3_rq_init(&adapter->rx_queue[i], adapter); 1722 if (unlikely(err)) { 1723 dev_err(&adapter->netdev->dev, "%s: failed to " 1724 "initialize rx queue%i\n", 1725 adapter->netdev->name, i); 1726 break; 1727 } 1728 } 1729 return err; 1730 1731 } 1732 1733 1734 static int 1735 vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter) 1736 { 1737 int i; 1738 size_t sz; 1739 struct vmxnet3_rx_buf_info *bi; 1740 1741 for (i = 0; i < 2; i++) { 1742 1743 sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc); 1744 rq->rx_ring[i].base = dma_alloc_coherent( 1745 &adapter->pdev->dev, sz, 1746 &rq->rx_ring[i].basePA, 1747 GFP_KERNEL); 1748 if (!rq->rx_ring[i].base) { 1749 netdev_err(adapter->netdev, 1750 "failed to allocate rx ring %d\n", i); 1751 goto err; 1752 } 1753 } 1754 1755 if ((adapter->rxdataring_enabled) && (rq->data_ring.desc_size != 0)) { 1756 sz = rq->rx_ring[0].size * rq->data_ring.desc_size; 1757 rq->data_ring.base = 1758 dma_alloc_coherent(&adapter->pdev->dev, sz, 1759 &rq->data_ring.basePA, 1760 GFP_KERNEL); 1761 if (!rq->data_ring.base) { 1762 netdev_err(adapter->netdev, 1763 "rx data ring will be disabled\n"); 1764 adapter->rxdataring_enabled = false; 1765 } 1766 } else { 1767 rq->data_ring.base = NULL; 1768 rq->data_ring.desc_size = 0; 1769 } 1770 1771 sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc); 1772 rq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev, sz, 1773 &rq->comp_ring.basePA, 1774 GFP_KERNEL); 1775 if (!rq->comp_ring.base) { 1776 netdev_err(adapter->netdev, "failed to allocate rx comp ring\n"); 1777 goto err; 1778 } 1779 1780 sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size + 1781 rq->rx_ring[1].size); 1782 bi = dma_zalloc_coherent(&adapter->pdev->dev, sz, &rq->buf_info_pa, 1783 GFP_KERNEL); 1784 if (!bi) 1785 goto err; 1786 1787 rq->buf_info[0] = bi; 1788 rq->buf_info[1] = bi + rq->rx_ring[0].size; 1789 1790 return 0; 1791 1792 err: 1793 vmxnet3_rq_destroy(rq, adapter); 1794 return -ENOMEM; 1795 } 1796 1797 1798 static int 1799 vmxnet3_rq_create_all(struct vmxnet3_adapter *adapter) 1800 { 1801 int i, err = 0; 1802 1803 adapter->rxdataring_enabled = VMXNET3_VERSION_GE_3(adapter); 1804 1805 for (i = 0; i < adapter->num_rx_queues; i++) { 1806 err = vmxnet3_rq_create(&adapter->rx_queue[i], adapter); 1807 if (unlikely(err)) { 1808 dev_err(&adapter->netdev->dev, 1809 "%s: failed to create rx queue%i\n", 1810 adapter->netdev->name, 
i); 1811 goto err_out; 1812 } 1813 } 1814 1815 if (!adapter->rxdataring_enabled) 1816 vmxnet3_rq_destroy_all_rxdataring(adapter); 1817 1818 return err; 1819 err_out: 1820 vmxnet3_rq_destroy_all(adapter); 1821 return err; 1822 1823 } 1824 1825 /* Multiple queue aware polling function for tx and rx */ 1826 1827 static int 1828 vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget) 1829 { 1830 int rcd_done = 0, i; 1831 if (unlikely(adapter->shared->ecr)) 1832 vmxnet3_process_events(adapter); 1833 for (i = 0; i < adapter->num_tx_queues; i++) 1834 vmxnet3_tq_tx_complete(&adapter->tx_queue[i], adapter); 1835 1836 for (i = 0; i < adapter->num_rx_queues; i++) 1837 rcd_done += vmxnet3_rq_rx_complete(&adapter->rx_queue[i], 1838 adapter, budget); 1839 return rcd_done; 1840 } 1841 1842 1843 static int 1844 vmxnet3_poll(struct napi_struct *napi, int budget) 1845 { 1846 struct vmxnet3_rx_queue *rx_queue = container_of(napi, 1847 struct vmxnet3_rx_queue, napi); 1848 int rxd_done; 1849 1850 rxd_done = vmxnet3_do_poll(rx_queue->adapter, budget); 1851 1852 if (rxd_done < budget) { 1853 napi_complete(napi); 1854 vmxnet3_enable_all_intrs(rx_queue->adapter); 1855 } 1856 return rxd_done; 1857 } 1858 1859 /* 1860 * NAPI polling function for MSI-X mode with multiple Rx queues 1861 * Returns the # of NAPI credits consumed (# of rx descriptors processed) 1862 */ 1863 1864 static int 1865 vmxnet3_poll_rx_only(struct napi_struct *napi, int budget) 1866 { 1867 struct vmxnet3_rx_queue *rq = container_of(napi, 1868 struct vmxnet3_rx_queue, napi); 1869 struct vmxnet3_adapter *adapter = rq->adapter; 1870 int rxd_done; 1871 1872 /* When sharing interrupt with corresponding tx queue, process 1873 * tx completions in that queue as well 1874 */ 1875 if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE) { 1876 struct vmxnet3_tx_queue *tq = 1877 &adapter->tx_queue[rq - adapter->rx_queue]; 1878 vmxnet3_tq_tx_complete(tq, adapter); 1879 } 1880 1881 rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget); 1882 1883 if (rxd_done < budget) { 1884 napi_complete(napi); 1885 vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx); 1886 } 1887 return rxd_done; 1888 } 1889 1890 1891 #ifdef CONFIG_PCI_MSI 1892 1893 /* 1894 * Handle completion interrupts on tx queues 1895 * Returns whether or not the intr is handled 1896 */ 1897 1898 static irqreturn_t 1899 vmxnet3_msix_tx(int irq, void *data) 1900 { 1901 struct vmxnet3_tx_queue *tq = data; 1902 struct vmxnet3_adapter *adapter = tq->adapter; 1903 1904 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE) 1905 vmxnet3_disable_intr(adapter, tq->comp_ring.intr_idx); 1906 1907 /* Handle the case where only one irq is allocated for all tx queues */ 1908 if (adapter->share_intr == VMXNET3_INTR_TXSHARE) { 1909 int i; 1910 for (i = 0; i < adapter->num_tx_queues; i++) { 1911 struct vmxnet3_tx_queue *txq = &adapter->tx_queue[i]; 1912 vmxnet3_tq_tx_complete(txq, adapter); 1913 } 1914 } else { 1915 vmxnet3_tq_tx_complete(tq, adapter); 1916 } 1917 vmxnet3_enable_intr(adapter, tq->comp_ring.intr_idx); 1918 1919 return IRQ_HANDLED; 1920 } 1921 1922 1923 /* 1924 * Handle completion interrupts on rx queues. 
Returns whether or not the 1925 * intr is handled 1926 */ 1927 1928 static irqreturn_t 1929 vmxnet3_msix_rx(int irq, void *data) 1930 { 1931 struct vmxnet3_rx_queue *rq = data; 1932 struct vmxnet3_adapter *adapter = rq->adapter; 1933 1934 /* disable intr if needed */ 1935 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE) 1936 vmxnet3_disable_intr(adapter, rq->comp_ring.intr_idx); 1937 napi_schedule(&rq->napi); 1938 1939 return IRQ_HANDLED; 1940 } 1941 1942 /* 1943 *---------------------------------------------------------------------------- 1944 * 1945 * vmxnet3_msix_event -- 1946 * 1947 * vmxnet3 msix event intr handler 1948 * 1949 * Result: 1950 * whether or not the intr is handled 1951 * 1952 *---------------------------------------------------------------------------- 1953 */ 1954 1955 static irqreturn_t 1956 vmxnet3_msix_event(int irq, void *data) 1957 { 1958 struct net_device *dev = data; 1959 struct vmxnet3_adapter *adapter = netdev_priv(dev); 1960 1961 /* disable intr if needed */ 1962 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE) 1963 vmxnet3_disable_intr(adapter, adapter->intr.event_intr_idx); 1964 1965 if (adapter->shared->ecr) 1966 vmxnet3_process_events(adapter); 1967 1968 vmxnet3_enable_intr(adapter, adapter->intr.event_intr_idx); 1969 1970 return IRQ_HANDLED; 1971 } 1972 1973 #endif /* CONFIG_PCI_MSI */ 1974 1975 1976 /* Interrupt handler for vmxnet3 */ 1977 static irqreturn_t 1978 vmxnet3_intr(int irq, void *dev_id) 1979 { 1980 struct net_device *dev = dev_id; 1981 struct vmxnet3_adapter *adapter = netdev_priv(dev); 1982 1983 if (adapter->intr.type == VMXNET3_IT_INTX) { 1984 u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR); 1985 if (unlikely(icr == 0)) 1986 /* not ours */ 1987 return IRQ_NONE; 1988 } 1989 1990 1991 /* disable intr if needed */ 1992 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE) 1993 vmxnet3_disable_all_intrs(adapter); 1994 1995 napi_schedule(&adapter->rx_queue[0].napi); 1996 1997 return IRQ_HANDLED; 1998 } 1999 2000 #ifdef CONFIG_NET_POLL_CONTROLLER 2001 2002 /* netpoll callback. 
2000 #ifdef CONFIG_NET_POLL_CONTROLLER

2002 /* netpoll callback. */
2003 static void
2004 vmxnet3_netpoll(struct net_device *netdev)
2005 {
2006 struct vmxnet3_adapter *adapter = netdev_priv(netdev);

2008 switch (adapter->intr.type) {
2009 #ifdef CONFIG_PCI_MSI
2010 case VMXNET3_IT_MSIX: {
2011 int i;
2012 for (i = 0; i < adapter->num_rx_queues; i++)
2013 vmxnet3_msix_rx(0, &adapter->rx_queue[i]);
2014 break;
2015 }
2016 #endif
2017 case VMXNET3_IT_MSI:
2018 default:
2019 vmxnet3_intr(0, adapter->netdev);
2020 break;
2021 }
2023 }
2024 #endif /* CONFIG_NET_POLL_CONTROLLER */
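Netpoll clients such as netconsole call this hook with interrupts disabled, so vmxnet3_netpoll() simply invokes the interrupt handlers synchronously instead of waiting for hardware interrupts. The callback is wired into the driver's net_device_ops elsewhere in the file; the sketch below shows the generic shape of that wiring (my_open/my_stop are hypothetical placeholders, .ndo_poll_controller is the standard kernel field), not a quote of the driver's actual ops table.

	/*
	 * Sketch: wiring a netpoll callback into net_device_ops.
	 */
	static const struct net_device_ops my_netdev_ops = {
		.ndo_open		= my_open,	/* hypothetical */
		.ndo_stop		= my_stop,	/* hypothetical */
	#ifdef CONFIG_NET_POLL_CONTROLLER
		.ndo_poll_controller	= vmxnet3_netpoll,
	#endif
	};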
2026 static int
2027 vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
2028 {
2029 struct vmxnet3_intr *intr = &adapter->intr;
2030 int err = 0, i;
2031 int vector = 0;

2033 #ifdef CONFIG_PCI_MSI
2034 if (adapter->intr.type == VMXNET3_IT_MSIX) {
2035 for (i = 0; i < adapter->num_tx_queues; i++) {
2036 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
2037 sprintf(adapter->tx_queue[i].name, "%s-tx-%d",
2038 adapter->netdev->name, vector);
2039 err = request_irq(
2040 intr->msix_entries[vector].vector,
2041 vmxnet3_msix_tx, 0,
2042 adapter->tx_queue[i].name,
2043 &adapter->tx_queue[i]);
2044 } else {
2045 sprintf(adapter->tx_queue[i].name, "%s-rxtx-%d",
2046 adapter->netdev->name, vector);
2047 }
2048 if (err) {
2049 dev_err(&adapter->netdev->dev,
2050 "Failed to request irq for MSIX, %s, "
2051 "error %d\n",
2052 adapter->tx_queue[i].name, err);
2053 return err;
2054 }

2056 /* Handle the case where only one MSI-X vector was allocated for
2057 * all tx queues */
2058 if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
2059 for (; i < adapter->num_tx_queues; i++)
2060 adapter->tx_queue[i].comp_ring.intr_idx
2061 = vector;
2062 vector++;
2063 break;
2064 } else {
2065 adapter->tx_queue[i].comp_ring.intr_idx
2066 = vector++;
2067 }
2068 }
2069 if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE)
2070 vector = 0;

2072 for (i = 0; i < adapter->num_rx_queues; i++) {
2073 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE)
2074 sprintf(adapter->rx_queue[i].name, "%s-rx-%d",
2075 adapter->netdev->name, vector);
2076 else
2077 sprintf(adapter->rx_queue[i].name, "%s-rxtx-%d",
2078 adapter->netdev->name, vector);
2079 err = request_irq(intr->msix_entries[vector].vector,
2080 vmxnet3_msix_rx, 0,
2081 adapter->rx_queue[i].name,
2082 &(adapter->rx_queue[i]));
2083 if (err) {
2084 netdev_err(adapter->netdev,
2085 "Failed to request irq for MSIX, "
2086 "%s, error %d\n",
2087 adapter->rx_queue[i].name, err);
2088 return err;
2089 }

2091 adapter->rx_queue[i].comp_ring.intr_idx = vector++;
2092 }

2094 sprintf(intr->event_msi_vector_name, "%s-event-%d",
2095 adapter->netdev->name, vector);
2096 err = request_irq(intr->msix_entries[vector].vector,
2097 vmxnet3_msix_event, 0,
2098 intr->event_msi_vector_name, adapter->netdev);
2099 intr->event_intr_idx = vector;

2101 } else if (intr->type == VMXNET3_IT_MSI) {
2102 adapter->num_rx_queues = 1;
2103 err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0,
2104 adapter->netdev->name, adapter->netdev);
2105 } else {
2106 #endif
2107 adapter->num_rx_queues = 1;
2108 err = request_irq(adapter->pdev->irq, vmxnet3_intr,
2109 IRQF_SHARED, adapter->netdev->name,
2110 adapter->netdev);
2111 #ifdef CONFIG_PCI_MSI
2112 }
2113 #endif
2114 intr->num_intrs = vector + 1;
2115 if (err) {
2116 netdev_err(adapter->netdev,
2117 "Failed to request irq (intr type:%d), error %d\n",
2118 intr->type, err);
2119 } else {
2120 /* Number of rx queues will not change after this */
2121 for (i = 0; i < adapter->num_rx_queues; i++) {
2122 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
2123 rq->qid = i;
2124 rq->qid2 = i + adapter->num_rx_queues;
2125 rq->dataRingQid = i + 2 * adapter->num_rx_queues;
2126 }

2128 /* init our intr settings */
2129 for (i = 0; i < intr->num_intrs; i++)
2130 intr->mod_levels[i] = UPT1_IML_ADAPTIVE;
2131 if (adapter->intr.type != VMXNET3_IT_MSIX) {
2132 adapter->intr.event_intr_idx = 0;
2133 for (i = 0; i < adapter->num_tx_queues; i++)
2134 adapter->tx_queue[i].comp_ring.intr_idx = 0;
2135 adapter->rx_queue[0].comp_ring.intr_idx = 0;
2136 }

2138 netdev_info(adapter->netdev,
2139 "intr type %u, mode %u, %u vectors allocated\n",
2140 intr->type, intr->mask_mode, intr->num_intrs);
2141 }

2143 return err;
2144 }

2147 static void
2148 vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
2149 {
2150 struct vmxnet3_intr *intr = &adapter->intr;
2151 BUG_ON(intr->type == VMXNET3_IT_AUTO || intr->num_intrs <= 0);

2153 switch (intr->type) {
2154 #ifdef CONFIG_PCI_MSI
2155 case VMXNET3_IT_MSIX:
2156 {
2157 int i, vector = 0;

2159 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
2160 for (i = 0; i < adapter->num_tx_queues; i++) {
2161 free_irq(intr->msix_entries[vector++].vector,
2162 &(adapter->tx_queue[i]));
2163 if (adapter->share_intr == VMXNET3_INTR_TXSHARE)
2164 break;
2165 }
2166 }

2168 for (i = 0; i < adapter->num_rx_queues; i++) {
2169 free_irq(intr->msix_entries[vector++].vector,
2170 &(adapter->rx_queue[i]));
2171 }

2173 free_irq(intr->msix_entries[vector].vector,
2174 adapter->netdev);
2175 BUG_ON(vector >= intr->num_intrs);
2176 break;
2177 }
2178 #endif
2179 case VMXNET3_IT_MSI:
2180 free_irq(adapter->pdev->irq, adapter->netdev);
2181 break;
2182 case VMXNET3_IT_INTX:
2183 free_irq(adapter->pdev->irq, adapter->netdev);
2184 break;
2185 default:
2186 BUG();
2187 }
2188 }

2191 static void
2192 vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter)
2193 {
2194 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
2195 u16 vid;

2197 /* allow untagged pkts */
2198 VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);

2200 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
2201 VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
2202 }

2205 static int
2206 vmxnet3_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
2207 {
2208 struct vmxnet3_adapter *adapter = netdev_priv(netdev);

2210 if (!(netdev->flags & IFF_PROMISC)) {
2211 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
2212 unsigned long flags;

2214 VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
2215 spin_lock_irqsave(&adapter->cmd_lock, flags);
2216 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2217 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
2218 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2219 }

2221 set_bit(vid, adapter->active_vlans);

2223 return 0;
2224 }

2227 static int
2228 vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
2229 {
2230 struct vmxnet3_adapter *adapter = netdev_priv(netdev);

2232 if (!(netdev->flags & IFF_PROMISC)) {
2233 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
2234 unsigned long flags;

2236 VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid);
2237 spin_lock_irqsave(&adapter->cmd_lock, flags);
2238 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2239 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
2240 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2241 }

2243 clear_bit(vid, adapter->active_vlans);

2245 return 0;
2246 }
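The vfTable used by the three VLAN routines above is a bitmap shared with the device: one bit per VLAN ID, packed into u32 words, with bit 0 reserved for untagged traffic. Judging from how the table is indexed here, the VMXNET3_SET/CLEAR_VFTABLE_ENTRY macros likely reduce to the word/bit arithmetic below; this is an assumption based on the usage, not a quote of the driver header. The demo is standalone, runnable C:

	#include <stdint.h>
	#include <stdio.h>

	/* 4096 VLAN IDs packed into 128 u32 words:
	 * bit (vid % 32) of word (vid / 32). */
	static void vft_set(uint32_t *vfTable, unsigned int vid)
	{
		vfTable[vid >> 5] |= 1u << (vid & 31);
	}

	static void vft_clear(uint32_t *vfTable, unsigned int vid)
	{
		vfTable[vid >> 5] &= ~(1u << (vid & 31));
	}

	int main(void)
	{
		uint32_t vft[128] = { 0 };

		vft_set(vft, 0);	/* allow untagged packets */
		vft_set(vft, 100);	/* VLAN 100: word 3, bit 4 */
		printf("word 3 = 0x%08x\n", vft[3]);	/* 0x00000010 */
		vft_clear(vft, 100);
		return 0;
	}

Because the table lives in shared memory, a single VMXNET3_CMD_UPDATE_VLAN_FILTERS write is enough to make the device re-read it.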
2249 static u8 *
2250 vmxnet3_copy_mc(struct net_device *netdev)
2251 {
2252 u8 *buf = NULL;
2253 u32 sz = netdev_mc_count(netdev) * ETH_ALEN;

2255 /* struct Vmxnet3_RxFilterConf.mfTableLen is u16. */
2256 if (sz <= 0xffff) {
2257 /* We may be called with BH disabled */
2258 buf = kmalloc(sz, GFP_ATOMIC);
2259 if (buf) {
2260 struct netdev_hw_addr *ha;
2261 int i = 0;

2263 netdev_for_each_mc_addr(ha, netdev)
2264 memcpy(buf + i++ * ETH_ALEN, ha->addr,
2265 ETH_ALEN);
2266 }
2267 }
2268 return buf;
2269 }

2272 static void
2273 vmxnet3_set_mc(struct net_device *netdev)
2274 {
2275 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2276 unsigned long flags;
2277 struct Vmxnet3_RxFilterConf *rxConf =
2278 &adapter->shared->devRead.rxFilterConf;
2279 u8 *new_table = NULL;
2280 dma_addr_t new_table_pa = 0;
2281 u32 new_mode = VMXNET3_RXM_UCAST;

2283 if (netdev->flags & IFF_PROMISC) {
2284 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
2285 memset(vfTable, 0, VMXNET3_VFT_SIZE * sizeof(*vfTable));

2287 new_mode |= VMXNET3_RXM_PROMISC;
2288 } else {
2289 vmxnet3_restore_vlan(adapter);
2290 }

2292 if (netdev->flags & IFF_BROADCAST)
2293 new_mode |= VMXNET3_RXM_BCAST;

2295 if (netdev->flags & IFF_ALLMULTI)
2296 new_mode |= VMXNET3_RXM_ALL_MULTI;
2297 else
2298 if (!netdev_mc_empty(netdev)) {
2299 new_table = vmxnet3_copy_mc(netdev);
2300 if (new_table) {
2301 size_t sz = netdev_mc_count(netdev) * ETH_ALEN;

2303 rxConf->mfTableLen = cpu_to_le16(sz);
2304 new_table_pa = dma_map_single(
2305 &adapter->pdev->dev,
2306 new_table,
2307 sz,
2308 PCI_DMA_TODEVICE);
2309 }

2311 if (!dma_mapping_error(&adapter->pdev->dev,
2312 new_table_pa)) {
2313 new_mode |= VMXNET3_RXM_MCAST;
2314 rxConf->mfTablePA = cpu_to_le64(new_table_pa);
2315 } else {
2316 netdev_info(netdev,
2317 "failed to copy mcast list, setting ALL_MULTI\n");
2318 new_mode |= VMXNET3_RXM_ALL_MULTI;
2319 }
2320 }

2322 if (!(new_mode & VMXNET3_RXM_MCAST)) {
2323 rxConf->mfTableLen = 0;
2324 rxConf->mfTablePA = 0;
2325 }

2327 spin_lock_irqsave(&adapter->cmd_lock, flags);
2328 if (new_mode != rxConf->rxMode) {
2329 rxConf->rxMode = cpu_to_le32(new_mode);
2330 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2331 VMXNET3_CMD_UPDATE_RX_MODE);
2332 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2333 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
2334 }

2336 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2337 VMXNET3_CMD_UPDATE_MAC_FILTERS);
2338 spin_unlock_irqrestore(&adapter->cmd_lock, flags);

2340 if (new_table_pa)
2341 dma_unmap_single(&adapter->pdev->dev, new_table_pa,
2342 rxConf->mfTableLen, PCI_DMA_TODEVICE);
2343 kfree(new_table);
2344 }

2346 void
2347 vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter)
2348 {
2349 int i;

2351 for (i = 0; i < adapter->num_rx_queues; i++)
2352 vmxnet3_rq_destroy(&adapter->rx_queue[i], adapter);
2353 }
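vmxnet3_set_mc() above illustrates the mandatory DMA mapping discipline: a dma_addr_t returned by dma_map_single() must be validated with dma_mapping_error() before the address is handed to hardware, and the buffer must stay mapped until the device has consumed it (here, until the command register write returns). The sketch below shows that generic map/check/use/unmap shape with a hypothetical device pointer dev and buffer buf; it uses the modern DMA_TO_DEVICE constant where the driver above uses the older PCI_DMA_TODEVICE alias.

	/*
	 * Sketch of the map/check/use/unmap pattern (not vmxnet3 code).
	 */
	dma_addr_t pa = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, pa)) {
		/* fall back, e.g. request ALL_MULTI instead of a list */
	} else {
		/* publish pa to the device, issue the update command... */
		dma_unmap_single(dev, pa, len, DMA_TO_DEVICE);
	}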
2356 /*
2357 * Set up driver_shared based on settings in adapter.
2358 */

2360 static void
2361 vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
2362 {
2363 struct Vmxnet3_DriverShared *shared = adapter->shared;
2364 struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
2365 struct Vmxnet3_TxQueueConf *tqc;
2366 struct Vmxnet3_RxQueueConf *rqc;
2367 int i;

2369 memset(shared, 0, sizeof(*shared));

2371 /* driver settings */
2372 shared->magic = cpu_to_le32(VMXNET3_REV1_MAGIC);
2373 devRead->misc.driverInfo.version = cpu_to_le32(
2374 VMXNET3_DRIVER_VERSION_NUM);
2375 devRead->misc.driverInfo.gos.gosBits = (sizeof(void *) == 4 ?
2376 VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64);
2377 devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX;
2378 *((u32 *)&devRead->misc.driverInfo.gos) = cpu_to_le32(
2379 *((u32 *)&devRead->misc.driverInfo.gos));
2380 devRead->misc.driverInfo.vmxnet3RevSpt = cpu_to_le32(1);
2381 devRead->misc.driverInfo.uptVerSpt = cpu_to_le32(1);

2383 devRead->misc.ddPA = cpu_to_le64(adapter->adapter_pa);
2384 devRead->misc.ddLen = cpu_to_le32(sizeof(struct vmxnet3_adapter));

2386 /* set up feature flags */
2387 if (adapter->netdev->features & NETIF_F_RXCSUM)
2388 devRead->misc.uptFeatures |= UPT1_F_RXCSUM;

2390 if (adapter->netdev->features & NETIF_F_LRO) {
2391 devRead->misc.uptFeatures |= UPT1_F_LRO;
2392 devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
2393 }
2394 if (adapter->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
2395 devRead->misc.uptFeatures |= UPT1_F_RXVLAN;

2397 devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
2398 devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa);
2399 devRead->misc.queueDescLen = cpu_to_le32(
2400 adapter->num_tx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
2401 adapter->num_rx_queues * sizeof(struct Vmxnet3_RxQueueDesc));

2403 /* tx queue settings */
2404 devRead->misc.numTxQueues = adapter->num_tx_queues;
2405 for (i = 0; i < adapter->num_tx_queues; i++) {
2406 struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
2407 BUG_ON(adapter->tx_queue[i].tx_ring.base == NULL);
2408 tqc = &adapter->tqd_start[i].conf;
2409 tqc->txRingBasePA = cpu_to_le64(tq->tx_ring.basePA);
2410 tqc->dataRingBasePA = cpu_to_le64(tq->data_ring.basePA);
2411 tqc->compRingBasePA = cpu_to_le64(tq->comp_ring.basePA);
2412 tqc->ddPA = cpu_to_le64(tq->buf_info_pa);
2413 tqc->txRingSize = cpu_to_le32(tq->tx_ring.size);
2414 tqc->dataRingSize = cpu_to_le32(tq->data_ring.size);
2415 tqc->txDataRingDescSize = cpu_to_le32(tq->txdata_desc_size);
2416 tqc->compRingSize = cpu_to_le32(tq->comp_ring.size);
2417 tqc->ddLen = cpu_to_le32(
2418 sizeof(struct vmxnet3_tx_buf_info) *
2419 tqc->txRingSize);
2420 tqc->intrIdx = tq->comp_ring.intr_idx;
2421 }

2423 /* rx queue settings */
2424 devRead->misc.numRxQueues = adapter->num_rx_queues;
2425 for (i = 0; i < adapter->num_rx_queues; i++) {
2426 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
2427 rqc = &adapter->rqd_start[i].conf;
2428 rqc->rxRingBasePA[0] = cpu_to_le64(rq->rx_ring[0].basePA);
2429 rqc->rxRingBasePA[1] = cpu_to_le64(rq->rx_ring[1].basePA);
2430 rqc->compRingBasePA = cpu_to_le64(rq->comp_ring.basePA);
2431 rqc->ddPA = cpu_to_le64(rq->buf_info_pa);
2432 rqc->rxRingSize[0] = cpu_to_le32(rq->rx_ring[0].size);
2433 rqc->rxRingSize[1] = cpu_to_le32(rq->rx_ring[1].size);
2434 rqc->compRingSize = cpu_to_le32(rq->comp_ring.size);
2435 rqc->ddLen = cpu_to_le32(
2436 sizeof(struct vmxnet3_rx_buf_info) *
2437 (rqc->rxRingSize[0] +
2438 rqc->rxRingSize[1]));
2439 rqc->intrIdx = rq->comp_ring.intr_idx;
2440 if (VMXNET3_VERSION_GE_3(adapter)) {
2441 rqc->rxDataRingBasePA =
2442 cpu_to_le64(rq->data_ring.basePA);
2443 rqc->rxDataRingDescSize =
2444 cpu_to_le16(rq->data_ring.desc_size);
2445 }
2446 }

2448 #ifdef VMXNET3_RSS
2449 memset(adapter->rss_conf, 0, sizeof(*adapter->rss_conf));

2451 if (adapter->rss) {
2452 struct UPT1_RSSConf *rssConf = adapter->rss_conf;

2454 devRead->misc.uptFeatures |= UPT1_F_RSS;
2455 devRead->misc.numRxQueues = adapter->num_rx_queues;
2456 rssConf->hashType = UPT1_RSS_HASH_TYPE_TCP_IPV4 |
2457 UPT1_RSS_HASH_TYPE_IPV4 |
2458 UPT1_RSS_HASH_TYPE_TCP_IPV6 |
2459 UPT1_RSS_HASH_TYPE_IPV6;
2460 rssConf->hashFunc = UPT1_RSS_HASH_FUNC_TOEPLITZ;
2461 rssConf->hashKeySize = UPT1_RSS_MAX_KEY_SIZE;
2462 rssConf->indTableSize = VMXNET3_RSS_IND_TABLE_SIZE;
2463 netdev_rss_key_fill(rssConf->hashKey, sizeof(rssConf->hashKey));

2465 for (i = 0; i < rssConf->indTableSize; i++)
2466 rssConf->indTable[i] = ethtool_rxfh_indir_default(
2467 i, adapter->num_rx_queues);

2469 devRead->rssConfDesc.confVer = 1;
2470 devRead->rssConfDesc.confLen = cpu_to_le32(sizeof(*rssConf));
2471 devRead->rssConfDesc.confPA =
2472 cpu_to_le64(adapter->rss_conf_pa);
2473 }

2475 #endif /* VMXNET3_RSS */

2477 /* intr settings */
2478 devRead->intrConf.autoMask = adapter->intr.mask_mode ==
2479 VMXNET3_IMM_AUTO;
2480 devRead->intrConf.numIntrs = adapter->intr.num_intrs;
2481 for (i = 0; i < adapter->intr.num_intrs; i++)
2482 devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i];

2484 devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx;
2485 devRead->intrConf.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL);

2487 /* rx filter settings */
2488 devRead->rxFilterConf.rxMode = 0;
2489 vmxnet3_restore_vlan(adapter);
2490 vmxnet3_write_mac_addr(adapter, adapter->netdev->dev_addr);

2492 /* the rest are already zeroed */
2493 }
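Nearly every multi-byte store in vmxnet3_setup_driver_shared() goes through cpu_to_le16/32/64 because the device defines the shared-memory layout as little-endian regardless of guest CPU byte order; on little-endian guests these are no-ops, on big-endian guests they byte-swap. The standalone demo below hand-rolls the 32-bit conversion to show what the kernel macro has to produce on a big-endian machine; it is an illustration of the concept, not the kernel's implementation.

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* Serialize v as little-endian bytes, then reload it: the result
	 * is v's little-endian representation on any host. */
	static uint32_t to_le32(uint32_t v)
	{
		uint8_t b[4] = { v & 0xff, (v >> 8) & 0xff,
				 (v >> 16) & 0xff, (v >> 24) & 0xff };
		uint32_t out;

		memcpy(&out, b, 4);
		return out;
	}

	int main(void)
	{
		/* prints 0x11223344 on a little-endian host (a no-op),
		 * 0x44332211 on a big-endian one (a byte swap) */
		printf("0x%08x\n", to_le32(0x11223344));
		return 0;
	}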
2495 static void
2496 vmxnet3_init_coalesce(struct vmxnet3_adapter *adapter)
2497 {
2498 struct Vmxnet3_DriverShared *shared = adapter->shared;
2499 union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
2500 unsigned long flags;

2502 if (!VMXNET3_VERSION_GE_3(adapter))
2503 return;

2505 spin_lock_irqsave(&adapter->cmd_lock, flags);
2506 cmdInfo->varConf.confVer = 1;
2507 cmdInfo->varConf.confLen =
2508 cpu_to_le32(sizeof(*adapter->coal_conf));
2509 cmdInfo->varConf.confPA = cpu_to_le64(adapter->coal_conf_pa);

2511 if (adapter->default_coal_mode) {
2512 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2513 VMXNET3_CMD_GET_COALESCE);
2514 } else {
2515 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2516 VMXNET3_CMD_SET_COALESCE);
2517 }

2519 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2520 }
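Commands like GET/SET_COALESCE follow the driver's BAR1 command protocol, which vmxnet3_activate_dev() below also relies on: populate any shared-memory arguments first, write the command code to VMXNET3_REG_CMD, and, when a result is expected, read the same register straight back. cmd_lock serializes the write/read pair so concurrent callers cannot interleave. A condensed sketch of that shape, with CMD standing in for any command constant:

	/*
	 * Sketch of the BAR1 command handshake (shape taken from the
	 * surrounding code; CMD is a placeholder command code).
	 */
	spin_lock_irqsave(&adapter->cmd_lock, flags);
	/* ...fill in shared-memory arguments for CMD here... */
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, CMD);
	ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); /* result */
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);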
2522 int
2523 vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
2524 {
2525 int err, i;
2526 u32 ret;
2527 unsigned long flags;

2529 netdev_dbg(adapter->netdev, "%s: skb_buf_size %d, rx_buf_per_pkt %d,"
2530 " ring sizes %u %u %u\n", adapter->netdev->name,
2531 adapter->skb_buf_size, adapter->rx_buf_per_pkt,
2532 adapter->tx_queue[0].tx_ring.size,
2533 adapter->rx_queue[0].rx_ring[0].size,
2534 adapter->rx_queue[0].rx_ring[1].size);

2536 vmxnet3_tq_init_all(adapter);
2537 err = vmxnet3_rq_init_all(adapter);
2538 if (err) {
2539 netdev_err(adapter->netdev,
2540 "Failed to init rx queue error %d\n", err);
2541 goto rq_err;
2542 }

2544 err = vmxnet3_request_irqs(adapter);
2545 if (err) {
2546 netdev_err(adapter->netdev,
2547 "Failed to setup irq for error %d\n", err);
2548 goto irq_err;
2549 }

2551 vmxnet3_setup_driver_shared(adapter);

2553 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, VMXNET3_GET_ADDR_LO(
2554 adapter->shared_pa));
2555 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI(
2556 adapter->shared_pa));
2557 spin_lock_irqsave(&adapter->cmd_lock, flags);
2558 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2559 VMXNET3_CMD_ACTIVATE_DEV);
2560 ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
2561 spin_unlock_irqrestore(&adapter->cmd_lock, flags);

2563 if (ret != 0) {
2564 netdev_err(adapter->netdev,
2565 "Failed to activate dev: error %u\n", ret);
2566 err = -EINVAL;
2567 goto activate_err;
2568 }

2570 vmxnet3_init_coalesce(adapter);

2572 for (i = 0; i < adapter->num_rx_queues; i++) {
2573 VMXNET3_WRITE_BAR0_REG(adapter,
2574 VMXNET3_REG_RXPROD + i * VMXNET3_REG_ALIGN,
2575 adapter->rx_queue[i].rx_ring[0].next2fill);
2576 VMXNET3_WRITE_BAR0_REG(adapter, (VMXNET3_REG_RXPROD2 +
2577 (i * VMXNET3_REG_ALIGN)),
2578 adapter->rx_queue[i].rx_ring[1].next2fill);
2579 }

2581 /* Apply the rx filter settings last. */
2582 vmxnet3_set_mc(adapter->netdev);

2584 /*
2585 * Check link state when first activating device. It will start the
2586 * tx queue if the link is up.
2587 */
2588 vmxnet3_check_link(adapter, true);
2589 for (i = 0; i < adapter->num_rx_queues; i++)
2590 napi_enable(&adapter->rx_queue[i].napi);
2591 vmxnet3_enable_all_intrs(adapter);
2592 clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
2593 return 0;

2595 activate_err:
2596 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, 0);
2597 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, 0);
2598 vmxnet3_free_irqs(adapter);
2599 irq_err:
2600 rq_err:
2601 /* free up buffers we allocated */
2602 vmxnet3_rq_cleanup_all(adapter);
2603 return err;
2604 }

2607 void
2608 vmxnet3_reset_dev(struct vmxnet3_adapter *adapter)
2609 {
2610 unsigned long flags;
2611 spin_lock_irqsave(&adapter->cmd_lock, flags);
2612 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
2613 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2614 }

2617 int
2618 vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
2619 {
2620 int i;
2621 unsigned long flags;
2622 if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state))
2623 return 0;

2626 spin_lock_irqsave(&adapter->cmd_lock, flags);
2627 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2628 VMXNET3_CMD_QUIESCE_DEV);
2629 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2630 vmxnet3_disable_all_intrs(adapter);

2632 for (i = 0; i < adapter->num_rx_queues; i++)
2633 napi_disable(&adapter->rx_queue[i].napi);
2634 netif_tx_disable(adapter->netdev);
2635 adapter->link_speed = 0;
2636 netif_carrier_off(adapter->netdev);

2638 vmxnet3_tq_cleanup_all(adapter);
2639 vmxnet3_rq_cleanup_all(adapter);
2640 vmxnet3_free_irqs(adapter);
2641 return 0;
2642 }

2645 static void
2646 vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
2647 {
2648 u32 tmp;

2650 tmp = *(u32 *)mac;
2651 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACL, tmp);

2653 tmp = (mac[5] << 8) | mac[4];
2654 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACH, tmp);
2655 }
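vmxnet3_write_mac_addr() splits the 6-byte station address across two registers: MACL takes bytes 0..3 as a raw 32-bit load, MACH takes bytes 4..5 in its low half. The standalone, runnable demo below reproduces that packing for 00:11:22:33:44:55 and shows the resulting register values on a little-endian host (which is what the *(u32 *)mac load above produces there):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
		uint32_t macl, mach;

		memcpy(&macl, mac, 4);		/* like *(u32 *)mac */
		mach = (mac[5] << 8) | mac[4];	/* bytes 4..5 */

		/* little-endian host: macl=0x33221100 mach=0x00005544 */
		printf("macl=0x%08x mach=0x%08x\n", macl, mach);
		return 0;
	}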
2658 static int
2659 vmxnet3_set_mac_addr(struct net_device *netdev, void *p)
2660 {
2661 struct sockaddr *addr = p;
2662 struct vmxnet3_adapter *adapter = netdev_priv(netdev);

2664 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2665 vmxnet3_write_mac_addr(adapter, addr->sa_data);

2667 return 0;
2668 }

2671 /* ==================== initialization and cleanup routines ============ */

2673 static int
2674 vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64)
2675 {
2676 int err;
2677 unsigned long mmio_start, mmio_len;
2678 struct pci_dev *pdev = adapter->pdev;

2680 err = pci_enable_device(pdev);
2681 if (err) {
2682 dev_err(&pdev->dev, "Failed to enable adapter: error %d\n", err);
2683 return err;
2684 }

2686 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
2687 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
2688 dev_err(&pdev->dev,
2689 "pci_set_consistent_dma_mask failed\n");
2690 err = -EIO;
2691 goto err_set_mask;
2692 }
2693 *dma64 = true;
2694 } else {
2695 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
2696 dev_err(&pdev->dev,
2697 "pci_set_dma_mask failed\n");
2698 err = -EIO;
2699 goto err_set_mask;
2700 }
2701 *dma64 = false;
2702 }

2704 err = pci_request_selected_regions(pdev, (1 << 2) - 1,
2705 vmxnet3_driver_name);
2706 if (err) {
2707 dev_err(&pdev->dev,
2708 "Failed to request region for adapter: error %d\n", err);
2709 goto err_set_mask;
2710 }

2712 pci_set_master(pdev);

2714 mmio_start = pci_resource_start(pdev, 0);
2715 mmio_len = pci_resource_len(pdev, 0);
2716 adapter->hw_addr0 = ioremap(mmio_start, mmio_len);
2717 if (!adapter->hw_addr0) {
2718 dev_err(&pdev->dev, "Failed to map bar0\n");
2719 err = -EIO;
2720 goto err_ioremap;
2721 }

2723 mmio_start = pci_resource_start(pdev, 1);
2724 mmio_len = pci_resource_len(pdev, 1);
2725 adapter->hw_addr1 = ioremap(mmio_start, mmio_len);
2726 if (!adapter->hw_addr1) {
2727 dev_err(&pdev->dev, "Failed to map bar1\n");
2728 err = -EIO;
2729 goto err_bar1;
2730 }
2731 return 0;

2733 err_bar1:
2734 iounmap(adapter->hw_addr0);
2735 err_ioremap:
2736 pci_release_selected_regions(pdev, (1 << 2) - 1);
2737 err_set_mask:
2738 pci_disable_device(pdev);
2739 return err;
2740 }

2743 static void
2744 vmxnet3_free_pci_resources(struct vmxnet3_adapter *adapter)
2745 {
2746 BUG_ON(!adapter->pdev);

2748 iounmap(adapter->hw_addr0);
2749 iounmap(adapter->hw_addr1);
2750 pci_release_selected_regions(adapter->pdev, (1 << 2) - 1);
2751 pci_disable_device(adapter->pdev);
2752 }
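Two details in vmxnet3_alloc_pci_resources() are worth spelling out. First, the region mask (1 << 2) - 1 = 0x3 selects BARs 0 and 1, matching the two ioremap() calls that follow. Second, the DMA setup is the usual negotiation: prefer 64-bit streaming and coherent masks, fall back to 32-bit, and record the outcome so descriptor rings land in device-reachable memory. The sketch below condenses that fallback using the same legacy helpers the driver calls (pdev assumed in scope); unlike the code above, it also falls back to 32-bit when only the coherent 64-bit mask fails, so treat it as a simplified variant rather than a transcription.

	/*
	 * Sketch of a DMA-mask fallback (simplified, not vmxnet3 code).
	 */
	bool dma64 = false;
	int err = 0;

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
		dma64 = true;		/* full 64-bit addressing */
	else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
		err = -EIO;		/* no usable mask: abort probe */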
2755 static void
2756 vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
2757 {
2758 size_t sz, i, ring0_size, ring1_size, comp_size;
2759 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[0];

2762 if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE -
2763 VMXNET3_MAX_ETH_HDR_SIZE) {
2764 adapter->skb_buf_size = adapter->netdev->mtu +
2765 VMXNET3_MAX_ETH_HDR_SIZE;
2766 if (adapter->skb_buf_size < VMXNET3_MIN_T0_BUF_SIZE)
2767 adapter->skb_buf_size = VMXNET3_MIN_T0_BUF_SIZE;

2769 adapter->rx_buf_per_pkt = 1;
2770 } else {
2771 adapter->skb_buf_size = VMXNET3_MAX_SKB_BUF_SIZE;
2772 sz = adapter->netdev->mtu - VMXNET3_MAX_SKB_BUF_SIZE +
2773 VMXNET3_MAX_ETH_HDR_SIZE;
2774 adapter->rx_buf_per_pkt = 1 + (sz + PAGE_SIZE - 1) / PAGE_SIZE;
2775 }

2777 /*
2778 * for simplicity, force the ring0 size to be a multiple of
2779 * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
2780 */
2781 sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
2782 ring0_size = adapter->rx_queue[0].rx_ring[0].size;
2783 ring0_size = (ring0_size + sz - 1) / sz * sz;
2784 ring0_size = min_t(u32, ring0_size, VMXNET3_RX_RING_MAX_SIZE /
2785 sz * sz);
2786 ring1_size = adapter->rx_queue[0].rx_ring[1].size;
2787 ring1_size = (ring1_size + sz - 1) / sz * sz;
2788 ring1_size = min_t(u32, ring1_size, VMXNET3_RX_RING2_MAX_SIZE /
2789 sz * sz);
2790 comp_size = ring0_size + ring1_size;

2792 for (i = 0; i < adapter->num_rx_queues; i++) {
2793 rq = &adapter->rx_queue[i];
2794 rq->rx_ring[0].size = ring0_size;
2795 rq->rx_ring[1].size = ring1_size;
2796 rq->comp_ring.size = comp_size;
2797 }
2798 }

2801 int
2802 vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
2803 u32 rx_ring_size, u32 rx_ring2_size,
2804 u16 txdata_desc_size, u16 rxdata_desc_size)
2805 {
2806 int err = 0, i;

2808 for (i = 0; i < adapter->num_tx_queues; i++) {
2809 struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
2810 tq->tx_ring.size = tx_ring_size;
2811 tq->data_ring.size = tx_ring_size;
2812 tq->comp_ring.size = tx_ring_size;
2813 tq->txdata_desc_size = txdata_desc_size;
2814 tq->shared = &adapter->tqd_start[i].ctrl;
2815 tq->stopped = true;
2816 tq->adapter = adapter;
2817 tq->qid = i;
2818 err = vmxnet3_tq_create(tq, adapter);
2819 /*
2820 * Too late to change num_tx_queues. We cannot make do with
2821 * fewer queues than we asked for
2822 */
2823 if (err)
2824 goto queue_err;
2825 }

2827 adapter->rx_queue[0].rx_ring[0].size = rx_ring_size;
2828 adapter->rx_queue[0].rx_ring[1].size = rx_ring2_size;
2829 vmxnet3_adjust_rx_ring_size(adapter);

2831 adapter->rxdataring_enabled = VMXNET3_VERSION_GE_3(adapter);
2832 for (i = 0; i < adapter->num_rx_queues; i++) {
2833 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
2834 /* qid and qid2 for rx queues will be assigned later when num
2835 * of rx queues is finalized after allocating intrs */
2836 rq->shared = &adapter->rqd_start[i].ctrl;
2837 rq->adapter = adapter;
2838 rq->data_ring.desc_size = rxdata_desc_size;
2839 err = vmxnet3_rq_create(rq, adapter);
2840 if (err) {
2841 if (i == 0) {
2842 netdev_err(adapter->netdev,
2843 "Could not allocate any rx queues. "
2844 "Aborting.\n");
2845 goto queue_err;
2846 } else {
2847 netdev_info(adapter->netdev,
2848 "Number of rx queues changed "
2849 "to : %d.\n", i);
2850 adapter->num_rx_queues = i;
2851 err =