Error Trace

[Home]

Bug # 150

Show/hide error trace
Error trace
Function bodies
Blocks
  • Others...
    Function bodies without model function calls
    Initialization function calls
    Initialization function bodies
    Entry point
    Entry point body
    Function calls
    Skipped function calls
    Formal parameter names
    Declarations
    Assumes
    Assume conditions
    Returns
    Return values
    DEG initialization
    DEG function calls
    Model function calls
    Model function bodies
    Model asserts
    Model state changes
    Model function function calls
    Model function function bodies
    Model returns
    Model others
    Indentation
    Line numbers
    Expand signs
-__CPAchecker_initialize()
{
19 typedef signed char __s8;
20 typedef unsigned char __u8;
22 typedef short __s16;
23 typedef unsigned short __u16;
25 typedef int __s32;
26 typedef unsigned int __u32;
29 typedef long long __s64;
30 typedef unsigned long long __u64;
15 typedef signed char s8;
16 typedef unsigned char u8;
19 typedef unsigned short u16;
21 typedef int s32;
22 typedef unsigned int u32;
24 typedef long long s64;
25 typedef unsigned long long u64;
14 typedef long __kernel_long_t;
15 typedef unsigned long __kernel_ulong_t;
27 typedef int __kernel_pid_t;
48 typedef unsigned int __kernel_uid32_t;
49 typedef unsigned int __kernel_gid32_t;
71 typedef __kernel_ulong_t __kernel_size_t;
72 typedef __kernel_long_t __kernel_ssize_t;
87 typedef long long __kernel_loff_t;
88 typedef __kernel_long_t __kernel_time_t;
89 typedef __kernel_long_t __kernel_clock_t;
90 typedef int __kernel_timer_t;
91 typedef int __kernel_clockid_t;
32 typedef __u16 __le16;
33 typedef __u16 __be16;
35 typedef __u32 __be32;
37 typedef __u64 __be64;
40 typedef __u32 __wsum;
257 struct kernel_symbol { unsigned long value; const char *name; } ;
33 struct module ;
12 typedef __u32 __kernel_dev_t;
15 typedef __kernel_dev_t dev_t;
18 typedef unsigned short umode_t;
21 typedef __kernel_pid_t pid_t;
26 typedef __kernel_clockid_t clockid_t;
29 typedef _Bool bool;
31 typedef __kernel_uid32_t uid_t;
32 typedef __kernel_gid32_t gid_t;
45 typedef __kernel_loff_t loff_t;
54 typedef __kernel_size_t size_t;
59 typedef __kernel_ssize_t ssize_t;
69 typedef __kernel_time_t time_t;
102 typedef __s32 int32_t;
106 typedef __u8 uint8_t;
108 typedef __u32 uint32_t;
111 typedef __u64 uint64_t;
133 typedef unsigned long sector_t;
134 typedef unsigned long blkcnt_t;
152 typedef u64 dma_addr_t;
157 typedef unsigned int gfp_t;
158 typedef unsigned int fmode_t;
176 struct __anonstruct_atomic_t_6 { int counter; } ;
176 typedef struct __anonstruct_atomic_t_6 atomic_t;
181 struct __anonstruct_atomic64_t_7 { long counter; } ;
181 typedef struct __anonstruct_atomic64_t_7 atomic64_t;
182 struct list_head { struct list_head *next; struct list_head *prev; } ;
187 struct hlist_node ;
187 struct hlist_head { struct hlist_node *first; } ;
191 struct hlist_node { struct hlist_node *next; struct hlist_node **pprev; } ;
202 struct callback_head { struct callback_head *next; void (*func)(struct callback_head *); } ;
125 typedef void (*ctor_fn_t)();
67 struct ctl_table ;
58 struct device ;
64 struct net_device ;
467 struct file_operations ;
479 struct completion ;
480 struct pt_regs ;
27 union __anonunion___u_9 { struct list_head *__val; char __c[1U]; } ;
556 struct bug_entry { int bug_addr_disp; int file_disp; unsigned short line; unsigned short flags; } ;
111 struct timespec ;
112 struct compat_timespec ;
113 struct __anonstruct_futex_25 { u32 *uaddr; u32 val; u32 flags; u32 bitset; u64 time; u32 *uaddr2; } ;
113 struct __anonstruct_nanosleep_26 { clockid_t clockid; struct timespec *rmtp; struct compat_timespec *compat_rmtp; u64 expires; } ;
113 struct pollfd ;
113 struct __anonstruct_poll_27 { struct pollfd *ufds; int nfds; int has_timeout; unsigned long tv_sec; unsigned long tv_nsec; } ;
113 union __anonunion____missing_field_name_24 { struct __anonstruct_futex_25 futex; struct __anonstruct_nanosleep_26 nanosleep; struct __anonstruct_poll_27 poll; } ;
113 struct restart_block { long int (*fn)(struct restart_block *); union __anonunion____missing_field_name_24 __annonCompField4; } ;
39 struct page ;
26 struct task_struct ;
27 struct mm_struct ;
288 struct pt_regs { unsigned long r15; unsigned long r14; unsigned long r13; unsigned long r12; unsigned long bp; unsigned long bx; unsigned long r11; unsigned long r10; unsigned long r9; unsigned long r8; unsigned long ax; unsigned long cx; unsigned long dx; unsigned long si; unsigned long di; unsigned long orig_ax; unsigned long ip; unsigned long cs; unsigned long flags; unsigned long sp; unsigned long ss; } ;
66 struct __anonstruct____missing_field_name_30 { unsigned int a; unsigned int b; } ;
66 struct __anonstruct____missing_field_name_31 { u16 limit0; u16 base0; unsigned char base1; unsigned char type; unsigned char s; unsigned char dpl; unsigned char p; unsigned char limit; unsigned char avl; unsigned char l; unsigned char d; unsigned char g; unsigned char base2; } ;
66 union __anonunion____missing_field_name_29 { struct __anonstruct____missing_field_name_30 __annonCompField5; struct __anonstruct____missing_field_name_31 __annonCompField6; } ;
66 struct desc_struct { union __anonunion____missing_field_name_29 __annonCompField7; } ;
13 typedef unsigned long pteval_t;
14 typedef unsigned long pmdval_t;
16 typedef unsigned long pgdval_t;
17 typedef unsigned long pgprotval_t;
19 struct __anonstruct_pte_t_32 { pteval_t pte; } ;
19 typedef struct __anonstruct_pte_t_32 pte_t;
21 struct pgprot { pgprotval_t pgprot; } ;
256 typedef struct pgprot pgprot_t;
258 struct __anonstruct_pgd_t_33 { pgdval_t pgd; } ;
258 typedef struct __anonstruct_pgd_t_33 pgd_t;
297 struct __anonstruct_pmd_t_35 { pmdval_t pmd; } ;
297 typedef struct __anonstruct_pmd_t_35 pmd_t;
423 typedef struct page *pgtable_t;
434 struct file ;
447 struct seq_file ;
483 struct thread_struct ;
485 struct cpumask ;
20 struct qspinlock { atomic_t val; } ;
33 typedef struct qspinlock arch_spinlock_t;
34 struct qrwlock { atomic_t cnts; arch_spinlock_t wait_lock; } ;
14 typedef struct qrwlock arch_rwlock_t;
247 struct math_emu_info { long ___orig_eip; struct pt_regs *regs; } ;
341 struct cpumask { unsigned long bits[128U]; } ;
15 typedef struct cpumask cpumask_t;
654 typedef struct cpumask *cpumask_var_t;
38 union __anonunion___u_44 { int __val; char __c[1U]; } ;
23 typedef atomic64_t atomic_long_t;
81 struct static_key { atomic_t enabled; } ;
22 struct tracepoint_func { void *func; void *data; int prio; } ;
28 struct tracepoint { const char *name; struct static_key key; void (*regfunc)(); void (*unregfunc)(); struct tracepoint_func *funcs; } ;
254 struct fregs_state { u32 cwd; u32 swd; u32 twd; u32 fip; u32 fcs; u32 foo; u32 fos; u32 st_space[20U]; u32 status; } ;
26 struct __anonstruct____missing_field_name_59 { u64 rip; u64 rdp; } ;
26 struct __anonstruct____missing_field_name_60 { u32 fip; u32 fcs; u32 foo; u32 fos; } ;
26 union __anonunion____missing_field_name_58 { struct __anonstruct____missing_field_name_59 __annonCompField13; struct __anonstruct____missing_field_name_60 __annonCompField14; } ;
26 union __anonunion____missing_field_name_61 { u32 padding1[12U]; u32 sw_reserved[12U]; } ;
26 struct fxregs_state { u16 cwd; u16 swd; u16 twd; u16 fop; union __anonunion____missing_field_name_58 __annonCompField15; u32 mxcsr; u32 mxcsr_mask; u32 st_space[32U]; u32 xmm_space[64U]; u32 padding[12U]; union __anonunion____missing_field_name_61 __annonCompField16; } ;
66 struct swregs_state { u32 cwd; u32 swd; u32 twd; u32 fip; u32 fcs; u32 foo; u32 fos; u32 st_space[20U]; u8 ftop; u8 changed; u8 lookahead; u8 no_update; u8 rm; u8 alimit; struct math_emu_info *info; u32 entry_eip; } ;
227 struct xstate_header { u64 xfeatures; u64 xcomp_bv; u64 reserved[6U]; } ;
233 struct xregs_state { struct fxregs_state i387; struct xstate_header header; u8 extended_state_area[0U]; } ;
254 union fpregs_state { struct fregs_state fsave; struct fxregs_state fxsave; struct swregs_state soft; struct xregs_state xsave; u8 __padding[4096U]; } ;
271 struct fpu { unsigned int last_cpu; unsigned char fpstate_active; unsigned char fpregs_active; unsigned char counter; union fpregs_state state; } ;
169 struct seq_operations ;
372 struct perf_event ;
377 struct __anonstruct_mm_segment_t_73 { unsigned long seg; } ;
377 typedef struct __anonstruct_mm_segment_t_73 mm_segment_t;
378 struct thread_struct { struct desc_struct tls_array[3U]; unsigned long sp0; unsigned long sp; unsigned short es; unsigned short ds; unsigned short fsindex; unsigned short gsindex; unsigned long fsbase; unsigned long gsbase; struct perf_event *ptrace_bps[4U]; unsigned long debugreg6; unsigned long ptrace_dr7; unsigned long cr2; unsigned long trap_nr; unsigned long error_code; unsigned long *io_bitmap_ptr; unsigned long iopl; unsigned int io_bitmap_max; mm_segment_t addr_limit; unsigned char sig_on_uaccess_err; unsigned char uaccess_err; struct fpu fpu; } ;
33 struct lockdep_map ;
55 struct stack_trace { unsigned int nr_entries; unsigned int max_entries; unsigned long *entries; int skip; } ;
28 struct lockdep_subclass_key { char __one_byte; } ;
53 struct lock_class_key { struct lockdep_subclass_key subkeys[8U]; } ;
59 struct lock_class { struct hlist_node hash_entry; struct list_head lock_entry; struct lockdep_subclass_key *key; unsigned int subclass; unsigned int dep_gen_id; unsigned long usage_mask; struct stack_trace usage_traces[13U]; struct list_head locks_after; struct list_head locks_before; unsigned int version; unsigned long ops; const char *name; int name_version; unsigned long contention_point[4U]; unsigned long contending_point[4U]; } ;
144 struct lockdep_map { struct lock_class_key *key; struct lock_class *class_cache[2U]; const char *name; int cpu; unsigned long ip; } ;
207 struct held_lock { u64 prev_chain_key; unsigned long acquire_ip; struct lockdep_map *instance; struct lockdep_map *nest_lock; u64 waittime_stamp; u64 holdtime_stamp; unsigned short class_idx; unsigned char irq_context; unsigned char trylock; unsigned char read; unsigned char check; unsigned char hardirqs_off; unsigned short references; unsigned int pin_count; } ;
572 struct raw_spinlock { arch_spinlock_t raw_lock; unsigned int magic; unsigned int owner_cpu; void *owner; struct lockdep_map dep_map; } ;
32 typedef struct raw_spinlock raw_spinlock_t;
33 struct __anonstruct____missing_field_name_75 { u8 __padding[24U]; struct lockdep_map dep_map; } ;
33 union __anonunion____missing_field_name_74 { struct raw_spinlock rlock; struct __anonstruct____missing_field_name_75 __annonCompField19; } ;
33 struct spinlock { union __anonunion____missing_field_name_74 __annonCompField20; } ;
76 typedef struct spinlock spinlock_t;
23 struct __anonstruct_rwlock_t_76 { arch_rwlock_t raw_lock; unsigned int magic; unsigned int owner_cpu; void *owner; struct lockdep_map dep_map; } ;
23 typedef struct __anonstruct_rwlock_t_76 rwlock_t;
416 struct seqcount { unsigned int sequence; struct lockdep_map dep_map; } ;
52 typedef struct seqcount seqcount_t;
407 struct __anonstruct_seqlock_t_91 { struct seqcount seqcount; spinlock_t lock; } ;
407 typedef struct __anonstruct_seqlock_t_91 seqlock_t;
601 struct timespec { __kernel_time_t tv_sec; long tv_nsec; } ;
7 typedef __s64 time64_t;
83 struct user_namespace ;
22 struct __anonstruct_kuid_t_92 { uid_t val; } ;
22 typedef struct __anonstruct_kuid_t_92 kuid_t;
27 struct __anonstruct_kgid_t_93 { gid_t val; } ;
27 typedef struct __anonstruct_kgid_t_93 kgid_t;
139 struct kstat { u64 ino; dev_t dev; umode_t mode; unsigned int nlink; kuid_t uid; kgid_t gid; dev_t rdev; loff_t size; struct timespec atime; struct timespec mtime; struct timespec ctime; unsigned long blksize; unsigned long long blocks; } ;
36 struct vm_area_struct ;
38 struct __wait_queue_head { spinlock_t lock; struct list_head task_list; } ;
43 typedef struct __wait_queue_head wait_queue_head_t;
97 struct __anonstruct_nodemask_t_94 { unsigned long bits[16U]; } ;
97 typedef struct __anonstruct_nodemask_t_94 nodemask_t;
80 struct free_area { struct list_head free_list[6U]; unsigned long nr_free; } ;
92 struct pglist_data ;
93 struct zone_padding { char x[0U]; } ;
208 struct zone_reclaim_stat { unsigned long recent_rotated[2U]; unsigned long recent_scanned[2U]; } ;
221 struct lruvec { struct list_head lists[5U]; struct zone_reclaim_stat reclaim_stat; atomic_long_t inactive_age; struct pglist_data *pgdat; } ;
247 typedef unsigned int isolate_mode_t;
255 struct per_cpu_pages { int count; int high; int batch; struct list_head lists[3U]; } ;
268 struct per_cpu_pageset { struct per_cpu_pages pcp; s8 expire; s8 stat_threshold; s8 vm_stat_diff[21U]; } ;
278 struct per_cpu_nodestat { s8 stat_threshold; s8 vm_node_stat_diff[26U]; } ;
284 enum zone_type { ZONE_DMA = 0, ZONE_DMA32 = 1, ZONE_NORMAL = 2, ZONE_MOVABLE = 3, __MAX_NR_ZONES = 4 } ;
292 struct zone { unsigned long watermark[3U]; unsigned long nr_reserved_highatomic; long lowmem_reserve[4U]; int node; struct pglist_data *zone_pgdat; struct per_cpu_pageset *pageset; unsigned long zone_start_pfn; unsigned long managed_pages; unsigned long spanned_pages; unsigned long present_pages; const char *name; unsigned long nr_isolate_pageblock; wait_queue_head_t *wait_table; unsigned long wait_table_hash_nr_entries; unsigned long wait_table_bits; struct zone_padding _pad1_; struct free_area free_area[11U]; unsigned long flags; spinlock_t lock; struct zone_padding _pad2_; unsigned long percpu_drift_mark; unsigned long compact_cached_free_pfn; unsigned long compact_cached_migrate_pfn[2U]; unsigned int compact_considered; unsigned int compact_defer_shift; int compact_order_failed; bool compact_blockskip_flush; bool contiguous; struct zone_padding _pad3_; atomic_long_t vm_stat[21U]; } ;
560 struct zoneref { struct zone *zone; int zone_idx; } ;
585 struct zonelist { struct zoneref _zonerefs[4097U]; } ;
608 struct pglist_data { struct zone node_zones[4U]; struct zonelist node_zonelists[2U]; int nr_zones; unsigned long node_start_pfn; unsigned long node_present_pages; unsigned long node_spanned_pages; int node_id; wait_queue_head_t kswapd_wait; wait_queue_head_t pfmemalloc_wait; struct task_struct *kswapd; int kswapd_order; enum zone_type kswapd_classzone_idx; int kcompactd_max_order; enum zone_type kcompactd_classzone_idx; wait_queue_head_t kcompactd_wait; struct task_struct *kcompactd; spinlock_t numabalancing_migrate_lock; unsigned long numabalancing_migrate_next_window; unsigned long numabalancing_migrate_nr_pages; unsigned long totalreserve_pages; unsigned long min_unmapped_pages; unsigned long min_slab_pages; struct zone_padding _pad1_; spinlock_t lru_lock; spinlock_t split_queue_lock; struct list_head split_queue; unsigned long split_queue_len; struct lruvec lruvec; unsigned int inactive_ratio; unsigned long flags; struct zone_padding _pad2_; struct per_cpu_nodestat *per_cpu_nodestats; atomic_long_t vm_stat[26U]; } ;
13 struct optimistic_spin_queue { atomic_t tail; } ;
39 struct mutex { atomic_t count; spinlock_t wait_lock; struct list_head wait_list; struct task_struct *owner; void *magic; struct lockdep_map dep_map; } ;
67 struct mutex_waiter { struct list_head list; struct task_struct *task; void *magic; } ;
177 struct rw_semaphore ;
178 struct rw_semaphore { atomic_long_t count; struct list_head wait_list; raw_spinlock_t wait_lock; struct optimistic_spin_queue osq; struct task_struct *owner; struct lockdep_map dep_map; } ;
178 struct completion { unsigned int done; wait_queue_head_t wait; } ;
446 union ktime { s64 tv64; } ;
41 typedef union ktime ktime_t;
1144 struct timer_list { struct hlist_node entry; unsigned long expires; void (*function)(unsigned long); unsigned long data; u32 flags; int start_pid; void *start_site; char start_comm[16U]; struct lockdep_map lockdep_map; } ;
254 struct hrtimer ;
255 enum hrtimer_restart ;
256 struct rb_node { unsigned long __rb_parent_color; struct rb_node *rb_right; struct rb_node *rb_left; } ;
41 struct rb_root { struct rb_node *rb_node; } ;
835 struct nsproxy ;
836 struct ctl_table_root ;
837 struct ctl_table_header ;
838 struct ctl_dir ;
38 typedef int proc_handler(struct ctl_table *, int, void *, size_t *, loff_t *);
58 struct ctl_table_poll { atomic_t event; wait_queue_head_t wait; } ;
97 struct ctl_table { const char *procname; void *data; int maxlen; umode_t mode; struct ctl_table *child; proc_handler *proc_handler; struct ctl_table_poll *poll; void *extra1; void *extra2; } ;
118 struct ctl_node { struct rb_node node; struct ctl_table_header *header; } ;
123 struct __anonstruct____missing_field_name_100 { struct ctl_table *ctl_table; int used; int count; int nreg; } ;
123 union __anonunion____missing_field_name_99 { struct __anonstruct____missing_field_name_100 __annonCompField21; struct callback_head rcu; } ;
123 struct ctl_table_set ;
123 struct ctl_table_header { union __anonunion____missing_field_name_99 __annonCompField22; struct completion *unregistering; struct ctl_table *ctl_table_arg; struct ctl_table_root *root; struct ctl_table_set *set; struct ctl_dir *parent; struct ctl_node *node; } ;
144 struct ctl_dir { struct ctl_table_header header; struct rb_root root; } ;
150 struct ctl_table_set { int (*is_seen)(struct ctl_table_set *); struct ctl_dir dir; } ;
155 struct ctl_table_root { struct ctl_table_set default_set; struct ctl_table_set * (*lookup)(struct ctl_table_root *, struct nsproxy *); int (*permissions)(struct ctl_table_header *, struct ctl_table *); } ;
278 struct workqueue_struct ;
279 struct work_struct ;
54 struct work_struct { atomic_long_t data; struct list_head entry; void (*func)(struct work_struct *); struct lockdep_map lockdep_map; } ;
107 struct delayed_work { struct work_struct work; struct timer_list timer; struct workqueue_struct *wq; int cpu; } ;
268 struct notifier_block ;
53 struct notifier_block { int (*notifier_call)(struct notifier_block *, unsigned long, void *); struct notifier_block *next; int priority; } ;
58 struct pm_message { int event; } ;
64 typedef struct pm_message pm_message_t;
65 struct dev_pm_ops { int (*prepare)(struct device *); void (*complete)(struct device *); int (*suspend)(struct device *); int (*resume)(struct device *); int (*freeze)(struct device *); int (*thaw)(struct device *); int (*poweroff)(struct device *); int (*restore)(struct device *); int (*suspend_late)(struct device *); int (*resume_early)(struct device *); int (*freeze_late)(struct device *); int (*thaw_early)(struct device *); int (*poweroff_late)(struct device *); int (*restore_early)(struct device *); int (*suspend_noirq)(struct device *); int (*resume_noirq)(struct device *); int (*freeze_noirq)(struct device *); int (*thaw_noirq)(struct device *); int (*poweroff_noirq)(struct device *); int (*restore_noirq)(struct device *); int (*runtime_suspend)(struct device *); int (*runtime_resume)(struct device *); int (*runtime_idle)(struct device *); } ;
320 enum rpm_status { RPM_ACTIVE = 0, RPM_RESUMING = 1, RPM_SUSPENDED = 2, RPM_SUSPENDING = 3 } ;
327 enum rpm_request { RPM_REQ_NONE = 0, RPM_REQ_IDLE = 1, RPM_REQ_SUSPEND = 2, RPM_REQ_AUTOSUSPEND = 3, RPM_REQ_RESUME = 4 } ;
335 struct wakeup_source ;
336 struct wake_irq ;
337 struct pm_domain_data ;
338 struct pm_subsys_data { spinlock_t lock; unsigned int refcount; struct list_head clock_list; struct pm_domain_data *domain_data; } ;
556 struct dev_pm_qos ;
556 struct dev_pm_info { pm_message_t power_state; unsigned char can_wakeup; unsigned char async_suspend; bool is_prepared; bool is_suspended; bool is_noirq_suspended; bool is_late_suspended; bool early_init; bool direct_complete; spinlock_t lock; struct list_head entry; struct completion completion; struct wakeup_source *wakeup; bool wakeup_path; bool syscore; bool no_pm_callbacks; struct timer_list suspend_timer; unsigned long timer_expires; struct work_struct work; wait_queue_head_t wait_queue; struct wake_irq *wakeirq; atomic_t usage_count; atomic_t child_count; unsigned char disable_depth; unsigned char idle_notification; unsigned char request_pending; unsigned char deferred_resume; unsigned char run_wake; unsigned char runtime_auto; bool ignore_children; unsigned char no_callbacks; unsigned char irq_safe; unsigned char use_autosuspend; unsigned char timer_autosuspends; unsigned char memalloc_noio; enum rpm_request request; enum rpm_status runtime_status; int runtime_error; int autosuspend_delay; unsigned long last_busy; unsigned long active_jiffies; unsigned long suspended_jiffies; unsigned long accounting_timestamp; struct pm_subsys_data *subsys_data; void (*set_latency_tolerance)(struct device *, s32 ); struct dev_pm_qos *qos; } ;
616 struct dev_pm_domain { struct dev_pm_ops ops; void (*detach)(struct device *, bool ); int (*activate)(struct device *); void (*sync)(struct device *); void (*dismiss)(struct device *); } ;
26 struct ldt_struct ;
26 struct vdso_image ;
26 struct __anonstruct_mm_context_t_165 { struct ldt_struct *ldt; unsigned short ia32_compat; struct mutex lock; void *vdso; const struct vdso_image *vdso_image; atomic_t perf_rdpmc_allowed; } ;
26 typedef struct __anonstruct_mm_context_t_165 mm_context_t;
22 struct bio_vec ;
1276 struct llist_node ;
64 struct llist_node { struct llist_node *next; } ;
37 struct cred ;
19 struct inode ;
58 struct arch_uprobe_task { unsigned long saved_scratch_register; unsigned int saved_trap_nr; unsigned int saved_tf; } ;
66 enum uprobe_task_state { UTASK_RUNNING = 0, UTASK_SSTEP = 1, UTASK_SSTEP_ACK = 2, UTASK_SSTEP_TRAPPED = 3 } ;
73 struct __anonstruct____missing_field_name_211 { struct arch_uprobe_task autask; unsigned long vaddr; } ;
73 struct __anonstruct____missing_field_name_212 { struct callback_head dup_xol_work; unsigned long dup_xol_addr; } ;
73 union __anonunion____missing_field_name_210 { struct __anonstruct____missing_field_name_211 __annonCompField35; struct __anonstruct____missing_field_name_212 __annonCompField36; } ;
73 struct uprobe ;
73 struct return_instance ;
73 struct uprobe_task { enum uprobe_task_state state; union __anonunion____missing_field_name_210 __annonCompField37; struct uprobe *active_uprobe; unsigned long xol_vaddr; struct return_instance *return_instances; unsigned int depth; } ;
94 struct return_instance { struct uprobe *uprobe; unsigned long func; unsigned long stack; unsigned long orig_ret_vaddr; bool chained; struct return_instance *next; } ;
110 struct xol_area ;
111 struct uprobes_state { struct xol_area *xol_area; } ;
150 struct address_space ;
151 struct mem_cgroup ;
152 union __anonunion____missing_field_name_213 { struct address_space *mapping; void *s_mem; atomic_t compound_mapcount; } ;
152 union __anonunion____missing_field_name_214 { unsigned long index; void *freelist; } ;
152 struct __anonstruct____missing_field_name_218 { unsigned short inuse; unsigned short objects; unsigned char frozen; } ;
152 union __anonunion____missing_field_name_217 { atomic_t _mapcount; unsigned int active; struct __anonstruct____missing_field_name_218 __annonCompField40; int units; } ;
152 struct __anonstruct____missing_field_name_216 { union __anonunion____missing_field_name_217 __annonCompField41; atomic_t _refcount; } ;
152 union __anonunion____missing_field_name_215 { unsigned long counters; struct __anonstruct____missing_field_name_216 __annonCompField42; } ;
152 struct dev_pagemap ;
152 struct __anonstruct____missing_field_name_220 { struct page *next; int pages; int pobjects; } ;
152 struct __anonstruct____missing_field_name_221 { unsigned long compound_head; unsigned int compound_dtor; unsigned int compound_order; } ;
152 struct __anonstruct____missing_field_name_222 { unsigned long __pad; pgtable_t pmd_huge_pte; } ;
152 union __anonunion____missing_field_name_219 { struct list_head lru; struct dev_pagemap *pgmap; struct __anonstruct____missing_field_name_220 __annonCompField44; struct callback_head callback_head; struct __anonstruct____missing_field_name_221 __annonCompField45; struct __anonstruct____missing_field_name_222 __annonCompField46; } ;
152 struct kmem_cache ;
152 union __anonunion____missing_field_name_223 { unsigned long private; spinlock_t *ptl; struct kmem_cache *slab_cache; } ;
152 struct page { unsigned long flags; union __anonunion____missing_field_name_213 __annonCompField38; union __anonunion____missing_field_name_214 __annonCompField39; union __anonunion____missing_field_name_215 __annonCompField43; union __anonunion____missing_field_name_219 __annonCompField47; union __anonunion____missing_field_name_223 __annonCompField48; struct mem_cgroup *mem_cgroup; } ;
197 struct page_frag { struct page *page; __u32 offset; __u32 size; } ;
282 struct userfaultfd_ctx ;
282 struct vm_userfaultfd_ctx { struct userfaultfd_ctx *ctx; } ;
289 struct __anonstruct_shared_224 { struct rb_node rb; unsigned long rb_subtree_last; } ;
289 struct anon_vma ;
289 struct vm_operations_struct ;
289 struct mempolicy ;
289 struct vm_area_struct { unsigned long vm_start; unsigned long vm_end; struct vm_area_struct *vm_next; struct vm_area_struct *vm_prev; struct rb_node vm_rb; unsigned long rb_subtree_gap; struct mm_struct *vm_mm; pgprot_t vm_page_prot; unsigned long vm_flags; struct __anonstruct_shared_224 shared; struct list_head anon_vma_chain; struct anon_vma *anon_vma; const struct vm_operations_struct *vm_ops; unsigned long vm_pgoff; struct file *vm_file; void *vm_private_data; struct mempolicy *vm_policy; struct vm_userfaultfd_ctx vm_userfaultfd_ctx; } ;
362 struct core_thread { struct task_struct *task; struct core_thread *next; } ;
367 struct core_state { atomic_t nr_threads; struct core_thread dumper; struct completion startup; } ;
381 struct task_rss_stat { int events; int count[4U]; } ;
389 struct mm_rss_stat { atomic_long_t count[4U]; } ;
394 struct kioctx_table ;
395 struct linux_binfmt ;
395 struct mmu_notifier_mm ;
395 struct mm_struct { struct vm_area_struct *mmap; struct rb_root mm_rb; u32 vmacache_seqnum; unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); unsigned long mmap_base; unsigned long mmap_legacy_base; unsigned long task_size; unsigned long highest_vm_end; pgd_t *pgd; atomic_t mm_users; atomic_t mm_count; atomic_long_t nr_ptes; atomic_long_t nr_pmds; int map_count; spinlock_t page_table_lock; struct rw_semaphore mmap_sem; struct list_head mmlist; unsigned long hiwater_rss; unsigned long hiwater_vm; unsigned long total_vm; unsigned long locked_vm; unsigned long pinned_vm; unsigned long data_vm; unsigned long exec_vm; unsigned long stack_vm; unsigned long def_flags; unsigned long start_code; unsigned long end_code; unsigned long start_data; unsigned long end_data; unsigned long start_brk; unsigned long brk; unsigned long start_stack; unsigned long arg_start; unsigned long arg_end; unsigned long env_start; unsigned long env_end; unsigned long saved_auxv[46U]; struct mm_rss_stat rss_stat; struct linux_binfmt *binfmt; cpumask_var_t cpu_vm_mask_var; mm_context_t context; unsigned long flags; struct core_state *core_state; spinlock_t ioctx_lock; struct kioctx_table *ioctx_table; struct task_struct *owner; struct file *exe_file; struct mmu_notifier_mm *mmu_notifier_mm; struct cpumask cpumask_allocation; unsigned long numa_next_scan; unsigned long numa_scan_offset; int numa_scan_seq; bool tlb_flush_pending; struct uprobes_state uprobes_state; void *bd_addr; atomic_long_t hugetlb_usage; struct work_struct async_put_work; } ;
565 struct vm_fault ;
619 struct vdso_image { void *data; unsigned long size; unsigned long alt; unsigned long alt_len; long sym_vvar_start; long sym_vvar_page; long sym_hpet_page; long sym_pvclock_page; long sym_VDSO32_NOTE_MASK; long sym___kernel_sigreturn; long sym___kernel_rt_sigreturn; long sym___kernel_vsyscall; long sym_int80_landing_pad; } ;
15 typedef __u64 Elf64_Addr;
16 typedef __u16 Elf64_Half;
18 typedef __u64 Elf64_Off;
20 typedef __u32 Elf64_Word;
21 typedef __u64 Elf64_Xword;
190 struct elf64_sym { Elf64_Word st_name; unsigned char st_info; unsigned char st_other; Elf64_Half st_shndx; Elf64_Addr st_value; Elf64_Xword st_size; } ;
198 typedef struct elf64_sym Elf64_Sym;
219 struct elf64_hdr { unsigned char e_ident[16U]; Elf64_Half e_type; Elf64_Half e_machine; Elf64_Word e_version; Elf64_Addr e_entry; Elf64_Off e_phoff; Elf64_Off e_shoff; Elf64_Word e_flags; Elf64_Half e_ehsize; Elf64_Half e_phentsize; Elf64_Half e_phnum; Elf64_Half e_shentsize; Elf64_Half e_shnum; Elf64_Half e_shstrndx; } ;
235 typedef struct elf64_hdr Elf64_Ehdr;
314 struct elf64_shdr { Elf64_Word sh_name; Elf64_Word sh_type; Elf64_Xword sh_flags; Elf64_Addr sh_addr; Elf64_Off sh_offset; Elf64_Xword sh_size; Elf64_Word sh_link; Elf64_Word sh_info; Elf64_Xword sh_addralign; Elf64_Xword sh_entsize; } ;
326 typedef struct elf64_shdr Elf64_Shdr;
53 union __anonunion____missing_field_name_229 { unsigned long bitmap[4U]; struct callback_head callback_head; } ;
53 struct idr_layer { int prefix; int layer; struct idr_layer *ary[256U]; int count; union __anonunion____missing_field_name_229 __annonCompField49; } ;
41 struct idr { struct idr_layer *hint; struct idr_layer *top; int layers; int cur; spinlock_t lock; int id_free_cnt; struct idr_layer *id_free; } ;
124 struct ida_bitmap { long nr_busy; unsigned long bitmap[15U]; } ;
167 struct ida { struct idr idr; struct ida_bitmap *free_bitmap; } ;
199 struct dentry ;
200 struct iattr ;
201 struct super_block ;
202 struct file_system_type ;
203 struct kernfs_open_node ;
204 struct kernfs_iattrs ;
227 struct kernfs_root ;
227 struct kernfs_elem_dir { unsigned long subdirs; struct rb_root children; struct kernfs_root *root; } ;
85 struct kernfs_node ;
85 struct kernfs_elem_symlink { struct kernfs_node *target_kn; } ;
89 struct kernfs_ops ;
89 struct kernfs_elem_attr { const struct kernfs_ops *ops; struct kernfs_open_node *open; loff_t size; struct kernfs_node *notify_next; } ;
96 union __anonunion____missing_field_name_234 { struct kernfs_elem_dir dir; struct kernfs_elem_symlink symlink; struct kernfs_elem_attr attr; } ;
96 struct kernfs_node { atomic_t count; atomic_t active; struct lockdep_map dep_map; struct kernfs_node *parent; const char *name; struct rb_node rb; const void *ns; unsigned int hash; union __anonunion____missing_field_name_234 __annonCompField50; void *priv; unsigned short flags; umode_t mode; unsigned int ino; struct kernfs_iattrs *iattr; } ;
138 struct kernfs_syscall_ops { int (*remount_fs)(struct kernfs_root *, int *, char *); int (*show_options)(struct seq_file *, struct kernfs_root *); int (*mkdir)(struct kernfs_node *, const char *, umode_t ); int (*rmdir)(struct kernfs_node *); int (*rename)(struct kernfs_node *, struct kernfs_node *, const char *); int (*show_path)(struct seq_file *, struct kernfs_node *, struct kernfs_root *); } ;
157 struct kernfs_root { struct kernfs_node *kn; unsigned int flags; struct ida ino_ida; struct kernfs_syscall_ops *syscall_ops; struct list_head supers; wait_queue_head_t deactivate_waitq; } ;
173 struct kernfs_open_file { struct kernfs_node *kn; struct file *file; void *priv; struct mutex mutex; struct mutex prealloc_mutex; int event; struct list_head list; char *prealloc_buf; size_t atomic_write_len; bool mmapped; const struct vm_operations_struct *vm_ops; } ;
191 struct kernfs_ops { int (*seq_show)(struct seq_file *, void *); void * (*seq_start)(struct seq_file *, loff_t *); void * (*seq_next)(struct seq_file *, void *, loff_t *); void (*seq_stop)(struct seq_file *, void *); ssize_t (*read)(struct kernfs_open_file *, char *, size_t , loff_t ); size_t atomic_write_len; bool prealloc; ssize_t (*write)(struct kernfs_open_file *, char *, size_t , loff_t ); int (*mmap)(struct kernfs_open_file *, struct vm_area_struct *); struct lock_class_key lockdep_key; } ;
499 struct sock ;
500 struct kobject ;
501 enum kobj_ns_type { KOBJ_NS_TYPE_NONE = 0, KOBJ_NS_TYPE_NET = 1, KOBJ_NS_TYPES = 2 } ;
507 struct kobj_ns_type_operations { enum kobj_ns_type type; bool (*current_may_mount)(); void * (*grab_current_ns)(); const void * (*netlink_ns)(struct sock *); const void * (*initial_ns)(); void (*drop_ns)(void *); } ;
59 struct bin_attribute ;
60 struct attribute { const char *name; umode_t mode; bool ignore_lockdep; struct lock_class_key *key; struct lock_class_key skey; } ;
37 struct attribute_group { const char *name; umode_t (*is_visible)(struct kobject *, struct attribute *, int); umode_t (*is_bin_visible)(struct kobject *, struct bin_attribute *, int); struct attribute **attrs; struct bin_attribute **bin_attrs; } ;
92 struct bin_attribute { struct attribute attr; size_t size; void *private; ssize_t (*read)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t , size_t ); ssize_t (*write)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t , size_t ); int (*mmap)(struct file *, struct kobject *, struct bin_attribute *, struct vm_area_struct *); } ;
165 struct sysfs_ops { ssize_t (*show)(struct kobject *, struct attribute *, char *); ssize_t (*store)(struct kobject *, struct attribute *, const char *, size_t ); } ;
530 struct kref { atomic_t refcount; } ;
52 struct kset ;
52 struct kobj_type ;
52 struct kobject { const char *name; struct list_head entry; struct kobject *parent; struct kset *kset; struct kobj_type *ktype; struct kernfs_node *sd; struct kref kref; struct delayed_work release; unsigned char state_initialized; unsigned char state_in_sysfs; unsigned char state_add_uevent_sent; unsigned char state_remove_uevent_sent; unsigned char uevent_suppress; } ;
115 struct kobj_type { void (*release)(struct kobject *); const struct sysfs_ops *sysfs_ops; struct attribute **default_attrs; const struct kobj_ns_type_operations * (*child_ns_type)(struct kobject *); const void * (*namespace)(struct kobject *); } ;
123 struct kobj_uevent_env { char *argv[3U]; char *envp[32U]; int envp_idx; char buf[2048U]; int buflen; } ;
131 struct kset_uevent_ops { const int (*filter)(struct kset *, struct kobject *); const const char * (*name)(struct kset *, struct kobject *); const int (*uevent)(struct kset *, struct kobject *, struct kobj_uevent_env *); } ;
148 struct kset { struct list_head list; spinlock_t list_lock; struct kobject kobj; const struct kset_uevent_ops *uevent_ops; } ;
223 struct kernel_param ;
228 struct kernel_param_ops { unsigned int flags; int (*set)(const char *, const struct kernel_param *); int (*get)(char *, const struct kernel_param *); void (*free)(void *); } ;
62 struct kparam_string ;
62 struct kparam_array ;
62 union __anonunion____missing_field_name_237 { void *arg; const struct kparam_string *str; const struct kparam_array *arr; } ;
62 struct kernel_param { const char *name; struct module *mod; const struct kernel_param_ops *ops; const u16 perm; s8 level; u8 flags; union __anonunion____missing_field_name_237 __annonCompField51; } ;
83 struct kparam_string { unsigned int maxlen; char *string; } ;
89 struct kparam_array { unsigned int max; unsigned int elemsize; unsigned int *num; const struct kernel_param_ops *ops; void *elem; } ;
470 struct exception_table_entry ;
24 struct latch_tree_node { struct rb_node node[2U]; } ;
211 struct mod_arch_specific { } ;
39 struct module_param_attrs ;
39 struct module_kobject { struct kobject kobj; struct module *mod; struct kobject *drivers_dir; struct module_param_attrs *mp; struct completion *kobj_completion; } ;
50 struct module_attribute { struct attribute attr; ssize_t (*show)(struct module_attribute *, struct module_kobject *, char *); ssize_t (*store)(struct module_attribute *, struct module_kobject *, const char *, size_t ); void (*setup)(struct module *, const char *); int (*test)(struct module *); void (*free)(struct module *); } ;
277 enum module_state { MODULE_STATE_LIVE = 0, MODULE_STATE_COMING = 1, MODULE_STATE_GOING = 2, MODULE_STATE_UNFORMED = 3 } ;
284 struct mod_tree_node { struct module *mod; struct latch_tree_node node; } ;
291 struct module_layout { void *base; unsigned int size; unsigned int text_size; unsigned int ro_size; unsigned int ro_after_init_size; struct mod_tree_node mtn; } ;
307 struct mod_kallsyms { Elf64_Sym *symtab; unsigned int num_symtab; char *strtab; } ;
321 struct klp_modinfo { Elf64_Ehdr hdr; Elf64_Shdr *sechdrs; char *secstrings; unsigned int symndx; } ;
329 struct module_sect_attrs ;
329 struct module_notes_attrs ;
329 struct trace_event_call ;
329 struct trace_enum_map ;
329 struct module { enum module_state state; struct list_head list; char name[56U]; struct module_kobject mkobj; struct module_attribute *modinfo_attrs; const char *version; const char *srcversion; struct kobject *holders_dir; const struct kernel_symbol *syms; const unsigned long *crcs; unsigned int num_syms; struct mutex param_lock; struct kernel_param *kp; unsigned int num_kp; unsigned int num_gpl_syms; const struct kernel_symbol *gpl_syms; const unsigned long *gpl_crcs; const struct kernel_symbol *unused_syms; const unsigned long *unused_crcs; unsigned int num_unused_syms; unsigned int num_unused_gpl_syms; const struct kernel_symbol *unused_gpl_syms; const unsigned long *unused_gpl_crcs; bool sig_ok; bool async_probe_requested; const struct kernel_symbol *gpl_future_syms; const unsigned long *gpl_future_crcs; unsigned int num_gpl_future_syms; unsigned int num_exentries; struct exception_table_entry *extable; int (*init)(); struct module_layout core_layout; struct module_layout init_layout; struct mod_arch_specific arch; unsigned int taints; unsigned int num_bugs; struct list_head bug_list; struct bug_entry *bug_table; struct mod_kallsyms *kallsyms; struct mod_kallsyms core_kallsyms; struct module_sect_attrs *sect_attrs; struct module_notes_attrs *notes_attrs; char *args; void *percpu; unsigned int percpu_size; unsigned int num_tracepoints; const struct tracepoint **tracepoints_ptrs; unsigned int num_trace_bprintk_fmt; const char **trace_bprintk_fmt_start; struct trace_event_call **trace_events; unsigned int num_trace_events; struct trace_enum_map **trace_enums; unsigned int num_trace_enums; unsigned int num_ftrace_callsites; unsigned long *ftrace_callsites; bool klp; bool klp_alive; struct klp_modinfo *klp_info; struct list_head source_list; struct list_head target_list; void (*exit)(); atomic_t refcnt; ctor_fn_t (**ctors)(); unsigned int num_ctors; } ;
22 struct kernel_cap_struct { __u32 cap[2U]; } ;
25 typedef struct kernel_cap_struct kernel_cap_t;
84 struct plist_node { int prio; struct list_head prio_list; struct list_head node_list; } ;
4 typedef unsigned long cputime_t;
25 struct sem_undo_list ;
25 struct sysv_sem { struct sem_undo_list *undo_list; } ;
78 struct user_struct ;
26 struct sysv_shm { struct list_head shm_clist; } ;
24 struct __anonstruct_sigset_t_245 { unsigned long sig[1U]; } ;
24 typedef struct __anonstruct_sigset_t_245 sigset_t;
25 struct siginfo ;
17 typedef void __signalfn_t(int);
18 typedef __signalfn_t *__sighandler_t;
20 typedef void __restorefn_t();
21 typedef __restorefn_t *__sigrestore_t;
34 union sigval { int sival_int; void *sival_ptr; } ;
10 typedef union sigval sigval_t;
11 struct __anonstruct__kill_247 { __kernel_pid_t _pid; __kernel_uid32_t _uid; } ;
11 struct __anonstruct__timer_248 { __kernel_timer_t _tid; int _overrun; char _pad[0U]; sigval_t _sigval; int _sys_private; } ;
11 struct __anonstruct__rt_249 { __kernel_pid_t _pid; __kernel_uid32_t _uid; sigval_t _sigval; } ;
11 struct __anonstruct__sigchld_250 { __kernel_pid_t _pid; __kernel_uid32_t _uid; int _status; __kernel_clock_t _utime; __kernel_clock_t _stime; } ;
11 struct __anonstruct__addr_bnd_253 { void *_lower; void *_upper; } ;
11 union __anonunion____missing_field_name_252 { struct __anonstruct__addr_bnd_253 _addr_bnd; __u32 _pkey; } ;
11 struct __anonstruct__sigfault_251 { void *_addr; short _addr_lsb; union __anonunion____missing_field_name_252 __annonCompField52; } ;
11 struct __anonstruct__sigpoll_254 { long _band; int _fd; } ;
11 struct __anonstruct__sigsys_255 { void *_call_addr; int _syscall; unsigned int _arch; } ;
11 union __anonunion__sifields_246 { int _pad[28U]; struct __anonstruct__kill_247 _kill; struct __anonstruct__timer_248 _timer; struct __anonstruct__rt_249 _rt; struct __anonstruct__sigchld_250 _sigchld; struct __anonstruct__sigfault_251 _sigfault; struct __anonstruct__sigpoll_254 _sigpoll; struct __anonstruct__sigsys_255 _sigsys; } ;
11 struct siginfo { int si_signo; int si_errno; int si_code; union __anonunion__sifields_246 _sifields; } ;
118 typedef struct siginfo siginfo_t;
22 struct sigpending { struct list_head list; sigset_t signal; } ;
257 struct sigaction { __sighandler_t sa_handler; unsigned long sa_flags; __sigrestore_t sa_restorer; sigset_t sa_mask; } ;
271 struct k_sigaction { struct sigaction sa; } ;
457 enum pid_type { PIDTYPE_PID = 0, PIDTYPE_PGID = 1, PIDTYPE_SID = 2, PIDTYPE_MAX = 3 } ;
464 struct pid_namespace ;
464 struct upid { int nr; struct pid_namespace *ns; struct hlist_node pid_chain; } ;
56 struct pid { atomic_t count; unsigned int level; struct hlist_head tasks[3U]; struct callback_head rcu; struct upid numbers[1U]; } ;
68 struct pid_link { struct hlist_node node; struct pid *pid; } ;
43 struct seccomp_filter ;
44 struct seccomp { int mode; struct seccomp_filter *filter; } ;
40 struct rt_mutex_waiter ;
41 struct rlimit { __kernel_ulong_t rlim_cur; __kernel_ulong_t rlim_max; } ;
11 struct timerqueue_node { struct rb_node node; ktime_t expires; } ;
12 struct timerqueue_head { struct rb_root head; struct timerqueue_node *next; } ;
50 struct hrtimer_clock_base ;
51 struct hrtimer_cpu_base ;
60 enum hrtimer_restart { HRTIMER_NORESTART = 0, HRTIMER_RESTART = 1 } ;
65 struct hrtimer { struct timerqueue_node node; ktime_t _softexpires; enum hrtimer_restart (*function)(struct hrtimer *); struct hrtimer_clock_base *base; u8 state; u8 is_rel; int start_pid; void *start_site; char start_comm[16U]; } ;
125 struct hrtimer_clock_base { struct hrtimer_cpu_base *cpu_base; int index; clockid_t clockid; struct timerqueue_head active; ktime_t (*get_time)(); ktime_t offset; } ;
158 struct hrtimer_cpu_base { raw_spinlock_t lock; seqcount_t seq; struct hrtimer *running; unsigned int cpu; unsigned int active_bases; unsigned int clock_was_set_seq; bool migration_enabled; bool nohz_active; unsigned char in_hrtirq; unsigned char hres_active; unsigned char hang_detected; ktime_t expires_next; struct hrtimer *next_timer; unsigned int nr_events; unsigned int nr_retries; unsigned int nr_hangs; unsigned int max_hang_time; struct hrtimer_clock_base clock_base[4U]; } ;
12 enum kcov_mode { KCOV_MODE_DISABLED = 0, KCOV_MODE_TRACE = 1 } ;
17 struct task_io_accounting { u64 rchar; u64 wchar; u64 syscr; u64 syscw; u64 read_bytes; u64 write_bytes; u64 cancelled_write_bytes; } ;
45 struct latency_record { unsigned long backtrace[12U]; unsigned int count; unsigned long time; unsigned long max; } ;
41 struct assoc_array_ptr ;
41 struct assoc_array { struct assoc_array_ptr *root; unsigned long nr_leaves_on_tree; } ;
31 typedef int32_t key_serial_t;
34 typedef uint32_t key_perm_t;
35 struct key ;
36 struct signal_struct ;
37 struct key_type ;
41 struct keyring_index_key { struct key_type *type; const char *description; size_t desc_len; } ;
91 union key_payload { void *rcu_data0; void *data[4U]; } ;
128 union __anonunion____missing_field_name_290 { struct list_head graveyard_link; struct rb_node serial_node; } ;
128 struct key_user ;
128 union __anonunion____missing_field_name_291 { time_t expiry; time_t revoked_at; } ;
128 struct __anonstruct____missing_field_name_293 { struct key_type *type; char *description; } ;
128 union __anonunion____missing_field_name_292 { struct keyring_index_key index_key; struct __anonstruct____missing_field_name_293 __annonCompField55; } ;
128 struct __anonstruct____missing_field_name_295 { struct list_head name_link; struct assoc_array keys; } ;
128 union __anonunion____missing_field_name_294 { union key_payload payload; struct __anonstruct____missing_field_name_295 __annonCompField57; int reject_error; } ;
128 struct key { atomic_t usage; key_serial_t serial; union __anonunion____missing_field_name_290 __annonCompField53; struct rw_semaphore sem; struct key_user *user; void *security; union __anonunion____missing_field_name_291 __annonCompField54; time_t last_used_at; kuid_t uid; kgid_t gid; key_perm_t perm; unsigned short quotalen; unsigned short datalen; unsigned long flags; union __anonunion____missing_field_name_292 __annonCompField56; union __anonunion____missing_field_name_294 __annonCompField58; int (*restrict_link)(struct key *, const struct key_type *, const union key_payload *); } ;
377 struct audit_context ;
27 struct group_info { atomic_t usage; int ngroups; int nblocks; kgid_t small_block[32U]; kgid_t *blocks[0U]; } ;
90 struct cred { atomic_t usage; atomic_t subscribers; void *put_addr; unsigned int magic; kuid_t uid; kgid_t gid; kuid_t suid; kgid_t sgid; kuid_t euid; kgid_t egid; kuid_t fsuid; kgid_t fsgid; unsigned int securebits; kernel_cap_t cap_inheritable; kernel_cap_t cap_permitted; kernel_cap_t cap_effective; kernel_cap_t cap_bset; kernel_cap_t cap_ambient; unsigned char jit_keyring; struct key *session_keyring; struct key *process_keyring; struct key *thread_keyring; struct key *request_key_auth; void *security; struct user_struct *user; struct user_namespace *user_ns; struct group_info *group_info; struct callback_head rcu; } ;
377 struct percpu_ref ;
55 typedef void percpu_ref_func_t(struct percpu_ref *);
68 struct percpu_ref { atomic_long_t count; unsigned long percpu_count_ptr; percpu_ref_func_t *release; percpu_ref_func_t *confirm_switch; bool force_atomic; struct callback_head rcu; } ;
325 enum rcu_sync_type { RCU_SYNC = 0, RCU_SCHED_SYNC = 1, RCU_BH_SYNC = 2 } ;
331 struct rcu_sync { int gp_state; int gp_count; wait_queue_head_t gp_wait; int cb_state; struct callback_head cb_head; enum rcu_sync_type gp_type; } ;
65 struct percpu_rw_semaphore { struct rcu_sync rss; unsigned int *fast_read_ctr; struct rw_semaphore rw_sem; atomic_t slow_read_ctr; wait_queue_head_t write_waitq; } ;
54 struct cgroup ;
55 struct cgroup_root ;
56 struct cgroup_subsys ;
57 struct cgroup_taskset ;
101 struct cgroup_file { struct kernfs_node *kn; } ;
90 struct cgroup_subsys_state { struct cgroup *cgroup; struct cgroup_subsys *ss; struct percpu_ref refcnt; struct cgroup_subsys_state *parent; struct list_head sibling; struct list_head children; int id; unsigned int flags; u64 serial_nr; atomic_t online_cnt; struct callback_head callback_head; struct work_struct destroy_work; } ;
141 struct css_set { atomic_t refcount; struct hlist_node hlist; struct list_head tasks; struct list_head mg_tasks; struct list_head cgrp_links; struct cgroup *dfl_cgrp; struct cgroup_subsys_state *subsys[13U]; struct list_head mg_preload_node; struct list_head mg_node; struct cgroup *mg_src_cgrp; struct cgroup *mg_dst_cgrp; struct css_set *mg_dst_cset; struct list_head e_cset_node[13U]; struct list_head task_iters; bool dead; struct callback_head callback_head; } ;
221 struct cgroup { struct cgroup_subsys_state self; unsigned long flags; int id; int level; int populated_cnt; struct kernfs_node *kn; struct cgroup_file procs_file; struct cgroup_file events_file; u16 subtree_control; u16 subtree_ss_mask; u16 old_subtree_control; u16 old_subtree_ss_mask; struct cgroup_subsys_state *subsys[13U]; struct cgroup_root *root; struct list_head cset_links; struct list_head e_csets[13U]; struct list_head pidlists; struct mutex pidlist_mutex; wait_queue_head_t offline_waitq; struct work_struct release_agent_work; int ancestor_ids[]; } ;
306 struct cgroup_root { struct kernfs_root *kf_root; unsigned int subsys_mask; int hierarchy_id; struct cgroup cgrp; int cgrp_ancestor_id_storage; atomic_t nr_cgrps; struct list_head root_list; unsigned int flags; struct idr cgroup_idr; char release_agent_path[4096U]; char name[64U]; } ;
345 struct cftype { char name[64U]; unsigned long private; size_t max_write_len; unsigned int flags; unsigned int file_offset; struct cgroup_subsys *ss; struct list_head node; struct kernfs_ops *kf_ops; u64 (*read_u64)(struct cgroup_subsys_state *, struct cftype *); s64 (*read_s64)(struct cgroup_subsys_state *, struct cftype *); int (*seq_show)(struct seq_file *, void *); void * (*seq_start)(struct seq_file *, loff_t *); void * (*seq_next)(struct seq_file *, void *, loff_t *); void (*seq_stop)(struct seq_file *, void *); int (*write_u64)(struct cgroup_subsys_state *, struct cftype *, u64 ); int (*write_s64)(struct cgroup_subsys_state *, struct cftype *, s64 ); ssize_t (*write)(struct kernfs_open_file *, char *, size_t , loff_t ); struct lock_class_key lockdep_key; } ;
430 struct cgroup_subsys { struct cgroup_subsys_state * (*css_alloc)(struct cgroup_subsys_state *); int (*css_online)(struct cgroup_subsys_state *); void (*css_offline)(struct cgroup_subsys_state *); void (*css_released)(struct cgroup_subsys_state *); void (*css_free)(struct cgroup_subsys_state *); void (*css_reset)(struct cgroup_subsys_state *); int (*can_attach)(struct cgroup_taskset *); void (*cancel_attach)(struct cgroup_taskset *); void (*attach)(struct cgroup_taskset *); void (*post_attach)(); int (*can_fork)(struct task_struct *); void (*cancel_fork)(struct task_struct *); void (*fork)(struct task_struct *); void (*exit)(struct task_struct *); void (*free)(struct task_struct *); void (*bind)(struct cgroup_subsys_state *); bool early_init; bool implicit_on_dfl; bool broken_hierarchy; bool warned_broken_hierarchy; int id; const char *name; const char *legacy_name; struct cgroup_root *root; struct idr css_idr; struct list_head cfts; struct cftype *dfl_cftypes; struct cftype *legacy_cftypes; unsigned int depends_on; } ;
546 struct __anonstruct____missing_field_name_299 { u8 is_data; u8 padding; u16 prioidx; u32 classid; } ;
546 union __anonunion____missing_field_name_298 { struct __anonstruct____missing_field_name_299 __annonCompField59; u64 val; } ;
546 struct sock_cgroup_data { union __anonunion____missing_field_name_298 __annonCompField60; } ;
128 struct futex_pi_state ;
129 struct robust_list_head ;
130 struct bio_list ;
131 struct fs_struct ;
132 struct perf_event_context ;
133 struct blk_plug ;
135 struct nameidata ;
188 struct cfs_rq ;
189 struct task_group ;
493 struct sighand_struct { atomic_t count; struct k_sigaction action[64U]; spinlock_t siglock; wait_queue_head_t signalfd_wqh; } ;
536 struct pacct_struct { int ac_flag; long ac_exitcode; unsigned long ac_mem; cputime_t ac_utime; cputime_t ac_stime; unsigned long ac_minflt; unsigned long ac_majflt; } ;
544 struct cpu_itimer { cputime_t expires; cputime_t incr; u32 error; u32 incr_error; } ;
551 struct prev_cputime { cputime_t utime; cputime_t stime; raw_spinlock_t lock; } ;
576 struct task_cputime { cputime_t utime; cputime_t stime; unsigned long long sum_exec_runtime; } ;
592 struct task_cputime_atomic { atomic64_t utime; atomic64_t stime; atomic64_t sum_exec_runtime; } ;
614 struct thread_group_cputimer { struct task_cputime_atomic cputime_atomic; bool running; bool checking_timer; } ;
659 struct autogroup ;
660 struct tty_struct ;
660 struct taskstats ;
660 struct tty_audit_buf ;
660 struct signal_struct { atomic_t sigcnt; atomic_t live; int nr_threads; atomic_t oom_victims; struct list_head thread_head; wait_queue_head_t wait_chldexit; struct task_struct *curr_target; struct sigpending shared_pending; int group_exit_code; int notify_count; struct task_struct *group_exit_task; int group_stop_count; unsigned int flags; unsigned char is_child_subreaper; unsigned char has_child_subreaper; int posix_timer_id; struct list_head posix_timers; struct hrtimer real_timer; struct pid *leader_pid; ktime_t it_real_incr; struct cpu_itimer it[2U]; struct thread_group_cputimer cputimer; struct task_cputime cputime_expires; struct list_head cpu_timers[3U]; struct pid *tty_old_pgrp; int leader; struct tty_struct *tty; struct autogroup *autogroup; seqlock_t stats_lock; cputime_t utime; cputime_t stime; cputime_t cutime; cputime_t cstime; cputime_t gtime; cputime_t cgtime; struct prev_cputime prev_cputime; unsigned long nvcsw; unsigned long nivcsw; unsigned long cnvcsw; unsigned long cnivcsw; unsigned long min_flt; unsigned long maj_flt; unsigned long cmin_flt; unsigned long cmaj_flt; unsigned long inblock; unsigned long oublock; unsigned long cinblock; unsigned long coublock; unsigned long maxrss; unsigned long cmaxrss; struct task_io_accounting ioac; unsigned long long sum_sched_runtime; struct rlimit rlim[16U]; struct pacct_struct pacct; struct taskstats *stats; unsigned int audit_tty; struct tty_audit_buf *tty_audit_buf; bool oom_flag_origin; short oom_score_adj; short oom_score_adj_min; struct mutex cred_guard_mutex; } ;
835 struct user_struct { atomic_t __count; atomic_t processes; atomic_t sigpending; atomic_t inotify_watches; atomic_t inotify_devs; atomic_t fanotify_listeners; atomic_long_t epoll_watches; unsigned long mq_bytes; unsigned long locked_shm; unsigned long unix_inflight; atomic_long_t pipe_bufs; struct key *uid_keyring; struct key *session_keyring; struct hlist_node uidhash_node; kuid_t uid; atomic_long_t locked_vm; } ;
880 struct backing_dev_info ;
881 struct reclaim_state ;
882 struct sched_info { unsigned long pcount; unsigned long long run_delay; unsigned long long last_arrival; unsigned long long last_queued; } ;
896 struct task_delay_info { spinlock_t lock; unsigned int flags; u64 blkio_start; u64 blkio_delay; u64 swapin_delay; u32 blkio_count; u32 swapin_count; u64 freepages_start; u64 freepages_delay; u32 freepages_count; } ;
953 struct wake_q_node { struct wake_q_node *next; } ;
1185 struct io_context ;
1219 struct pipe_inode_info ;
1220 struct uts_namespace ;
1221 struct load_weight { unsigned long weight; u32 inv_weight; } ;
1228 struct sched_avg { u64 last_update_time; u64 load_sum; u32 util_sum; u32 period_contrib; unsigned long load_avg; unsigned long util_avg; } ;
1286 struct sched_statistics { u64 wait_start; u64 wait_max; u64 wait_count; u64 wait_sum; u64 iowait_count; u64 iowait_sum; u64 sleep_start; u64 sleep_max; s64 sum_sleep_runtime; u64 block_start; u64 block_max; u64 exec_max; u64 slice_max; u64 nr_migrations_cold; u64 nr_failed_migrations_affine; u64 nr_failed_migrations_running; u64 nr_failed_migrations_hot; u64 nr_forced_migrations; u64 nr_wakeups; u64 nr_wakeups_sync; u64 nr_wakeups_migrate; u64 nr_wakeups_local; u64 nr_wakeups_remote; u64 nr_wakeups_affine; u64 nr_wakeups_affine_attempts; u64 nr_wakeups_passive; u64 nr_wakeups_idle; } ;
1321 struct sched_entity { struct load_weight load; struct rb_node run_node; struct list_head group_node; unsigned int on_rq; u64 exec_start; u64 sum_exec_runtime; u64 vruntime; u64 prev_sum_exec_runtime; u64 nr_migrations; struct sched_statistics statistics; int depth; struct sched_entity *parent; struct cfs_rq *cfs_rq; struct cfs_rq *my_q; struct sched_avg avg; } ;
1358 struct rt_rq ;
1358 struct sched_rt_entity { struct list_head run_list; unsigned long timeout; unsigned long watchdog_stamp; unsigned int time_slice; unsigned short on_rq; unsigned short on_list; struct sched_rt_entity *back; struct sched_rt_entity *parent; struct rt_rq *rt_rq; struct rt_rq *my_q; } ;
1376 struct sched_dl_entity { struct rb_node rb_node; u64 dl_runtime; u64 dl_deadline; u64 dl_period; u64 dl_bw; s64 runtime; u64 deadline; unsigned int flags; int dl_throttled; int dl_boosted; int dl_yielded; struct hrtimer dl_timer; } ;
1440 struct tlbflush_unmap_batch { struct cpumask cpumask; bool flush_required; bool writable; } ;
1459 struct sched_class ;
1459 struct files_struct ;
1459 struct compat_robust_list_head ;
1459 struct numa_group ;
1459 struct ftrace_ret_stack ;
1459 struct kcov ;
1459 struct task_struct { volatile long state; void *stack; atomic_t usage; unsigned int flags; unsigned int ptrace; struct llist_node wake_entry; int on_cpu; unsigned int wakee_flips; unsigned long wakee_flip_decay_ts; struct task_struct *last_wakee; int wake_cpu; int on_rq; int prio; int static_prio; int normal_prio; unsigned int rt_priority; const struct sched_class *sched_class; struct sched_entity se; struct sched_rt_entity rt; struct task_group *sched_task_group; struct sched_dl_entity dl; struct hlist_head preempt_notifiers; unsigned int btrace_seq; unsigned int policy; int nr_cpus_allowed; cpumask_t cpus_allowed; unsigned long rcu_tasks_nvcsw; bool rcu_tasks_holdout; struct list_head rcu_tasks_holdout_list; int rcu_tasks_idle_cpu; struct sched_info sched_info; struct list_head tasks; struct plist_node pushable_tasks; struct rb_node pushable_dl_tasks; struct mm_struct *mm; struct mm_struct *active_mm; u32 vmacache_seqnum; struct vm_area_struct *vmacache[4U]; struct task_rss_stat rss_stat; int exit_state; int exit_code; int exit_signal; int pdeath_signal; unsigned long jobctl; unsigned int personality; unsigned char sched_reset_on_fork; unsigned char sched_contributes_to_load; unsigned char sched_migrated; unsigned char sched_remote_wakeup; unsigned char; unsigned char in_execve; unsigned char in_iowait; unsigned char restore_sigmask; unsigned char memcg_may_oom; unsigned char memcg_kmem_skip_account; unsigned char brk_randomized; unsigned long atomic_flags; struct restart_block restart_block; pid_t pid; pid_t tgid; struct task_struct *real_parent; struct task_struct *parent; struct list_head children; struct list_head sibling; struct task_struct *group_leader; struct list_head ptraced; struct list_head ptrace_entry; struct pid_link pids[3U]; struct list_head thread_group; struct list_head thread_node; struct completion *vfork_done; int *set_child_tid; int *clear_child_tid; cputime_t utime; cputime_t stime; cputime_t utimescaled; cputime_t stimescaled; 
cputime_t gtime; struct prev_cputime prev_cputime; unsigned long nvcsw; unsigned long nivcsw; u64 start_time; u64 real_start_time; unsigned long min_flt; unsigned long maj_flt; struct task_cputime cputime_expires; struct list_head cpu_timers[3U]; const struct cred *real_cred; const struct cred *cred; char comm[16U]; struct nameidata *nameidata; struct sysv_sem sysvsem; struct sysv_shm sysvshm; unsigned long last_switch_count; struct fs_struct *fs; struct files_struct *files; struct nsproxy *nsproxy; struct signal_struct *signal; struct sighand_struct *sighand; sigset_t blocked; sigset_t real_blocked; sigset_t saved_sigmask; struct sigpending pending; unsigned long sas_ss_sp; size_t sas_ss_size; unsigned int sas_ss_flags; struct callback_head *task_works; struct audit_context *audit_context; kuid_t loginuid; unsigned int sessionid; struct seccomp seccomp; u32 parent_exec_id; u32 self_exec_id; spinlock_t alloc_lock; raw_spinlock_t pi_lock; struct wake_q_node wake_q; struct rb_root pi_waiters; struct rb_node *pi_waiters_leftmost; struct rt_mutex_waiter *pi_blocked_on; struct mutex_waiter *blocked_on; unsigned int irq_events; unsigned long hardirq_enable_ip; unsigned long hardirq_disable_ip; unsigned int hardirq_enable_event; unsigned int hardirq_disable_event; int hardirqs_enabled; int hardirq_context; unsigned long softirq_disable_ip; unsigned long softirq_enable_ip; unsigned int softirq_disable_event; unsigned int softirq_enable_event; int softirqs_enabled; int softirq_context; u64 curr_chain_key; int lockdep_depth; unsigned int lockdep_recursion; struct held_lock held_locks[48U]; gfp_t lockdep_reclaim_gfp; unsigned int in_ubsan; void *journal_info; struct bio_list *bio_list; struct blk_plug *plug; struct reclaim_state *reclaim_state; struct backing_dev_info *backing_dev_info; struct io_context *io_context; unsigned long ptrace_message; siginfo_t *last_siginfo; struct task_io_accounting ioac; u64 acct_rss_mem1; u64 acct_vm_mem1; cputime_t acct_timexpd; nodemask_t 
mems_allowed; seqcount_t mems_allowed_seq; int cpuset_mem_spread_rotor; int cpuset_slab_spread_rotor; struct css_set *cgroups; struct list_head cg_list; struct robust_list_head *robust_list; struct compat_robust_list_head *compat_robust_list; struct list_head pi_state_list; struct futex_pi_state *pi_state_cache; struct perf_event_context *perf_event_ctxp[2U]; struct mutex perf_event_mutex; struct list_head perf_event_list; struct mempolicy *mempolicy; short il_next; short pref_node_fork; int numa_scan_seq; unsigned int numa_scan_period; unsigned int numa_scan_period_max; int numa_preferred_nid; unsigned long numa_migrate_retry; u64 node_stamp; u64 last_task_numa_placement; u64 last_sum_exec_runtime; struct callback_head numa_work; struct list_head numa_entry; struct numa_group *numa_group; unsigned long *numa_faults; unsigned long total_numa_faults; unsigned long numa_faults_locality[3U]; unsigned long numa_pages_migrated; struct tlbflush_unmap_batch tlb_ubc; struct callback_head rcu; struct pipe_inode_info *splice_pipe; struct page_frag task_frag; struct task_delay_info *delays; int make_it_fail; int nr_dirtied; int nr_dirtied_pause; unsigned long dirty_paused_when; int latency_record_count; struct latency_record latency_record[32U]; u64 timer_slack_ns; u64 default_timer_slack_ns; unsigned int kasan_depth; int curr_ret_stack; struct ftrace_ret_stack *ret_stack; unsigned long long ftrace_timestamp; atomic_t trace_overrun; atomic_t tracing_graph_pause; unsigned long trace; unsigned long trace_recursion; enum kcov_mode kcov_mode; unsigned int kcov_size; void *kcov_area; struct kcov *kcov; struct mem_cgroup *memcg_in_oom; gfp_t memcg_oom_gfp_mask; int memcg_oom_order; unsigned int memcg_nr_pages_over_high; struct uprobe_task *utask; unsigned int sequential_io; unsigned int sequential_io_avg; unsigned long task_state_change; int pagefault_disabled; struct task_struct *oom_reaper_list; struct thread_struct thread; } ;
158 struct iovec { void *iov_base; __kernel_size_t iov_len; } ;
21 struct kvec { void *iov_base; size_t iov_len; } ;
27 union __anonunion____missing_field_name_326 { const struct iovec *iov; const struct kvec *kvec; const struct bio_vec *bvec; } ;
27 struct iov_iter { int type; size_t iov_offset; size_t count; union __anonunion____missing_field_name_326 __annonCompField64; unsigned long nr_segs; } ;
11 typedef unsigned short __kernel_sa_family_t;
23 typedef __kernel_sa_family_t sa_family_t;
24 struct sockaddr { sa_family_t sa_family; char sa_data[14U]; } ;
38 struct kiocb ;
38 struct msghdr { void *msg_name; int msg_namelen; struct iov_iter msg_iter; void *msg_control; __kernel_size_t msg_controllen; unsigned int msg_flags; struct kiocb *msg_iocb; } ;
217 enum ldv_19069 { SS_FREE = 0, SS_UNCONNECTED = 1, SS_CONNECTING = 2, SS_CONNECTED = 3, SS_DISCONNECTING = 4 } ;
53 typedef enum ldv_19069 socket_state;
54 struct poll_table_struct ;
55 struct net ;
72 struct fasync_struct ;
72 struct socket_wq { wait_queue_head_t wait; struct fasync_struct *fasync_list; unsigned long flags; struct callback_head rcu; } ;
99 struct proto_ops ;
99 struct socket { socket_state state; short type; unsigned long flags; struct socket_wq *wq; struct file *file; struct sock *sk; const struct proto_ops *ops; } ;
125 struct proto_ops { int family; struct module *owner; int (*release)(struct socket *); int (*bind)(struct socket *, struct sockaddr *, int); int (*connect)(struct socket *, struct sockaddr *, int, int); int (*socketpair)(struct socket *, struct socket *); int (*accept)(struct socket *, struct socket *, int); int (*getname)(struct socket *, struct sockaddr *, int *, int); unsigned int (*poll)(struct file *, struct socket *, struct poll_table_struct *); int (*ioctl)(struct socket *, unsigned int, unsigned long); int (*compat_ioctl)(struct socket *, unsigned int, unsigned long); int (*listen)(struct socket *, int); int (*shutdown)(struct socket *, int); int (*setsockopt)(struct socket *, int, int, char *, unsigned int); int (*getsockopt)(struct socket *, int, int, char *, int *); int (*compat_setsockopt)(struct socket *, int, int, char *, unsigned int); int (*compat_getsockopt)(struct socket *, int, int, char *, int *); int (*sendmsg)(struct socket *, struct msghdr *, size_t ); int (*recvmsg)(struct socket *, struct msghdr *, size_t , int); int (*mmap)(struct file *, struct socket *, struct vm_area_struct *); ssize_t (*sendpage)(struct socket *, struct page *, int, size_t , int); ssize_t (*splice_read)(struct socket *, loff_t *, struct pipe_inode_info *, size_t , unsigned int); int (*set_peek_off)(struct sock *, int); int (*peek_len)(struct socket *); } ;
63 struct exception_table_entry { int insn; int fixup; int handler; } ;
161 struct in6_addr ;
145 struct sk_buff ;
184 struct klist_node ;
37 struct klist_node { void *n_klist; struct list_head n_node; struct kref n_ref; } ;
93 struct hlist_bl_node ;
93 struct hlist_bl_head { struct hlist_bl_node *first; } ;
36 struct hlist_bl_node { struct hlist_bl_node *next; struct hlist_bl_node **pprev; } ;
114 struct __anonstruct____missing_field_name_346 { spinlock_t lock; int count; } ;
114 union __anonunion____missing_field_name_345 { struct __anonstruct____missing_field_name_346 __annonCompField65; } ;
114 struct lockref { union __anonunion____missing_field_name_345 __annonCompField66; } ;
77 struct path ;
78 struct vfsmount ;
79 struct __anonstruct____missing_field_name_348 { u32 hash; u32 len; } ;
79 union __anonunion____missing_field_name_347 { struct __anonstruct____missing_field_name_348 __annonCompField67; u64 hash_len; } ;
79 struct qstr { union __anonunion____missing_field_name_347 __annonCompField68; const unsigned char *name; } ;
65 struct dentry_operations ;
65 union __anonunion____missing_field_name_349 { struct list_head d_lru; wait_queue_head_t *d_wait; } ;
65 union __anonunion_d_u_350 { struct hlist_node d_alias; struct hlist_bl_node d_in_lookup_hash; struct callback_head d_rcu; } ;
65 struct dentry { unsigned int d_flags; seqcount_t d_seq; struct hlist_bl_node d_hash; struct dentry *d_parent; struct qstr d_name; struct inode *d_inode; unsigned char d_iname[32U]; struct lockref d_lockref; const struct dentry_operations *d_op; struct super_block *d_sb; unsigned long d_time; void *d_fsdata; union __anonunion____missing_field_name_349 __annonCompField69; struct list_head d_child; struct list_head d_subdirs; union __anonunion_d_u_350 d_u; } ;
121 struct dentry_operations { int (*d_revalidate)(struct dentry *, unsigned int); int (*d_weak_revalidate)(struct dentry *, unsigned int); int (*d_hash)(const struct dentry *, struct qstr *); int (*d_compare)(const struct dentry *, unsigned int, const char *, const struct qstr *); int (*d_delete)(const struct dentry *); int (*d_init)(struct dentry *); void (*d_release)(struct dentry *); void (*d_prune)(struct dentry *); void (*d_iput)(struct dentry *, struct inode *); char * (*d_dname)(struct dentry *, char *, int); struct vfsmount * (*d_automount)(struct path *); int (*d_manage)(struct dentry *, bool ); struct dentry * (*d_real)(struct dentry *, const struct inode *, unsigned int); } ;
591 struct path { struct vfsmount *mnt; struct dentry *dentry; } ;
19 struct shrink_control { gfp_t gfp_mask; unsigned long nr_to_scan; int nid; struct mem_cgroup *memcg; } ;
27 struct shrinker { unsigned long int (*count_objects)(struct shrinker *, struct shrink_control *); unsigned long int (*scan_objects)(struct shrinker *, struct shrink_control *); int seeks; long batch; unsigned long flags; struct list_head list; atomic_long_t *nr_deferred; } ;
80 struct list_lru_one { struct list_head list; long nr_items; } ;
32 struct list_lru_memcg { struct list_lru_one *lru[0U]; } ;
37 struct list_lru_node { spinlock_t lock; struct list_lru_one lru; struct list_lru_memcg *memcg_lrus; } ;
47 struct list_lru { struct list_lru_node *node; struct list_head list; } ;
63 struct __anonstruct____missing_field_name_352 { struct radix_tree_node *parent; void *private_data; } ;
63 union __anonunion____missing_field_name_351 { struct __anonstruct____missing_field_name_352 __annonCompField70; struct callback_head callback_head; } ;
63 struct radix_tree_node { unsigned char shift; unsigned char offset; unsigned int count; union __anonunion____missing_field_name_351 __annonCompField71; struct list_head private_list; void *slots[64U]; unsigned long tags[3U][1U]; } ;
106 struct radix_tree_root { gfp_t gfp_mask; struct radix_tree_node *rnode; } ;
45 struct fiemap_extent { __u64 fe_logical; __u64 fe_physical; __u64 fe_length; __u64 fe_reserved64[2U]; __u32 fe_flags; __u32 fe_reserved[3U]; } ;
38 enum migrate_mode { MIGRATE_ASYNC = 0, MIGRATE_SYNC_LIGHT = 1, MIGRATE_SYNC = 2 } ;
44 struct bio_vec { struct page *bv_page; unsigned int bv_len; unsigned int bv_offset; } ;
34 struct bvec_iter { sector_t bi_sector; unsigned int bi_size; unsigned int bi_idx; unsigned int bi_bvec_done; } ;
84 struct bio_set ;
85 struct bio ;
86 struct bio_integrity_payload ;
87 struct block_device ;
18 typedef void bio_end_io_t(struct bio *);
20 union __anonunion____missing_field_name_359 { struct bio_integrity_payload *bi_integrity; } ;
20 struct bio { struct bio *bi_next; struct block_device *bi_bdev; int bi_error; unsigned int bi_opf; unsigned short bi_flags; unsigned short bi_ioprio; struct bvec_iter bi_iter; unsigned int bi_phys_segments; unsigned int bi_seg_front_size; unsigned int bi_seg_back_size; atomic_t __bi_remaining; bio_end_io_t *bi_end_io; void *bi_private; struct io_context *bi_ioc; struct cgroup_subsys_state *bi_css; union __anonunion____missing_field_name_359 __annonCompField72; unsigned short bi_vcnt; unsigned short bi_max_vecs; atomic_t __bi_cnt; struct bio_vec *bi_io_vec; struct bio_set *bi_pool; struct bio_vec bi_inline_vecs[0U]; } ;
266 struct delayed_call { void (*fn)(void *); void *arg; } ;
261 struct bdi_writeback ;
262 struct export_operations ;
264 struct kstatfs ;
265 struct swap_info_struct ;
266 struct fscrypt_info ;
267 struct fscrypt_operations ;
76 struct iattr { unsigned int ia_valid; umode_t ia_mode; kuid_t ia_uid; kgid_t ia_gid; loff_t ia_size; struct timespec ia_atime; struct timespec ia_mtime; struct timespec ia_ctime; struct file *ia_file; } ;
261 struct percpu_counter { raw_spinlock_t lock; s64 count; struct list_head list; s32 *counters; } ;
213 struct dquot ;
214 struct kqid ;
19 typedef __kernel_uid32_t projid_t;
23 struct __anonstruct_kprojid_t_360 { projid_t val; } ;
23 typedef struct __anonstruct_kprojid_t_360 kprojid_t;
181 enum quota_type { USRQUOTA = 0, GRPQUOTA = 1, PRJQUOTA = 2 } ;
66 typedef long long qsize_t;
67 union __anonunion____missing_field_name_361 { kuid_t uid; kgid_t gid; kprojid_t projid; } ;
67 struct kqid { union __anonunion____missing_field_name_361 __annonCompField73; enum quota_type type; } ;
194 struct mem_dqblk { qsize_t dqb_bhardlimit; qsize_t dqb_bsoftlimit; qsize_t dqb_curspace; qsize_t dqb_rsvspace; qsize_t dqb_ihardlimit; qsize_t dqb_isoftlimit; qsize_t dqb_curinodes; time64_t dqb_btime; time64_t dqb_itime; } ;
216 struct quota_format_type ;
217 struct mem_dqinfo { struct quota_format_type *dqi_format; int dqi_fmt_id; struct list_head dqi_dirty_list; unsigned long dqi_flags; unsigned int dqi_bgrace; unsigned int dqi_igrace; qsize_t dqi_max_spc_limit; qsize_t dqi_max_ino_limit; void *dqi_priv; } ;
282 struct dquot { struct hlist_node dq_hash; struct list_head dq_inuse; struct list_head dq_free; struct list_head dq_dirty; struct mutex dq_lock; atomic_t dq_count; wait_queue_head_t dq_wait_unused; struct super_block *dq_sb; struct kqid dq_id; loff_t dq_off; unsigned long dq_flags; struct mem_dqblk dq_dqb; } ;
309 struct quota_format_ops { int (*check_quota_file)(struct super_block *, int); int (*read_file_info)(struct super_block *, int); int (*write_file_info)(struct super_block *, int); int (*free_file_info)(struct super_block *, int); int (*read_dqblk)(struct dquot *); int (*commit_dqblk)(struct dquot *); int (*release_dqblk)(struct dquot *); int (*get_next_id)(struct super_block *, struct kqid *); } ;
321 struct dquot_operations { int (*write_dquot)(struct dquot *); struct dquot * (*alloc_dquot)(struct super_block *, int); void (*destroy_dquot)(struct dquot *); int (*acquire_dquot)(struct dquot *); int (*release_dquot)(struct dquot *); int (*mark_dirty)(struct dquot *); int (*write_info)(struct super_block *, int); qsize_t * (*get_reserved_space)(struct inode *); int (*get_projid)(struct inode *, kprojid_t *); int (*get_next_id)(struct super_block *, struct kqid *); } ;
338 struct qc_dqblk { int d_fieldmask; u64 d_spc_hardlimit; u64 d_spc_softlimit; u64 d_ino_hardlimit; u64 d_ino_softlimit; u64 d_space; u64 d_ino_count; s64 d_ino_timer; s64 d_spc_timer; int d_ino_warns; int d_spc_warns; u64 d_rt_spc_hardlimit; u64 d_rt_spc_softlimit; u64 d_rt_space; s64 d_rt_spc_timer; int d_rt_spc_warns; } ;
361 struct qc_type_state { unsigned int flags; unsigned int spc_timelimit; unsigned int ino_timelimit; unsigned int rt_spc_timelimit; unsigned int spc_warnlimit; unsigned int ino_warnlimit; unsigned int rt_spc_warnlimit; unsigned long long ino; blkcnt_t blocks; blkcnt_t nextents; } ;
407 struct qc_state { unsigned int s_incoredqs; struct qc_type_state s_state[3U]; } ;
418 struct qc_info { int i_fieldmask; unsigned int i_flags; unsigned int i_spc_timelimit; unsigned int i_ino_timelimit; unsigned int i_rt_spc_timelimit; unsigned int i_spc_warnlimit; unsigned int i_ino_warnlimit; unsigned int i_rt_spc_warnlimit; } ;
431 struct quotactl_ops { int (*quota_on)(struct super_block *, int, int, struct path *); int (*quota_off)(struct super_block *, int); int (*quota_enable)(struct super_block *, unsigned int); int (*quota_disable)(struct super_block *, unsigned int); int (*quota_sync)(struct super_block *, int); int (*set_info)(struct super_block *, int, struct qc_info *); int (*get_dqblk)(struct super_block *, struct kqid , struct qc_dqblk *); int (*get_nextdqblk)(struct super_block *, struct kqid *, struct qc_dqblk *); int (*set_dqblk)(struct super_block *, struct kqid , struct qc_dqblk *); int (*get_state)(struct super_block *, struct qc_state *); int (*rm_xquota)(struct super_block *, unsigned int); } ;
447 struct quota_format_type { int qf_fmt_id; const struct quota_format_ops *qf_ops; struct module *qf_owner; struct quota_format_type *qf_next; } ;
511 struct quota_info { unsigned int flags; struct mutex dqio_mutex; struct mutex dqonoff_mutex; struct inode *files[3U]; struct mem_dqinfo info[3U]; const struct quota_format_ops *ops[3U]; } ;
541 struct writeback_control ;
542 struct kiocb { struct file *ki_filp; loff_t ki_pos; void (*ki_complete)(struct kiocb *, long, long); void *private; int ki_flags; } ;
367 struct address_space_operations { int (*writepage)(struct page *, struct writeback_control *); int (*readpage)(struct file *, struct page *); int (*writepages)(struct address_space *, struct writeback_control *); int (*set_page_dirty)(struct page *); int (*readpages)(struct file *, struct address_space *, struct list_head *, unsigned int); int (*write_begin)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page **, void **); int (*write_end)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page *, void *); sector_t (*bmap)(struct address_space *, sector_t ); void (*invalidatepage)(struct page *, unsigned int, unsigned int); int (*releasepage)(struct page *, gfp_t ); void (*freepage)(struct page *); ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *); int (*migratepage)(struct address_space *, struct page *, struct page *, enum migrate_mode ); bool (*isolate_page)(struct page *, isolate_mode_t ); void (*putback_page)(struct page *); int (*launder_page)(struct page *); int (*is_partially_uptodate)(struct page *, unsigned long, unsigned long); void (*is_dirty_writeback)(struct page *, bool *, bool *); int (*error_remove_page)(struct address_space *, struct page *); int (*swap_activate)(struct swap_info_struct *, struct file *, sector_t *); void (*swap_deactivate)(struct file *); } ;
426 struct address_space { struct inode *host; struct radix_tree_root page_tree; spinlock_t tree_lock; atomic_t i_mmap_writable; struct rb_root i_mmap; struct rw_semaphore i_mmap_rwsem; unsigned long nrpages; unsigned long nrexceptional; unsigned long writeback_index; const struct address_space_operations *a_ops; unsigned long flags; spinlock_t private_lock; struct list_head private_list; void *private_data; } ;
447 struct request_queue ;
448 struct hd_struct ;
448 struct gendisk ;
448 struct block_device { dev_t bd_dev; int bd_openers; struct inode *bd_inode; struct super_block *bd_super; struct mutex bd_mutex; void *bd_claiming; void *bd_holder; int bd_holders; bool bd_write_holder; struct list_head bd_holder_disks; struct block_device *bd_contains; unsigned int bd_block_size; struct hd_struct *bd_part; unsigned int bd_part_count; int bd_invalidated; struct gendisk *bd_disk; struct request_queue *bd_queue; struct list_head bd_list; unsigned long bd_private; int bd_fsfreeze_count; struct mutex bd_fsfreeze_mutex; } ;
563 struct posix_acl ;
589 struct inode_operations ;
589 union __anonunion____missing_field_name_366 { const unsigned int i_nlink; unsigned int __i_nlink; } ;
589 union __anonunion____missing_field_name_367 { struct hlist_head i_dentry; struct callback_head i_rcu; } ;
589 struct file_lock_context ;
589 struct cdev ;
589 union __anonunion____missing_field_name_368 { struct pipe_inode_info *i_pipe; struct block_device *i_bdev; struct cdev *i_cdev; char *i_link; unsigned int i_dir_seq; } ;
589 struct inode { umode_t i_mode; unsigned short i_opflags; kuid_t i_uid; kgid_t i_gid; unsigned int i_flags; struct posix_acl *i_acl; struct posix_acl *i_default_acl; const struct inode_operations *i_op; struct super_block *i_sb; struct address_space *i_mapping; void *i_security; unsigned long i_ino; union __anonunion____missing_field_name_366 __annonCompField74; dev_t i_rdev; loff_t i_size; struct timespec i_atime; struct timespec i_mtime; struct timespec i_ctime; spinlock_t i_lock; unsigned short i_bytes; unsigned int i_blkbits; blkcnt_t i_blocks; unsigned long i_state; struct rw_semaphore i_rwsem; unsigned long dirtied_when; unsigned long dirtied_time_when; struct hlist_node i_hash; struct list_head i_io_list; struct bdi_writeback *i_wb; int i_wb_frn_winner; u16 i_wb_frn_avg_time; u16 i_wb_frn_history; struct list_head i_lru; struct list_head i_sb_list; struct list_head i_wb_list; union __anonunion____missing_field_name_367 __annonCompField75; u64 i_version; atomic_t i_count; atomic_t i_dio_count; atomic_t i_writecount; atomic_t i_readcount; const struct file_operations *i_fop; struct file_lock_context *i_flctx; struct address_space i_data; struct list_head i_devices; union __anonunion____missing_field_name_368 __annonCompField76; __u32 i_generation; __u32 i_fsnotify_mask; struct hlist_head i_fsnotify_marks; struct fscrypt_info *i_crypt_info; void *i_private; } ;
843 struct fown_struct { rwlock_t lock; struct pid *pid; enum pid_type pid_type; kuid_t uid; kuid_t euid; int signum; } ;
851 struct file_ra_state { unsigned long start; unsigned int size; unsigned int async_size; unsigned int ra_pages; unsigned int mmap_miss; loff_t prev_pos; } ;
874 union __anonunion_f_u_369 { struct llist_node fu_llist; struct callback_head fu_rcuhead; } ;
874 struct file { union __anonunion_f_u_369 f_u; struct path f_path; struct inode *f_inode; const struct file_operations *f_op; spinlock_t f_lock; atomic_long_t f_count; unsigned int f_flags; fmode_t f_mode; struct mutex f_pos_lock; loff_t f_pos; struct fown_struct f_owner; const struct cred *f_cred; struct file_ra_state f_ra; u64 f_version; void *f_security; void *private_data; struct list_head f_ep_links; struct list_head f_tfile_llink; struct address_space *f_mapping; } ;
959 typedef void *fl_owner_t;
960 struct file_lock ;
961 struct file_lock_operations { void (*fl_copy_lock)(struct file_lock *, struct file_lock *); void (*fl_release_private)(struct file_lock *); } ;
967 struct lock_manager_operations { int (*lm_compare_owner)(struct file_lock *, struct file_lock *); unsigned long int (*lm_owner_key)(struct file_lock *); fl_owner_t (*lm_get_owner)(fl_owner_t ); void (*lm_put_owner)(fl_owner_t ); void (*lm_notify)(struct file_lock *); int (*lm_grant)(struct file_lock *, int); bool (*lm_break)(struct file_lock *); int (*lm_change)(struct file_lock *, int, struct list_head *); void (*lm_setup)(struct file_lock *, void **); } ;
994 struct nlm_lockowner ;
995 struct nfs_lock_info { u32 state; struct nlm_lockowner *owner; struct list_head list; } ;
14 struct nfs4_lock_state ;
15 struct nfs4_lock_info { struct nfs4_lock_state *owner; } ;
19 struct __anonstruct_afs_371 { struct list_head link; int state; } ;
19 union __anonunion_fl_u_370 { struct nfs_lock_info nfs_fl; struct nfs4_lock_info nfs4_fl; struct __anonstruct_afs_371 afs; } ;
19 struct file_lock { struct file_lock *fl_next; struct list_head fl_list; struct hlist_node fl_link; struct list_head fl_block; fl_owner_t fl_owner; unsigned int fl_flags; unsigned char fl_type; unsigned int fl_pid; int fl_link_cpu; struct pid *fl_nspid; wait_queue_head_t fl_wait; struct file *fl_file; loff_t fl_start; loff_t fl_end; struct fasync_struct *fl_fasync; unsigned long fl_break_time; unsigned long fl_downgrade_time; const struct file_lock_operations *fl_ops; const struct lock_manager_operations *fl_lmops; union __anonunion_fl_u_370 fl_u; } ;
1047 struct file_lock_context { spinlock_t flc_lock; struct list_head flc_flock; struct list_head flc_posix; struct list_head flc_lease; } ;
1255 struct fasync_struct { spinlock_t fa_lock; int magic; int fa_fd; struct fasync_struct *fa_next; struct file *fa_file; struct callback_head fa_rcu; } ;
1290 struct sb_writers { int frozen; wait_queue_head_t wait_unfrozen; struct percpu_rw_semaphore rw_sem[3U]; } ;
1320 struct super_operations ;
1320 struct xattr_handler ;
1320 struct mtd_info ;
1320 struct super_block { struct list_head s_list; dev_t s_dev; unsigned char s_blocksize_bits; unsigned long s_blocksize; loff_t s_maxbytes; struct file_system_type *s_type; const struct super_operations *s_op; const struct dquot_operations *dq_op; const struct quotactl_ops *s_qcop; const struct export_operations *s_export_op; unsigned long s_flags; unsigned long s_iflags; unsigned long s_magic; struct dentry *s_root; struct rw_semaphore s_umount; int s_count; atomic_t s_active; void *s_security; const struct xattr_handler **s_xattr; const struct fscrypt_operations *s_cop; struct hlist_bl_head s_anon; struct list_head s_mounts; struct block_device *s_bdev; struct backing_dev_info *s_bdi; struct mtd_info *s_mtd; struct hlist_node s_instances; unsigned int s_quota_types; struct quota_info s_dquot; struct sb_writers s_writers; char s_id[32U]; u8 s_uuid[16U]; void *s_fs_info; unsigned int s_max_links; fmode_t s_mode; u32 s_time_gran; struct mutex s_vfs_rename_mutex; char *s_subtype; char *s_options; const struct dentry_operations *s_d_op; int cleancache_poolid; struct shrinker s_shrink; atomic_long_t s_remove_count; int s_readonly_remount; struct workqueue_struct *s_dio_done_wq; struct hlist_head s_pins; struct user_namespace *s_user_ns; struct list_lru s_dentry_lru; struct list_lru s_inode_lru; struct callback_head rcu; struct work_struct destroy_work; struct mutex s_sync_lock; int s_stack_depth; spinlock_t s_inode_list_lock; struct list_head s_inodes; spinlock_t s_inode_wblist_lock; struct list_head s_inodes_wb; } ;
1603 struct fiemap_extent_info { unsigned int fi_flags; unsigned int fi_extents_mapped; unsigned int fi_extents_max; struct fiemap_extent *fi_extents_start; } ;
1616 struct dir_context ;
1641 struct dir_context { int (*actor)(struct dir_context *, const char *, int, loff_t , u64 , unsigned int); loff_t pos; } ;
1648 struct file_operations { struct module *owner; loff_t (*llseek)(struct file *, loff_t , int); ssize_t (*read)(struct file *, char *, size_t , loff_t *); ssize_t (*write)(struct file *, const char *, size_t , loff_t *); ssize_t (*read_iter)(struct kiocb *, struct iov_iter *); ssize_t (*write_iter)(struct kiocb *, struct iov_iter *); int (*iterate)(struct file *, struct dir_context *); int (*iterate_shared)(struct file *, struct dir_context *); unsigned int (*poll)(struct file *, struct poll_table_struct *); long int (*unlocked_ioctl)(struct file *, unsigned int, unsigned long); long int (*compat_ioctl)(struct file *, unsigned int, unsigned long); int (*mmap)(struct file *, struct vm_area_struct *); int (*open)(struct inode *, struct file *); int (*flush)(struct file *, fl_owner_t ); int (*release)(struct inode *, struct file *); int (*fsync)(struct file *, loff_t , loff_t , int); int (*aio_fsync)(struct kiocb *, int); int (*fasync)(int, struct file *, int); int (*lock)(struct file *, int, struct file_lock *); ssize_t (*sendpage)(struct file *, struct page *, int, size_t , loff_t *, int); unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); int (*check_flags)(int); int (*flock)(struct file *, int, struct file_lock *); ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t , unsigned int); ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t , unsigned int); int (*setlease)(struct file *, long, struct file_lock **, void **); long int (*fallocate)(struct file *, int, loff_t , loff_t ); void (*show_fdinfo)(struct seq_file *, struct file *); ssize_t (*copy_file_range)(struct file *, loff_t , struct file *, loff_t , size_t , unsigned int); int (*clone_file_range)(struct file *, loff_t , struct file *, loff_t , u64 ); ssize_t (*dedupe_file_range)(struct file *, u64 , u64 , struct file *, u64 ); } ;
1717 struct inode_operations { struct dentry * (*lookup)(struct inode *, struct dentry *, unsigned int); const char * (*get_link)(struct dentry *, struct inode *, struct delayed_call *); int (*permission)(struct inode *, int); struct posix_acl * (*get_acl)(struct inode *, int); int (*readlink)(struct dentry *, char *, int); int (*create)(struct inode *, struct dentry *, umode_t , bool ); int (*link)(struct dentry *, struct inode *, struct dentry *); int (*unlink)(struct inode *, struct dentry *); int (*symlink)(struct inode *, struct dentry *, const char *); int (*mkdir)(struct inode *, struct dentry *, umode_t ); int (*rmdir)(struct inode *, struct dentry *); int (*mknod)(struct inode *, struct dentry *, umode_t , dev_t ); int (*rename)(struct inode *, struct dentry *, struct inode *, struct dentry *); int (*rename2)(struct inode *, struct dentry *, struct inode *, struct dentry *, unsigned int); int (*setattr)(struct dentry *, struct iattr *); int (*getattr)(struct vfsmount *, struct dentry *, struct kstat *); int (*setxattr)(struct dentry *, struct inode *, const char *, const void *, size_t , int); ssize_t (*getxattr)(struct dentry *, struct inode *, const char *, void *, size_t ); ssize_t (*listxattr)(struct dentry *, char *, size_t ); int (*removexattr)(struct dentry *, const char *); int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 , u64 ); int (*update_time)(struct inode *, struct timespec *, int); int (*atomic_open)(struct inode *, struct dentry *, struct file *, unsigned int, umode_t , int *); int (*tmpfile)(struct inode *, struct dentry *, umode_t ); int (*set_acl)(struct inode *, struct posix_acl *, int); } ;
1774 struct super_operations { struct inode * (*alloc_inode)(struct super_block *); void (*destroy_inode)(struct inode *); void (*dirty_inode)(struct inode *, int); int (*write_inode)(struct inode *, struct writeback_control *); int (*drop_inode)(struct inode *); void (*evict_inode)(struct inode *); void (*put_super)(struct super_block *); int (*sync_fs)(struct super_block *, int); int (*freeze_super)(struct super_block *); int (*freeze_fs)(struct super_block *); int (*thaw_super)(struct super_block *); int (*unfreeze_fs)(struct super_block *); int (*statfs)(struct dentry *, struct kstatfs *); int (*remount_fs)(struct super_block *, int *, char *); void (*umount_begin)(struct super_block *); int (*show_options)(struct seq_file *, struct dentry *); int (*show_devname)(struct seq_file *, struct dentry *); int (*show_path)(struct seq_file *, struct dentry *); int (*show_stats)(struct seq_file *, struct dentry *); ssize_t (*quota_read)(struct super_block *, int, char *, size_t , loff_t ); ssize_t (*quota_write)(struct super_block *, int, const char *, size_t , loff_t ); struct dquot ** (*get_dquots)(struct inode *); int (*bdev_try_to_free_page)(struct super_block *, struct page *, gfp_t ); long int (*nr_cached_objects)(struct super_block *, struct shrink_control *); long int (*free_cached_objects)(struct super_block *, struct shrink_control *); } ;
2018 struct file_system_type { const char *name; int fs_flags; struct dentry * (*mount)(struct file_system_type *, int, const char *, void *); void (*kill_sb)(struct super_block *); struct module *owner; struct file_system_type *next; struct hlist_head fs_supers; struct lock_class_key s_lock_key; struct lock_class_key s_umount_key; struct lock_class_key s_vfs_rename_key; struct lock_class_key s_writers_key[3U]; struct lock_class_key i_lock_key; struct lock_class_key i_mutex_key; struct lock_class_key i_mutex_dir_key; } ;
3193 struct seq_file { char *buf; size_t size; size_t from; size_t count; size_t pad_until; loff_t index; loff_t read_pos; u64 version; struct mutex lock; const struct seq_operations *op; int poll_event; const struct file *file; void *private; } ;
30 struct seq_operations { void * (*start)(struct seq_file *, loff_t *); void (*stop)(struct seq_file *, void *); void * (*next)(struct seq_file *, void *, loff_t *); int (*show)(struct seq_file *, void *); } ;
222 struct pinctrl ;
223 struct pinctrl_state ;
194 struct dev_pin_info { struct pinctrl *p; struct pinctrl_state *default_state; struct pinctrl_state *init_state; struct pinctrl_state *sleep_state; struct pinctrl_state *idle_state; } ;
76 struct dma_map_ops ;
76 struct dev_archdata { struct dma_map_ops *dma_ops; void *iommu; } ;
24 struct device_private ;
25 struct device_driver ;
26 struct driver_private ;
27 struct class ;
28 struct subsys_private ;
29 struct bus_type ;
30 struct device_node ;
31 struct fwnode_handle ;
32 struct iommu_ops ;
33 struct iommu_group ;
61 struct device_attribute ;
61 struct bus_type { const char *name; const char *dev_name; struct device *dev_root; struct device_attribute *dev_attrs; const struct attribute_group **bus_groups; const struct attribute_group **dev_groups; const struct attribute_group **drv_groups; int (*match)(struct device *, struct device_driver *); int (*uevent)(struct device *, struct kobj_uevent_env *); int (*probe)(struct device *); int (*remove)(struct device *); void (*shutdown)(struct device *); int (*online)(struct device *); int (*offline)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct dev_pm_ops *pm; const struct iommu_ops *iommu_ops; struct subsys_private *p; struct lock_class_key lock_key; } ;
142 struct device_type ;
201 enum probe_type { PROBE_DEFAULT_STRATEGY = 0, PROBE_PREFER_ASYNCHRONOUS = 1, PROBE_FORCE_SYNCHRONOUS = 2 } ;
207 struct of_device_id ;
207 struct acpi_device_id ;
207 struct device_driver { const char *name; struct bus_type *bus; struct module *owner; const char *mod_name; bool suppress_bind_attrs; enum probe_type probe_type; const struct of_device_id *of_match_table; const struct acpi_device_id *acpi_match_table; int (*probe)(struct device *); int (*remove)(struct device *); void (*shutdown)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct attribute_group **groups; const struct dev_pm_ops *pm; struct driver_private *p; } ;
357 struct class_attribute ;
357 struct class { const char *name; struct module *owner; struct class_attribute *class_attrs; const struct attribute_group **dev_groups; struct kobject *dev_kobj; int (*dev_uevent)(struct device *, struct kobj_uevent_env *); char * (*devnode)(struct device *, umode_t *); void (*class_release)(struct class *); void (*dev_release)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct kobj_ns_type_operations *ns_type; const void * (*namespace)(struct device *); const struct dev_pm_ops *pm; struct subsys_private *p; } ;
450 struct class_attribute { struct attribute attr; ssize_t (*show)(struct class *, struct class_attribute *, char *); ssize_t (*store)(struct class *, struct class_attribute *, const char *, size_t ); } ;
518 struct device_type { const char *name; const struct attribute_group **groups; int (*uevent)(struct device *, struct kobj_uevent_env *); char * (*devnode)(struct device *, umode_t *, kuid_t *, kgid_t *); void (*release)(struct device *); const struct dev_pm_ops *pm; } ;
546 struct device_attribute { struct attribute attr; ssize_t (*show)(struct device *, struct device_attribute *, char *); ssize_t (*store)(struct device *, struct device_attribute *, const char *, size_t ); } ;
699 struct device_dma_parameters { unsigned int max_segment_size; unsigned long segment_boundary_mask; } ;
708 struct irq_domain ;
708 struct dma_coherent_mem ;
708 struct cma ;
708 struct device { struct device *parent; struct device_private *p; struct kobject kobj; const char *init_name; const struct device_type *type; struct mutex mutex; struct bus_type *bus; struct device_driver *driver; void *platform_data; void *driver_data; struct dev_pm_info power; struct dev_pm_domain *pm_domain; struct irq_domain *msi_domain; struct dev_pin_info *pins; struct list_head msi_list; int numa_node; u64 *dma_mask; u64 coherent_dma_mask; unsigned long dma_pfn_offset; struct device_dma_parameters *dma_parms; struct list_head dma_pools; struct dma_coherent_mem *dma_mem; struct cma *cma_area; struct dev_archdata archdata; struct device_node *of_node; struct fwnode_handle *fwnode; dev_t devt; u32 id; spinlock_t devres_lock; struct list_head devres_head; struct klist_node knode_class; struct class *class; const struct attribute_group **groups; void (*release)(struct device *); struct iommu_group *iommu_group; bool offline_disabled; bool offline; } ;
862 struct wakeup_source { const char *name; struct list_head entry; spinlock_t lock; struct wake_irq *wakeirq; struct timer_list timer; unsigned long timer_expires; ktime_t total_time; ktime_t max_time; ktime_t last_time; ktime_t start_prevent_time; ktime_t prevent_sleep_time; unsigned long event_count; unsigned long active_count; unsigned long relax_count; unsigned long expire_count; unsigned long wakeup_count; bool active; bool autosleep_enabled; } ;
1327 struct scatterlist ;
89 enum dma_data_direction { DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2, DMA_NONE = 3 } ;
273 struct vm_fault { unsigned int flags; gfp_t gfp_mask; unsigned long pgoff; void *virtual_address; struct page *cow_page; struct page *page; void *entry; } ;
308 struct fault_env { struct vm_area_struct *vma; unsigned long address; unsigned int flags; pmd_t *pmd; pte_t *pte; spinlock_t *ptl; pgtable_t prealloc_pte; } ;
335 struct vm_operations_struct { void (*open)(struct vm_area_struct *); void (*close)(struct vm_area_struct *); int (*mremap)(struct vm_area_struct *); int (*fault)(struct vm_area_struct *, struct vm_fault *); int (*pmd_fault)(struct vm_area_struct *, unsigned long, pmd_t *, unsigned int); void (*map_pages)(struct fault_env *, unsigned long, unsigned long); int (*page_mkwrite)(struct vm_area_struct *, struct vm_fault *); int (*pfn_mkwrite)(struct vm_area_struct *, struct vm_fault *); int (*access)(struct vm_area_struct *, unsigned long, void *, int, int); const char * (*name)(struct vm_area_struct *); int (*set_policy)(struct vm_area_struct *, struct mempolicy *); struct mempolicy * (*get_policy)(struct vm_area_struct *, unsigned long); struct page * (*find_special_page)(struct vm_area_struct *, unsigned long); } ;
2451 struct scatterlist { unsigned long sg_magic; unsigned long page_link; unsigned int offset; unsigned int length; dma_addr_t dma_address; unsigned int dma_length; } ;
21 struct sg_table { struct scatterlist *sgl; unsigned int nents; unsigned int orig_nents; } ;
406 struct dma_map_ops { void * (*alloc)(struct device *, size_t , dma_addr_t *, gfp_t , unsigned long); void (*free)(struct device *, size_t , void *, dma_addr_t , unsigned long); int (*mmap)(struct device *, struct vm_area_struct *, void *, dma_addr_t , size_t , unsigned long); int (*get_sgtable)(struct device *, struct sg_table *, void *, dma_addr_t , size_t , unsigned long); dma_addr_t (*map_page)(struct device *, struct page *, unsigned long, size_t , enum dma_data_direction , unsigned long); void (*unmap_page)(struct device *, dma_addr_t , size_t , enum dma_data_direction , unsigned long); int (*map_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction , unsigned long); void (*unmap_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction , unsigned long); void (*sync_single_for_cpu)(struct device *, dma_addr_t , size_t , enum dma_data_direction ); void (*sync_single_for_device)(struct device *, dma_addr_t , size_t , enum dma_data_direction ); void (*sync_sg_for_cpu)(struct device *, struct scatterlist *, int, enum dma_data_direction ); void (*sync_sg_for_device)(struct device *, struct scatterlist *, int, enum dma_data_direction ); int (*mapping_error)(struct device *, dma_addr_t ); int (*dma_supported)(struct device *, u64 ); int (*set_dma_mask)(struct device *, u64 ); int is_phys; } ;
15 typedef u64 netdev_features_t;
70 union __anonunion_in6_u_382 { __u8 u6_addr8[16U]; __be16 u6_addr16[8U]; __be32 u6_addr32[4U]; } ;
70 struct in6_addr { union __anonunion_in6_u_382 in6_u; } ;
41 struct sockaddr_in6 { unsigned short sin6_family; __be16 sin6_port; __be32 sin6_flowinfo; struct in6_addr sin6_addr; __u32 sin6_scope_id; } ;
46 struct ethhdr { unsigned char h_dest[6U]; unsigned char h_source[6U]; __be16 h_proto; } ;
199 struct pipe_buf_operations ;
199 struct pipe_buffer { struct page *page; unsigned int offset; unsigned int len; const struct pipe_buf_operations *ops; unsigned int flags; unsigned long private; } ;
27 struct pipe_inode_info { struct mutex mutex; wait_queue_head_t wait; unsigned int nrbufs; unsigned int curbuf; unsigned int buffers; unsigned int readers; unsigned int writers; unsigned int files; unsigned int waiting_writers; unsigned int r_counter; unsigned int w_counter; struct page *tmp_page; struct fasync_struct *fasync_readers; struct fasync_struct *fasync_writers; struct pipe_buffer *bufs; struct user_struct *user; } ;
63 struct pipe_buf_operations { int can_merge; int (*confirm)(struct pipe_inode_info *, struct pipe_buffer *); void (*release)(struct pipe_inode_info *, struct pipe_buffer *); int (*steal)(struct pipe_inode_info *, struct pipe_buffer *); void (*get)(struct pipe_inode_info *, struct pipe_buffer *); } ;
295 struct flowi_tunnel { __be64 tun_id; } ;
26 struct flowi_common { int flowic_oif; int flowic_iif; __u32 flowic_mark; __u8 flowic_tos; __u8 flowic_scope; __u8 flowic_proto; __u8 flowic_flags; __u32 flowic_secid; struct flowi_tunnel flowic_tun_key; } ;
42 struct __anonstruct_ports_389 { __be16 dport; __be16 sport; } ;
42 struct __anonstruct_icmpt_390 { __u8 type; __u8 code; } ;
42 struct __anonstruct_dnports_391 { __le16 dport; __le16 sport; } ;
42 struct __anonstruct_mht_392 { __u8 type; } ;
42 union flowi_uli { struct __anonstruct_ports_389 ports; struct __anonstruct_icmpt_390 icmpt; struct __anonstruct_dnports_391 dnports; __be32 spi; __be32 gre_key; struct __anonstruct_mht_392 mht; } ;
66 struct flowi4 { struct flowi_common __fl_common; __be32 saddr; __be32 daddr; union flowi_uli uli; } ;
123 struct flowi6 { struct flowi_common __fl_common; struct in6_addr daddr; struct in6_addr saddr; __be32 flowlabel; union flowi_uli uli; } ;
141 struct flowidn { struct flowi_common __fl_common; __le16 daddr; __le16 saddr; union flowi_uli uli; } ;
161 union __anonunion_u_393 { struct flowi_common __fl_common; struct flowi4 ip4; struct flowi6 ip6; struct flowidn dn; } ;
161 struct flowi { union __anonunion_u_393 u; } ;
265 struct napi_struct ;
266 struct nf_conntrack { atomic_t use; } ;
254 union __anonunion____missing_field_name_394 { __be32 ipv4_daddr; struct in6_addr ipv6_daddr; char neigh_header[8U]; } ;
254 struct nf_bridge_info { atomic_t use; unsigned char orig_proto; unsigned char pkt_otherhost; unsigned char in_prerouting; unsigned char bridged_dnat; __u16 frag_max_size; struct net_device *physindev; struct net_device *physoutdev; union __anonunion____missing_field_name_394 __annonCompField82; } ;
278 struct sk_buff_head { struct sk_buff *next; struct sk_buff *prev; __u32 qlen; spinlock_t lock; } ;
500 typedef unsigned int sk_buff_data_t;
501 struct __anonstruct____missing_field_name_397 { u32 stamp_us; u32 stamp_jiffies; } ;
501 union __anonunion____missing_field_name_396 { u64 v64; struct __anonstruct____missing_field_name_397 __annonCompField83; } ;
501 struct skb_mstamp { union __anonunion____missing_field_name_396 __annonCompField84; } ;
564 union __anonunion____missing_field_name_400 { ktime_t tstamp; struct skb_mstamp skb_mstamp; } ;
564 struct __anonstruct____missing_field_name_399 { struct sk_buff *next; struct sk_buff *prev; union __anonunion____missing_field_name_400 __annonCompField85; } ;
564 union __anonunion____missing_field_name_398 { struct __anonstruct____missing_field_name_399 __annonCompField86; struct rb_node rbnode; } ;
564 struct sec_path ;
564 struct __anonstruct____missing_field_name_402 { __u16 csum_start; __u16 csum_offset; } ;
564 union __anonunion____missing_field_name_401 { __wsum csum; struct __anonstruct____missing_field_name_402 __annonCompField88; } ;
564 union __anonunion____missing_field_name_403 { unsigned int napi_id; unsigned int sender_cpu; } ;
564 union __anonunion____missing_field_name_404 { __u32 secmark; __u32 offload_fwd_mark; } ;
564 union __anonunion____missing_field_name_405 { __u32 mark; __u32 reserved_tailroom; } ;
564 union __anonunion____missing_field_name_406 { __be16 inner_protocol; __u8 inner_ipproto; } ;
564 struct sk_buff { union __anonunion____missing_field_name_398 __annonCompField87; struct sock *sk; struct net_device *dev; char cb[48U]; unsigned long _skb_refdst; void (*destructor)(struct sk_buff *); struct sec_path *sp; struct nf_conntrack *nfct; struct nf_bridge_info *nf_bridge; unsigned int len; unsigned int data_len; __u16 mac_len; __u16 hdr_len; __u16 queue_mapping; unsigned char cloned; unsigned char nohdr; unsigned char fclone; unsigned char peeked; unsigned char head_frag; unsigned char xmit_more; __u32 headers_start[0U]; __u8 __pkt_type_offset[0U]; unsigned char pkt_type; unsigned char pfmemalloc; unsigned char ignore_df; unsigned char nfctinfo; unsigned char nf_trace; unsigned char ip_summed; unsigned char ooo_okay; unsigned char l4_hash; unsigned char sw_hash; unsigned char wifi_acked_valid; unsigned char wifi_acked; unsigned char no_fcs; unsigned char encapsulation; unsigned char encap_hdr_csum; unsigned char csum_valid; unsigned char csum_complete_sw; unsigned char csum_level; unsigned char csum_bad; unsigned char ndisc_nodetype; unsigned char ipvs_property; unsigned char inner_protocol_type; unsigned char remcsum_offload; __u16 tc_index; __u16 tc_verd; union __anonunion____missing_field_name_401 __annonCompField89; __u32 priority; int skb_iif; __u32 hash; __be16 vlan_proto; __u16 vlan_tci; union __anonunion____missing_field_name_403 __annonCompField90; union __anonunion____missing_field_name_404 __annonCompField91; union __anonunion____missing_field_name_405 __annonCompField92; union __anonunion____missing_field_name_406 __annonCompField93; __u16 inner_transport_header; __u16 inner_network_header; __u16 inner_mac_header; __be16 protocol; __u16 transport_header; __u16 network_header; __u16 mac_header; __u32 headers_end[0U]; sk_buff_data_t tail; sk_buff_data_t end; unsigned char *head; unsigned char *data; unsigned int truesize; atomic_t users; } ;
831 struct dst_entry ;
880 struct rtable ;
65 struct irq_poll ;
5 typedef int irq_poll_fn(struct irq_poll *, int);
6 struct irq_poll { struct list_head list; unsigned long state; int weight; irq_poll_fn *poll; } ;
180 struct ipv6_stable_secret { bool initialized; struct in6_addr secret; } ;
64 struct ipv6_devconf { __s32 forwarding; __s32 hop_limit; __s32 mtu6; __s32 accept_ra; __s32 accept_redirects; __s32 autoconf; __s32 dad_transmits; __s32 rtr_solicits; __s32 rtr_solicit_interval; __s32 rtr_solicit_delay; __s32 force_mld_version; __s32 mldv1_unsolicited_report_interval; __s32 mldv2_unsolicited_report_interval; __s32 use_tempaddr; __s32 temp_valid_lft; __s32 temp_prefered_lft; __s32 regen_max_retry; __s32 max_desync_factor; __s32 max_addresses; __s32 accept_ra_defrtr; __s32 accept_ra_min_hop_limit; __s32 accept_ra_pinfo; __s32 ignore_routes_with_linkdown; __s32 accept_ra_rtr_pref; __s32 rtr_probe_interval; __s32 accept_ra_rt_info_max_plen; __s32 proxy_ndp; __s32 accept_source_route; __s32 accept_ra_from_local; __s32 optimistic_dad; __s32 use_optimistic; __s32 mc_forwarding; __s32 disable_ipv6; __s32 drop_unicast_in_l2_multicast; __s32 accept_dad; __s32 force_tllao; __s32 ndisc_notify; __s32 suppress_frag_ndisc; __s32 accept_ra_mtu; __s32 drop_unsolicited_na; struct ipv6_stable_secret stable_secret; __s32 use_oif_addrs_only; __s32 keep_addr_on_down; struct ctl_table_header *sysctl_header; } ;
1402 struct dql { unsigned int num_queued; unsigned int adj_limit; unsigned int last_obj_cnt; unsigned int limit; unsigned int num_completed; unsigned int prev_ovlimit; unsigned int prev_num_queued; unsigned int prev_last_obj_cnt; unsigned int lowest_slack; unsigned long slack_start_time; unsigned int max_limit; unsigned int min_limit; unsigned int slack_hold_time; } ;
43 struct __anonstruct_sync_serial_settings_410 { unsigned int clock_rate; unsigned int clock_type; unsigned short loopback; } ;
43 typedef struct __anonstruct_sync_serial_settings_410 sync_serial_settings;
50 struct __anonstruct_te1_settings_411 { unsigned int clock_rate; unsigned int clock_type; unsigned short loopback; unsigned int slot_map; } ;
50 typedef struct __anonstruct_te1_settings_411 te1_settings;
55 struct __anonstruct_raw_hdlc_proto_412 { unsigned short encoding; unsigned short parity; } ;
55 typedef struct __anonstruct_raw_hdlc_proto_412 raw_hdlc_proto;
65 struct __anonstruct_fr_proto_413 { unsigned int t391; unsigned int t392; unsigned int n391; unsigned int n392; unsigned int n393; unsigned short lmi; unsigned short dce; } ;
65 typedef struct __anonstruct_fr_proto_413 fr_proto;
69 struct __anonstruct_fr_proto_pvc_414 { unsigned int dlci; } ;
69 typedef struct __anonstruct_fr_proto_pvc_414 fr_proto_pvc;
74 struct __anonstruct_fr_proto_pvc_info_415 { unsigned int dlci; char master[16U]; } ;
74 typedef struct __anonstruct_fr_proto_pvc_info_415 fr_proto_pvc_info;
79 struct __anonstruct_cisco_proto_416 { unsigned int interval; unsigned int timeout; } ;
79 typedef struct __anonstruct_cisco_proto_416 cisco_proto;
117 struct ifmap { unsigned long mem_start; unsigned long mem_end; unsigned short base_addr; unsigned char irq; unsigned char dma; unsigned char port; } ;
197 union __anonunion_ifs_ifsu_417 { raw_hdlc_proto *raw_hdlc; cisco_proto *cisco; fr_proto *fr; fr_proto_pvc *fr_pvc; fr_proto_pvc_info *fr_pvc_info; sync_serial_settings *sync; te1_settings *te1; } ;
197 struct if_settings { unsigned int type; unsigned int size; union __anonunion_ifs_ifsu_417 ifs_ifsu; } ;
216 union __anonunion_ifr_ifrn_418 { char ifrn_name[16U]; } ;
216 union __anonunion_ifr_ifru_419 { struct sockaddr ifru_addr; struct sockaddr ifru_dstaddr; struct sockaddr ifru_broadaddr; struct sockaddr ifru_netmask; struct sockaddr ifru_hwaddr; short ifru_flags; int ifru_ivalue; int ifru_mtu; struct ifmap ifru_map; char ifru_slave[16U]; char ifru_newname[16U]; void *ifru_data; struct if_settings ifru_settings; } ;
216 struct ifreq { union __anonunion_ifr_ifrn_418 ifr_ifrn; union __anonunion_ifr_ifru_419 ifr_ifru; } ;
18 typedef s32 compat_time_t;
39 typedef s32 compat_long_t;
45 typedef u32 compat_uptr_t;
46 struct compat_timespec { compat_time_t tv_sec; s32 tv_nsec; } ;
278 struct compat_robust_list { compat_uptr_t next; } ;
282 struct compat_robust_list_head { struct compat_robust_list list; compat_long_t futex_offset; compat_uptr_t list_op_pending; } ;
39 struct ethtool_cmd { __u32 cmd; __u32 supported; __u32 advertising; __u16 speed; __u8 duplex; __u8 port; __u8 phy_address; __u8 transceiver; __u8 autoneg; __u8 mdio_support; __u32 maxtxpkt; __u32 maxrxpkt; __u16 speed_hi; __u8 eth_tp_mdix; __u8 eth_tp_mdix_ctrl; __u32 lp_advertising; __u32 reserved[2U]; } ;
131 struct ethtool_drvinfo { __u32 cmd; char driver[32U]; char version[32U]; char fw_version[32U]; char bus_info[32U]; char erom_version[32U]; char reserved2[12U]; __u32 n_priv_flags; __u32 n_stats; __u32 testinfo_len; __u32 eedump_len; __u32 regdump_len; } ;
195 struct ethtool_wolinfo { __u32 cmd; __u32 supported; __u32 wolopts; __u8 sopass[6U]; } ;
239 struct ethtool_tunable { __u32 cmd; __u32 id; __u32 type_id; __u32 len; void *data[0U]; } ;
251 struct ethtool_regs { __u32 cmd; __u32 version; __u32 len; __u8 data[0U]; } ;
273 struct ethtool_eeprom { __u32 cmd; __u32 magic; __u32 offset; __u32 len; __u8 data[0U]; } ;
299 struct ethtool_eee { __u32 cmd; __u32 supported; __u32 advertised; __u32 lp_advertised; __u32 eee_active; __u32 eee_enabled; __u32 tx_lpi_enabled; __u32 tx_lpi_timer; __u32 reserved[2U]; } ;
328 struct ethtool_modinfo { __u32 cmd; __u32 type; __u32 eeprom_len; __u32 reserved[8U]; } ;
345 struct ethtool_coalesce { __u32 cmd; __u32 rx_coalesce_usecs; __u32 rx_max_coalesced_frames; __u32 rx_coalesce_usecs_irq; __u32 rx_max_coalesced_frames_irq; __u32 tx_coalesce_usecs; __u32 tx_max_coalesced_frames; __u32 tx_coalesce_usecs_irq; __u32 tx_max_coalesced_frames_irq; __u32 stats_block_coalesce_usecs; __u32 use_adaptive_rx_coalesce; __u32 use_adaptive_tx_coalesce; __u32 pkt_rate_low; __u32 rx_coalesce_usecs_low; __u32 rx_max_coalesced_frames_low; __u32 tx_coalesce_usecs_low; __u32 tx_max_coalesced_frames_low; __u32 pkt_rate_high; __u32 rx_coalesce_usecs_high; __u32 rx_max_coalesced_frames_high; __u32 tx_coalesce_usecs_high; __u32 tx_max_coalesced_frames_high; __u32 rate_sample_interval; } ;
444 struct ethtool_ringparam { __u32 cmd; __u32 rx_max_pending; __u32 rx_mini_max_pending; __u32 rx_jumbo_max_pending; __u32 tx_max_pending; __u32 rx_pending; __u32 rx_mini_pending; __u32 rx_jumbo_pending; __u32 tx_pending; } ;
481 struct ethtool_channels { __u32 cmd; __u32 max_rx; __u32 max_tx; __u32 max_other; __u32 max_combined; __u32 rx_count; __u32 tx_count; __u32 other_count; __u32 combined_count; } ;
509 struct ethtool_pauseparam { __u32 cmd; __u32 autoneg; __u32 rx_pause; __u32 tx_pause; } ;
613 struct ethtool_test { __u32 cmd; __u32 flags; __u32 reserved; __u32 len; __u64 data[0U]; } ;
645 struct ethtool_stats { __u32 cmd; __u32 n_stats; __u64 data[0U]; } ;
687 struct ethtool_tcpip4_spec { __be32 ip4src; __be32 ip4dst; __be16 psrc; __be16 pdst; __u8 tos; } ;
720 struct ethtool_ah_espip4_spec { __be32 ip4src; __be32 ip4dst; __be32 spi; __u8 tos; } ;
736 struct ethtool_usrip4_spec { __be32 ip4src; __be32 ip4dst; __be32 l4_4_bytes; __u8 tos; __u8 ip_ver; __u8 proto; } ;
756 struct ethtool_tcpip6_spec { __be32 ip6src[4U]; __be32 ip6dst[4U]; __be16 psrc; __be16 pdst; __u8 tclass; } ;
774 struct ethtool_ah_espip6_spec { __be32 ip6src[4U]; __be32 ip6dst[4U]; __be32 spi; __u8 tclass; } ;
790 struct ethtool_usrip6_spec { __be32 ip6src[4U]; __be32 ip6dst[4U]; __be32 l4_4_bytes; __u8 tclass; __u8 l4_proto; } ;
806 union ethtool_flow_union { struct ethtool_tcpip4_spec tcp_ip4_spec; struct ethtool_tcpip4_spec udp_ip4_spec; struct ethtool_tcpip4_spec sctp_ip4_spec; struct ethtool_ah_espip4_spec ah_ip4_spec; struct ethtool_ah_espip4_spec esp_ip4_spec; struct ethtool_usrip4_spec usr_ip4_spec; struct ethtool_tcpip6_spec tcp_ip6_spec; struct ethtool_tcpip6_spec udp_ip6_spec; struct ethtool_tcpip6_spec sctp_ip6_spec; struct ethtool_ah_espip6_spec ah_ip6_spec; struct ethtool_ah_espip6_spec esp_ip6_spec; struct ethtool_usrip6_spec usr_ip6_spec; struct ethhdr ether_spec; __u8 hdata[52U]; } ;
823 struct ethtool_flow_ext { __u8 padding[2U]; unsigned char h_dest[6U]; __be16 vlan_etype; __be16 vlan_tci; __be32 data[2U]; } ;
842 struct ethtool_rx_flow_spec { __u32 flow_type; union ethtool_flow_union h_u; struct ethtool_flow_ext h_ext; union ethtool_flow_union m_u; struct ethtool_flow_ext m_ext; __u64 ring_cookie; __u32 location; } ;
892 struct ethtool_rxnfc { __u32 cmd; __u32 flow_type; __u64 data; struct ethtool_rx_flow_spec fs; __u32 rule_cnt; __u32 rule_locs[0U]; } ;
1063 struct ethtool_flash { __u32 cmd; __u32 region; char data[128U]; } ;
1071 struct ethtool_dump { __u32 cmd; __u32 version; __u32 flag; __u32 len; __u8 data[0U]; } ;
1147 struct ethtool_ts_info { __u32 cmd; __u32 so_timestamping; __s32 phc_index; __u32 tx_types; __u32 tx_reserved[3U]; __u32 rx_filters; __u32 rx_reserved[3U]; } ;
1515 struct ethtool_link_settings { __u32 cmd; __u32 speed; __u8 duplex; __u8 port; __u8 phy_address; __u8 autoneg; __u8 mdio_support; __u8 eth_tp_mdix; __u8 eth_tp_mdix_ctrl; __s8 link_mode_masks_nwords; __u32 reserved[8U]; __u32 link_mode_masks[0U]; } ;
39 enum ethtool_phys_id_state { ETHTOOL_ID_INACTIVE = 0, ETHTOOL_ID_ACTIVE = 1, ETHTOOL_ID_ON = 2, ETHTOOL_ID_OFF = 3 } ;
97 struct __anonstruct_link_modes_439 { unsigned long supported[1U]; unsigned long advertising[1U]; unsigned long lp_advertising[1U]; } ;
97 struct ethtool_link_ksettings { struct ethtool_link_settings base; struct __anonstruct_link_modes_439 link_modes; } ;
158 struct ethtool_ops { int (*get_settings)(struct net_device *, struct ethtool_cmd *); int (*set_settings)(struct net_device *, struct ethtool_cmd *); void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *); int (*get_regs_len)(struct net_device *); void (*get_regs)(struct net_device *, struct ethtool_regs *, void *); void (*get_wol)(struct net_device *, struct ethtool_wolinfo *); int (*set_wol)(struct net_device *, struct ethtool_wolinfo *); u32 (*get_msglevel)(struct net_device *); void (*set_msglevel)(struct net_device *, u32 ); int (*nway_reset)(struct net_device *); u32 (*get_link)(struct net_device *); int (*get_eeprom_len)(struct net_device *); int (*get_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); int (*set_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); int (*get_coalesce)(struct net_device *, struct ethtool_coalesce *); int (*set_coalesce)(struct net_device *, struct ethtool_coalesce *); void (*get_ringparam)(struct net_device *, struct ethtool_ringparam *); int (*set_ringparam)(struct net_device *, struct ethtool_ringparam *); void (*get_pauseparam)(struct net_device *, struct ethtool_pauseparam *); int (*set_pauseparam)(struct net_device *, struct ethtool_pauseparam *); void (*self_test)(struct net_device *, struct ethtool_test *, u64 *); void (*get_strings)(struct net_device *, u32 , u8 *); int (*set_phys_id)(struct net_device *, enum ethtool_phys_id_state ); void (*get_ethtool_stats)(struct net_device *, struct ethtool_stats *, u64 *); int (*begin)(struct net_device *); void (*complete)(struct net_device *); u32 (*get_priv_flags)(struct net_device *); int (*set_priv_flags)(struct net_device *, u32 ); int (*get_sset_count)(struct net_device *, int); int (*get_rxnfc)(struct net_device *, struct ethtool_rxnfc *, u32 *); int (*set_rxnfc)(struct net_device *, struct ethtool_rxnfc *); int (*flash_device)(struct net_device *, struct ethtool_flash *); int (*reset)(struct net_device *, u32 *); u32 
(*get_rxfh_key_size)(struct net_device *); u32 (*get_rxfh_indir_size)(struct net_device *); int (*get_rxfh)(struct net_device *, u32 *, u8 *, u8 *); int (*set_rxfh)(struct net_device *, const u32 *, const u8 *, const u8 ); void (*get_channels)(struct net_device *, struct ethtool_channels *); int (*set_channels)(struct net_device *, struct ethtool_channels *); int (*get_dump_flag)(struct net_device *, struct ethtool_dump *); int (*get_dump_data)(struct net_device *, struct ethtool_dump *, void *); int (*set_dump)(struct net_device *, struct ethtool_dump *); int (*get_ts_info)(struct net_device *, struct ethtool_ts_info *); int (*get_module_info)(struct net_device *, struct ethtool_modinfo *); int (*get_module_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); int (*get_eee)(struct net_device *, struct ethtool_eee *); int (*set_eee)(struct net_device *, struct ethtool_eee *); int (*get_tunable)(struct net_device *, const struct ethtool_tunable *, void *); int (*set_tunable)(struct net_device *, const struct ethtool_tunable *, const void *); int (*get_per_queue_coalesce)(struct net_device *, u32 , struct ethtool_coalesce *); int (*set_per_queue_coalesce)(struct net_device *, u32 , struct ethtool_coalesce *); int (*get_link_ksettings)(struct net_device *, struct ethtool_link_ksettings *); int (*set_link_ksettings)(struct net_device *, const struct ethtool_link_ksettings *); } ;
375 struct prot_inuse ;
376 struct netns_core { struct ctl_table_header *sysctl_hdr; int sysctl_somaxconn; struct prot_inuse *inuse; } ;
38 struct u64_stats_sync { } ;
160 struct ipstats_mib { u64 mibs[36U]; struct u64_stats_sync syncp; } ;
61 struct icmp_mib { unsigned long mibs[28U]; } ;
67 struct icmpmsg_mib { atomic_long_t mibs[512U]; } ;
72 struct icmpv6_mib { unsigned long mibs[6U]; } ;
79 struct icmpv6_mib_device { atomic_long_t mibs[6U]; } ;
83 struct icmpv6msg_mib { atomic_long_t mibs[512U]; } ;
89 struct icmpv6msg_mib_device { atomic_long_t mibs[512U]; } ;
93 struct tcp_mib { unsigned long mibs[16U]; } ;
100 struct udp_mib { unsigned long mibs[9U]; } ;
106 struct linux_mib { unsigned long mibs[117U]; } ;
112 struct linux_xfrm_mib { unsigned long mibs[29U]; } ;
118 struct proc_dir_entry ;
118 struct netns_mib { struct tcp_mib *tcp_statistics; struct ipstats_mib *ip_statistics; struct linux_mib *net_statistics; struct udp_mib *udp_statistics; struct udp_mib *udplite_statistics; struct icmp_mib *icmp_statistics; struct icmpmsg_mib *icmpmsg_statistics; struct proc_dir_entry *proc_net_devsnmp6; struct udp_mib *udp_stats_in6; struct udp_mib *udplite_stats_in6; struct ipstats_mib *ipv6_statistics; struct icmpv6_mib *icmpv6_statistics; struct icmpv6msg_mib *icmpv6msg_statistics; struct linux_xfrm_mib *xfrm_statistics; } ;
26 struct netns_unix { int sysctl_max_dgram_qlen; struct ctl_table_header *ctl; } ;
12 struct netns_packet { struct mutex sklist_lock; struct hlist_head sklist; } ;
14 struct netns_frags { struct percpu_counter mem; int timeout; int high_thresh; int low_thresh; int max_dist; } ;
187 struct ipv4_devconf ;
188 struct fib_rules_ops ;
189 struct fib_table ;
190 struct local_ports { seqlock_t lock; int range[2U]; bool warned; } ;
24 struct ping_group_range { seqlock_t lock; kgid_t range[2U]; } ;
29 struct inet_peer_base ;
29 struct xt_table ;
29 struct netns_ipv4 { struct ctl_table_header *forw_hdr; struct ctl_table_header *frags_hdr; struct ctl_table_header *ipv4_hdr; struct ctl_table_header *route_hdr; struct ctl_table_header *xfrm4_hdr; struct ipv4_devconf *devconf_all; struct ipv4_devconf *devconf_dflt; struct fib_rules_ops *rules_ops; bool fib_has_custom_rules; struct fib_table *fib_local; struct fib_table *fib_main; struct fib_table *fib_default; int fib_num_tclassid_users; struct hlist_head *fib_table_hash; bool fib_offload_disabled; struct sock *fibnl; struct sock **icmp_sk; struct sock *mc_autojoin_sk; struct inet_peer_base *peers; struct sock **tcp_sk; struct netns_frags frags; struct xt_table *iptable_filter; struct xt_table *iptable_mangle; struct xt_table *iptable_raw; struct xt_table *arptable_filter; struct xt_table *iptable_security; struct xt_table *nat_table; int sysctl_icmp_echo_ignore_all; int sysctl_icmp_echo_ignore_broadcasts; int sysctl_icmp_ignore_bogus_error_responses; int sysctl_icmp_ratelimit; int sysctl_icmp_ratemask; int sysctl_icmp_errors_use_inbound_ifaddr; struct local_ports ip_local_ports; int sysctl_tcp_ecn; int sysctl_tcp_ecn_fallback; int sysctl_ip_default_ttl; int sysctl_ip_no_pmtu_disc; int sysctl_ip_fwd_use_pmtu; int sysctl_ip_nonlocal_bind; int sysctl_ip_dynaddr; int sysctl_ip_early_demux; int sysctl_fwmark_reflect; int sysctl_tcp_fwmark_accept; int sysctl_tcp_l3mdev_accept; int sysctl_tcp_mtu_probing; int sysctl_tcp_base_mss; int sysctl_tcp_probe_threshold; u32 sysctl_tcp_probe_interval; int sysctl_tcp_keepalive_time; int sysctl_tcp_keepalive_probes; int sysctl_tcp_keepalive_intvl; int sysctl_tcp_syn_retries; int sysctl_tcp_synack_retries; int sysctl_tcp_syncookies; int sysctl_tcp_reordering; int sysctl_tcp_retries1; int sysctl_tcp_retries2; int sysctl_tcp_orphan_retries; int sysctl_tcp_fin_timeout; unsigned int sysctl_tcp_notsent_lowat; int sysctl_igmp_max_memberships; int sysctl_igmp_max_msf; int sysctl_igmp_llm_reports; int sysctl_igmp_qrv; struct 
ping_group_range ping_group_range; atomic_t dev_addr_genid; unsigned long *sysctl_local_reserved_ports; struct list_head mr_tables; struct fib_rules_ops *mr_rules_ops; int sysctl_fib_multipath_use_neigh; atomic_t rt_genid; } ;
142 struct neighbour ;
142 struct dst_ops { unsigned short family; unsigned int gc_thresh; int (*gc)(struct dst_ops *); struct dst_entry * (*check)(struct dst_entry *, __u32 ); unsigned int (*default_advmss)(const struct dst_entry *); unsigned int (*mtu)(const struct dst_entry *); u32 * (*cow_metrics)(struct dst_entry *, unsigned long); void (*destroy)(struct dst_entry *); void (*ifdown)(struct dst_entry *, struct net_device *, int); struct dst_entry * (*negative_advice)(struct dst_entry *); void (*link_failure)(struct sk_buff *); void (*update_pmtu)(struct dst_entry *, struct sock *, struct sk_buff *, u32 ); void (*redirect)(struct dst_entry *, struct sock *, struct sk_buff *); int (*local_out)(struct net *, struct sock *, struct sk_buff *); struct neighbour * (*neigh_lookup)(const struct dst_entry *, struct sk_buff *, const void *); struct kmem_cache *kmem_cachep; struct percpu_counter pcpuc_entries; } ;
73 struct netns_sysctl_ipv6 { struct ctl_table_header *hdr; struct ctl_table_header *route_hdr; struct ctl_table_header *icmp_hdr; struct ctl_table_header *frags_hdr; struct ctl_table_header *xfrm6_hdr; int bindv6only; int flush_delay; int ip6_rt_max_size; int ip6_rt_gc_min_interval; int ip6_rt_gc_timeout; int ip6_rt_gc_interval; int ip6_rt_gc_elasticity; int ip6_rt_mtu_expires; int ip6_rt_min_advmss; int flowlabel_consistency; int auto_flowlabels; int icmpv6_time; int anycast_src_echo_reply; int ip_nonlocal_bind; int fwmark_reflect; int idgen_retries; int idgen_delay; int flowlabel_state_ranges; } ;
40 struct rt6_info ;
40 struct rt6_statistics ;
40 struct fib6_table ;
40 struct netns_ipv6 { struct netns_sysctl_ipv6 sysctl; struct ipv6_devconf *devconf_all; struct ipv6_devconf *devconf_dflt; struct inet_peer_base *peers; struct netns_frags frags; struct xt_table *ip6table_filter; struct xt_table *ip6table_mangle; struct xt_table *ip6table_raw; struct xt_table *ip6table_security; struct xt_table *ip6table_nat; struct rt6_info *ip6_null_entry; struct rt6_statistics *rt6_stats; struct timer_list ip6_fib_timer; struct hlist_head *fib_table_hash; struct fib6_table *fib6_main_tbl; struct list_head fib6_walkers; struct dst_ops ip6_dst_ops; rwlock_t fib6_walker_lock; spinlock_t fib6_gc_lock; unsigned int ip6_rt_gc_expire; unsigned long ip6_rt_last_gc; struct rt6_info *ip6_prohibit_entry; struct rt6_info *ip6_blk_hole_entry; struct fib6_table *fib6_local_tbl; struct fib_rules_ops *fib6_rules_ops; struct sock **icmp_sk; struct sock *ndisc_sk; struct sock *tcp_sk; struct sock *igmp_sk; struct sock *mc_autojoin_sk; struct list_head mr6_tables; struct fib_rules_ops *mr6_rules_ops; atomic_t dev_addr_genid; atomic_t fib6_sernum; } ;
89 struct netns_nf_frag { struct netns_sysctl_ipv6 sysctl; struct netns_frags frags; } ;
95 struct netns_sysctl_lowpan { struct ctl_table_header *frags_hdr; } ;
14 struct netns_ieee802154_lowpan { struct netns_sysctl_lowpan sysctl; struct netns_frags frags; } ;
20 struct sctp_mib ;
21 struct netns_sctp { struct sctp_mib *sctp_statistics; struct proc_dir_entry *proc_net_sctp; struct ctl_table_header *sysctl_header; struct sock *ctl_sock; struct list_head local_addr_list; struct list_head addr_waitq; struct timer_list addr_wq_timer; struct list_head auto_asconf_splist; spinlock_t addr_wq_lock; spinlock_t local_addr_lock; unsigned int rto_initial; unsigned int rto_min; unsigned int rto_max; int rto_alpha; int rto_beta; int max_burst; int cookie_preserve_enable; char *sctp_hmac_alg; unsigned int valid_cookie_life; unsigned int sack_timeout; unsigned int hb_interval; int max_retrans_association; int max_retrans_path; int max_retrans_init; int pf_retrans; int pf_enable; int sndbuf_policy; int rcvbuf_policy; int default_auto_asconf; int addip_enable; int addip_noauth; int prsctp_enable; int auth_enable; int scope_policy; int rwnd_upd_shift; unsigned long max_autoclose; } ;
141 struct netns_dccp { struct sock *v4_ctl_sk; struct sock *v6_ctl_sk; } ;
39 struct in_addr { __be32 s_addr; } ;
225 struct sockaddr_in { __kernel_sa_family_t sin_family; __be16 sin_port; struct in_addr sin_addr; unsigned char __pad[8U]; } ;
79 struct nf_logger ;
80 struct nf_queue_handler ;
81 struct netns_nf { struct proc_dir_entry *proc_netfilter; const struct nf_queue_handler *queue_handler; const struct nf_logger *nf_loggers[13U]; struct ctl_table_header *nf_log_dir_header; struct list_head hooks[13U][8U]; } ;
21 struct ebt_table ;
22 struct netns_xt { struct list_head tables[13U]; bool notrack_deprecated_warning; bool clusterip_deprecated_warning; struct ebt_table *broute_table; struct ebt_table *frame_filter; struct ebt_table *frame_nat; } ;
19 struct hlist_nulls_node ;
19 struct hlist_nulls_head { struct hlist_nulls_node *first; } ;
23 struct hlist_nulls_node { struct hlist_nulls_node *next; struct hlist_nulls_node **pprev; } ;
32 struct nf_proto_net { struct ctl_table_header *ctl_table_header; struct ctl_table *ctl_table; struct ctl_table_header *ctl_compat_header; struct ctl_table *ctl_compat_table; unsigned int users; } ;
25 struct nf_generic_net { struct nf_proto_net pn; unsigned int timeout; } ;
30 struct nf_tcp_net { struct nf_proto_net pn; unsigned int timeouts[14U]; unsigned int tcp_loose; unsigned int tcp_be_liberal; unsigned int tcp_max_retrans; } ;
44 struct nf_udp_net { struct nf_proto_net pn; unsigned int timeouts[2U]; } ;
49 struct nf_icmp_net { struct nf_proto_net pn; unsigned int timeout; } ;
54 struct nf_ip_net { struct nf_generic_net generic; struct nf_tcp_net tcp; struct nf_udp_net udp; struct nf_icmp_net icmp; struct nf_icmp_net icmpv6; struct ctl_table_header *ctl_table_header; struct ctl_table *ctl_table; } ;
65 struct ct_pcpu { spinlock_t lock; struct hlist_nulls_head unconfirmed; struct hlist_nulls_head dying; } ;
72 struct ip_conntrack_stat ;
72 struct nf_ct_event_notifier ;
72 struct nf_exp_event_notifier ;
72 struct netns_ct { atomic_t count; unsigned int expect_count; struct delayed_work ecache_dwork; bool ecache_dwork_pending; struct ctl_table_header *sysctl_header; struct ctl_table_header *acct_sysctl_header; struct ctl_table_header *tstamp_sysctl_header; struct ctl_table_header *event_sysctl_header; struct ctl_table_header *helper_sysctl_header; unsigned int sysctl_log_invalid; int sysctl_events; int sysctl_acct; int sysctl_auto_assign_helper; bool auto_assign_helper_warned; int sysctl_tstamp; int sysctl_checksum; struct ct_pcpu *pcpu_lists; struct ip_conntrack_stat *stat; struct nf_ct_event_notifier *nf_conntrack_event_cb; struct nf_exp_event_notifier *nf_expect_event_cb; struct nf_ip_net nf_ct_proto; unsigned int labels_used; u8 label_words; } ;
104 struct nft_af_info ;
105 struct netns_nftables { struct list_head af_info; struct list_head commit_list; struct nft_af_info *ipv4; struct nft_af_info *ipv6; struct nft_af_info *inet; struct nft_af_info *arp; struct nft_af_info *bridge; struct nft_af_info *netdev; unsigned int base_seq; u8 gencursor; } ;
486 struct tasklet_struct { struct tasklet_struct *next; unsigned long state; atomic_t count; void (*func)(unsigned long); unsigned long data; } ;
708 struct flow_cache_percpu { struct hlist_head *hash_table; int hash_count; u32 hash_rnd; int hash_rnd_recalc; struct tasklet_struct flush_tasklet; } ;
16 struct flow_cache { u32 hash_shift; struct flow_cache_percpu *percpu; struct notifier_block hotcpu_notifier; int low_watermark; int high_watermark; struct timer_list rnd_timer; } ;
25 struct xfrm_policy_hash { struct hlist_head *table; unsigned int hmask; u8 dbits4; u8 sbits4; u8 dbits6; u8 sbits6; } ;
21 struct xfrm_policy_hthresh { struct work_struct work; seqlock_t lock; u8 lbits4; u8 rbits4; u8 lbits6; u8 rbits6; } ;
30 struct netns_xfrm { struct list_head state_all; struct hlist_head *state_bydst; struct hlist_head *state_bysrc; struct hlist_head *state_byspi; unsigned int state_hmask; unsigned int state_num; struct work_struct state_hash_work; struct hlist_head state_gc_list; struct work_struct state_gc_work; struct list_head policy_all; struct hlist_head *policy_byidx; unsigned int policy_idx_hmask; struct hlist_head policy_inexact[3U]; struct xfrm_policy_hash policy_bydst[3U]; unsigned int policy_count[6U]; struct work_struct policy_hash_work; struct xfrm_policy_hthresh policy_hthresh; struct sock *nlsk; struct sock *nlsk_stash; u32 sysctl_aevent_etime; u32 sysctl_aevent_rseqth; int sysctl_larval_drop; u32 sysctl_acq_expires; struct ctl_table_header *sysctl_hdr; struct dst_ops xfrm4_dst_ops; struct dst_ops xfrm6_dst_ops; spinlock_t xfrm_state_lock; rwlock_t xfrm_policy_lock; struct mutex xfrm_cfg_mutex; struct flow_cache flow_cache_global; atomic_t flow_cache_genid; struct list_head flow_cache_gc_list; atomic_t flow_cache_gc_count; spinlock_t flow_cache_gc_lock; struct work_struct flow_cache_gc_work; struct work_struct flow_cache_flush_work; struct mutex flow_flush_sem; } ;
89 struct mpls_route ;
90 struct netns_mpls { size_t platform_labels; struct mpls_route **platform_label; struct ctl_table_header *ctl; } ;
16 struct proc_ns_operations ;
17 struct ns_common { atomic_long_t stashed; const struct proc_ns_operations *ops; unsigned int inum; } ;
11 struct net_generic ;
12 struct netns_ipvs ;
13 struct net { atomic_t passive; atomic_t count; spinlock_t rules_mod_lock; atomic64_t cookie_gen; struct list_head list; struct list_head cleanup_list; struct list_head exit_list; struct user_namespace *user_ns; spinlock_t nsid_lock; struct idr netns_ids; struct ns_common ns; struct proc_dir_entry *proc_net; struct proc_dir_entry *proc_net_stat; struct ctl_table_set sysctls; struct sock *rtnl; struct sock *genl_sock; struct list_head dev_base_head; struct hlist_head *dev_name_head; struct hlist_head *dev_index_head; unsigned int dev_base_seq; int ifindex; unsigned int dev_unreg_count; struct list_head rules_ops; struct net_device *loopback_dev; struct netns_core core; struct netns_mib mib; struct netns_packet packet; struct netns_unix unx; struct netns_ipv4 ipv4; struct netns_ipv6 ipv6; struct netns_ieee802154_lowpan ieee802154_lowpan; struct netns_sctp sctp; struct netns_dccp dccp; struct netns_nf nf; struct netns_xt xt; struct netns_ct ct; struct netns_nftables nft; struct netns_nf_frag nf_frag; struct sock *nfnl; struct sock *nfnl_stash; struct list_head nfnl_acct_list; struct list_head nfct_timeout_list; struct sk_buff_head wext_nlevents; struct net_generic *gen; struct netns_xfrm xfrm; struct netns_ipvs *ipvs; struct netns_mpls mpls; struct sock *diag_nlsk; atomic_t fnhe_genid; } ;
247 struct __anonstruct_possible_net_t_454 { struct net *net; } ;
247 typedef struct __anonstruct_possible_net_t_454 possible_net_t;
13 typedef unsigned long kernel_ulong_t;
186 struct acpi_device_id { __u8 id[9U]; kernel_ulong_t driver_data; __u32 cls; __u32 cls_msk; } ;
229 struct of_device_id { char name[32U]; char type[32U]; char compatible[128U]; const void *data; } ;
674 enum fwnode_type { FWNODE_INVALID = 0, FWNODE_OF = 1, FWNODE_ACPI = 2, FWNODE_ACPI_DATA = 3, FWNODE_PDATA = 4, FWNODE_IRQCHIP = 5 } ;
683 struct fwnode_handle { enum fwnode_type type; struct fwnode_handle *secondary; } ;
32 typedef u32 phandle;
34 struct property { char *name; int length; void *value; struct property *next; unsigned long _flags; unsigned int unique_id; struct bin_attribute attr; } ;
44 struct device_node { const char *name; const char *type; phandle phandle; const char *full_name; struct fwnode_handle fwnode; struct property *properties; struct property *deadprops; struct device_node *parent; struct device_node *child; struct device_node *sibling; struct kobject kobj; unsigned long _flags; void *data; } ;
296 struct mii_bus ;
303 struct mdio_device { struct device dev; const struct dev_pm_ops *pm_ops; struct mii_bus *bus; int (*bus_match)(struct device *, struct device_driver *); void (*device_free)(struct mdio_device *); void (*device_remove)(struct mdio_device *); int addr; int flags; } ;
41 struct mdio_driver_common { struct device_driver driver; int flags; } ;
244 struct phy_device ;
245 enum ldv_30804 { PHY_INTERFACE_MODE_NA = 0, PHY_INTERFACE_MODE_MII = 1, PHY_INTERFACE_MODE_GMII = 2, PHY_INTERFACE_MODE_SGMII = 3, PHY_INTERFACE_MODE_TBI = 4, PHY_INTERFACE_MODE_REVMII = 5, PHY_INTERFACE_MODE_RMII = 6, PHY_INTERFACE_MODE_RGMII = 7, PHY_INTERFACE_MODE_RGMII_ID = 8, PHY_INTERFACE_MODE_RGMII_RXID = 9, PHY_INTERFACE_MODE_RGMII_TXID = 10, PHY_INTERFACE_MODE_RTBI = 11, PHY_INTERFACE_MODE_SMII = 12, PHY_INTERFACE_MODE_XGMII = 13, PHY_INTERFACE_MODE_MOCA = 14, PHY_INTERFACE_MODE_QSGMII = 15, PHY_INTERFACE_MODE_MAX = 16 } ;
84 typedef enum ldv_30804 phy_interface_t;
130 enum ldv_30855 { MDIOBUS_ALLOCATED = 1, MDIOBUS_REGISTERED = 2, MDIOBUS_UNREGISTERED = 3, MDIOBUS_RELEASED = 4 } ;
137 struct mii_bus { struct module *owner; const char *name; char id[17U]; void *priv; int (*read)(struct mii_bus *, int, int); int (*write)(struct mii_bus *, int, int, u16 ); int (*reset)(struct mii_bus *); struct mutex mdio_lock; struct device *parent; enum ldv_30855 state; struct device dev; struct mdio_device *mdio_map[32U]; u32 phy_mask; u32 phy_ignore_ta_mask; int irq[32U]; } ;
218 enum phy_state { PHY_DOWN = 0, PHY_STARTING = 1, PHY_READY = 2, PHY_PENDING = 3, PHY_UP = 4, PHY_AN = 5, PHY_RUNNING = 6, PHY_NOLINK = 7, PHY_FORCING = 8, PHY_CHANGELINK = 9, PHY_HALTED = 10, PHY_RESUMING = 11 } ;
233 struct phy_c45_device_ids { u32 devices_in_package; u32 device_ids[8U]; } ;
326 struct phy_driver ;
326 struct phy_device { struct mdio_device mdio; struct phy_driver *drv; u32 phy_id; struct phy_c45_device_ids c45_ids; bool is_c45; bool is_internal; bool is_pseudo_fixed_link; bool has_fixups; bool suspended; enum phy_state state; u32 dev_flags; phy_interface_t interface; int speed; int duplex; int pause; int asym_pause; int link; u32 interrupts; u32 supported; u32 advertising; u32 lp_advertising; int autoneg; int link_timeout; int irq; void *priv; struct work_struct phy_queue; struct delayed_work state_queue; atomic_t irq_disable; struct mutex lock; struct net_device *attached_dev; u8 mdix; void (*adjust_link)(struct net_device *); } ;
428 struct phy_driver { struct mdio_driver_common mdiodrv; u32 phy_id; char *name; unsigned int phy_id_mask; u32 features; u32 flags; const void *driver_data; int (*soft_reset)(struct phy_device *); int (*config_init)(struct phy_device *); int (*probe)(struct phy_device *); int (*suspend)(struct phy_device *); int (*resume)(struct phy_device *); int (*config_aneg)(struct phy_device *); int (*aneg_done)(struct phy_device *); int (*read_status)(struct phy_device *); int (*ack_interrupt)(struct phy_device *); int (*config_intr)(struct phy_device *); int (*did_interrupt)(struct phy_device *); void (*remove)(struct phy_device *); int (*match_phy_device)(struct phy_device *); int (*ts_info)(struct phy_device *, struct ethtool_ts_info *); int (*hwtstamp)(struct phy_device *, struct ifreq *); bool (*rxtstamp)(struct phy_device *, struct sk_buff *, int); void (*txtstamp)(struct phy_device *, struct sk_buff *, int); int (*set_wol)(struct phy_device *, struct ethtool_wolinfo *); void (*get_wol)(struct phy_device *, struct ethtool_wolinfo *); void (*link_change_notify)(struct phy_device *); int (*read_mmd_indirect)(struct phy_device *, int, int, int); void (*write_mmd_indirect)(struct phy_device *, int, int, int, u32 ); int (*module_info)(struct phy_device *, struct ethtool_modinfo *); int (*module_eeprom)(struct phy_device *, struct ethtool_eeprom *, u8 *); int (*get_sset_count)(struct phy_device *); void (*get_strings)(struct phy_device *, u8 *); void (*get_stats)(struct phy_device *, struct ethtool_stats *, u64 *); } ;
841 struct fixed_phy_status { int link; int speed; int duplex; int pause; int asym_pause; } ;
27 enum dsa_tag_protocol { DSA_TAG_PROTO_NONE = 0, DSA_TAG_PROTO_DSA = 1, DSA_TAG_PROTO_TRAILER = 2, DSA_TAG_PROTO_EDSA = 3, DSA_TAG_PROTO_BRCM = 4, DSA_TAG_LAST = 5 } ;
36 struct dsa_chip_data { struct device *host_dev; int sw_addr; int eeprom_len; struct device_node *of_node; char *port_names[12U]; struct device_node *port_dn[12U]; s8 rtable[4U]; } ;
70 struct dsa_platform_data { struct device *netdev; struct net_device *of_netdev; int nr_chips; struct dsa_chip_data *chip; } ;
86 struct packet_type ;
87 struct dsa_switch ;
87 struct dsa_device_ops ;
87 struct dsa_switch_tree { struct list_head list; u32 tree; struct kref refcount; bool applied; struct dsa_platform_data *pd; struct net_device *master_netdev; int (*rcv)(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *); struct ethtool_ops master_ethtool_ops; const struct ethtool_ops *master_orig_ethtool_ops; s8 cpu_switch; s8 cpu_port; struct dsa_switch *ds[4U]; const struct dsa_device_ops *tag_ops; } ;
140 struct dsa_port { struct net_device *netdev; struct device_node *dn; unsigned int ageing_time; } ;
146 struct dsa_switch_driver ;
146 struct dsa_switch { struct device *dev; struct dsa_switch_tree *dst; int index; void *priv; struct dsa_chip_data *cd; struct dsa_switch_driver *drv; s8 rtable[4U]; char hwmon_name[24U]; struct device *hwmon_dev; struct net_device *master_netdev; u32 dsa_port_mask; u32 cpu_port_mask; u32 enabled_port_mask; u32 phys_mii_mask; struct dsa_port ports[12U]; struct mii_bus *slave_mii_bus; } ;
233 struct switchdev_trans ;
234 struct switchdev_obj ;
235 struct switchdev_obj_port_fdb ;
236 struct switchdev_obj_port_vlan ;
237 struct dsa_switch_driver { struct list_head list; enum dsa_tag_protocol tag_protocol; const char * (*probe)(struct device *, struct device *, int, void **); int (*setup)(struct dsa_switch *); int (*set_addr)(struct dsa_switch *, u8 *); u32 (*get_phy_flags)(struct dsa_switch *, int); int (*phy_read)(struct dsa_switch *, int, int); int (*phy_write)(struct dsa_switch *, int, int, u16 ); void (*adjust_link)(struct dsa_switch *, int, struct phy_device *); void (*fixed_link_update)(struct dsa_switch *, int, struct fixed_phy_status *); void (*get_strings)(struct dsa_switch *, int, uint8_t *); void (*get_ethtool_stats)(struct dsa_switch *, int, uint64_t *); int (*get_sset_count)(struct dsa_switch *); void (*get_wol)(struct dsa_switch *, int, struct ethtool_wolinfo *); int (*set_wol)(struct dsa_switch *, int, struct ethtool_wolinfo *); int (*suspend)(struct dsa_switch *); int (*resume)(struct dsa_switch *); int (*port_enable)(struct dsa_switch *, int, struct phy_device *); void (*port_disable)(struct dsa_switch *, int, struct phy_device *); int (*set_eee)(struct dsa_switch *, int, struct phy_device *, struct ethtool_eee *); int (*get_eee)(struct dsa_switch *, int, struct ethtool_eee *); int (*get_temp)(struct dsa_switch *, int *); int (*get_temp_limit)(struct dsa_switch *, int *); int (*set_temp_limit)(struct dsa_switch *, int); int (*get_temp_alarm)(struct dsa_switch *, bool *); int (*get_eeprom_len)(struct dsa_switch *); int (*get_eeprom)(struct dsa_switch *, struct ethtool_eeprom *, u8 *); int (*set_eeprom)(struct dsa_switch *, struct ethtool_eeprom *, u8 *); int (*get_regs_len)(struct dsa_switch *, int); void (*get_regs)(struct dsa_switch *, int, struct ethtool_regs *, void *); int (*set_ageing_time)(struct dsa_switch *, unsigned int); int (*port_bridge_join)(struct dsa_switch *, int, struct net_device *); void (*port_bridge_leave)(struct dsa_switch *, int); void (*port_stp_state_set)(struct dsa_switch *, int, u8 ); int (*port_vlan_filtering)(struct dsa_switch *, 
int, bool ); int (*port_vlan_prepare)(struct dsa_switch *, int, const struct switchdev_obj_port_vlan *, struct switchdev_trans *); void (*port_vlan_add)(struct dsa_switch *, int, const struct switchdev_obj_port_vlan *, struct switchdev_trans *); int (*port_vlan_del)(struct dsa_switch *, int, const struct switchdev_obj_port_vlan *); int (*port_vlan_dump)(struct dsa_switch *, int, struct switchdev_obj_port_vlan *, int (*)(struct switchdev_obj *)); int (*port_fdb_prepare)(struct dsa_switch *, int, const struct switchdev_obj_port_fdb *, struct switchdev_trans *); void (*port_fdb_add)(struct dsa_switch *, int, const struct switchdev_obj_port_fdb *, struct switchdev_trans *); int (*port_fdb_del)(struct dsa_switch *, int, const struct switchdev_obj_port_fdb *); int (*port_fdb_dump)(struct dsa_switch *, int, struct switchdev_obj_port_fdb *, int (*)(struct switchdev_obj *)); } ;
389 struct ieee_ets { __u8 willing; __u8 ets_cap; __u8 cbs; __u8 tc_tx_bw[8U]; __u8 tc_rx_bw[8U]; __u8 tc_tsa[8U]; __u8 prio_tc[8U]; __u8 tc_reco_bw[8U]; __u8 tc_reco_tsa[8U]; __u8 reco_prio_tc[8U]; } ;
69 struct ieee_maxrate { __u64 tc_maxrate[8U]; } ;
87 struct ieee_qcn { __u8 rpg_enable[8U]; __u32 rppp_max_rps[8U]; __u32 rpg_time_reset[8U]; __u32 rpg_byte_reset[8U]; __u32 rpg_threshold[8U]; __u32 rpg_max_rate[8U]; __u32 rpg_ai_rate[8U]; __u32 rpg_hai_rate[8U]; __u32 rpg_gd[8U]; __u32 rpg_min_dec_fac[8U]; __u32 rpg_min_rate[8U]; __u32 cndd_state_machine[8U]; } ;
132 struct ieee_qcn_stats { __u64 rppp_rp_centiseconds[8U]; __u32 rppp_created_rps[8U]; } ;
144 struct ieee_pfc { __u8 pfc_cap; __u8 pfc_en; __u8 mbc; __u16 delay; __u64 requests[8U]; __u64 indications[8U]; } ;
164 struct cee_pg { __u8 willing; __u8 error; __u8 pg_en; __u8 tcs_supported; __u8 pg_bw[8U]; __u8 prio_pg[8U]; } ;
187 struct cee_pfc { __u8 willing; __u8 error; __u8 pfc_en; __u8 tcs_supported; } ;
202 struct dcb_app { __u8 selector; __u8 priority; __u16 protocol; } ;
236 struct dcb_peer_app_info { __u8 willing; __u8 error; } ;
40 struct dcbnl_rtnl_ops { int (*ieee_getets)(struct net_device *, struct ieee_ets *); int (*ieee_setets)(struct net_device *, struct ieee_ets *); int (*ieee_getmaxrate)(struct net_device *, struct ieee_maxrate *); int (*ieee_setmaxrate)(struct net_device *, struct ieee_maxrate *); int (*ieee_getqcn)(struct net_device *, struct ieee_qcn *); int (*ieee_setqcn)(struct net_device *, struct ieee_qcn *); int (*ieee_getqcnstats)(struct net_device *, struct ieee_qcn_stats *); int (*ieee_getpfc)(struct net_device *, struct ieee_pfc *); int (*ieee_setpfc)(struct net_device *, struct ieee_pfc *); int (*ieee_getapp)(struct net_device *, struct dcb_app *); int (*ieee_setapp)(struct net_device *, struct dcb_app *); int (*ieee_delapp)(struct net_device *, struct dcb_app *); int (*ieee_peer_getets)(struct net_device *, struct ieee_ets *); int (*ieee_peer_getpfc)(struct net_device *, struct ieee_pfc *); u8 (*getstate)(struct net_device *); u8 (*setstate)(struct net_device *, u8 ); void (*getpermhwaddr)(struct net_device *, u8 *); void (*setpgtccfgtx)(struct net_device *, int, u8 , u8 , u8 , u8 ); void (*setpgbwgcfgtx)(struct net_device *, int, u8 ); void (*setpgtccfgrx)(struct net_device *, int, u8 , u8 , u8 , u8 ); void (*setpgbwgcfgrx)(struct net_device *, int, u8 ); void (*getpgtccfgtx)(struct net_device *, int, u8 *, u8 *, u8 *, u8 *); void (*getpgbwgcfgtx)(struct net_device *, int, u8 *); void (*getpgtccfgrx)(struct net_device *, int, u8 *, u8 *, u8 *, u8 *); void (*getpgbwgcfgrx)(struct net_device *, int, u8 *); void (*setpfccfg)(struct net_device *, int, u8 ); void (*getpfccfg)(struct net_device *, int, u8 *); u8 (*setall)(struct net_device *); u8 (*getcap)(struct net_device *, int, u8 *); int (*getnumtcs)(struct net_device *, int, u8 *); int (*setnumtcs)(struct net_device *, int, u8 ); u8 (*getpfcstate)(struct net_device *); void (*setpfcstate)(struct net_device *, u8 ); void (*getbcncfg)(struct net_device *, int, u32 *); void (*setbcncfg)(struct net_device *, int, u32 ); 
void (*getbcnrp)(struct net_device *, int, u8 *); void (*setbcnrp)(struct net_device *, int, u8 ); int (*setapp)(struct net_device *, u8 , u16 , u8 ); int (*getapp)(struct net_device *, u8 , u16 ); u8 (*getfeatcfg)(struct net_device *, int, u8 *); u8 (*setfeatcfg)(struct net_device *, int, u8 ); u8 (*getdcbx)(struct net_device *); u8 (*setdcbx)(struct net_device *, u8 ); int (*peer_getappinfo)(struct net_device *, struct dcb_peer_app_info *, u16 *); int (*peer_getapptable)(struct net_device *, struct dcb_app *); int (*cee_peer_getpg)(struct net_device *, struct cee_pg *); int (*cee_peer_getpfc)(struct net_device *, struct cee_pfc *); } ;
105 struct taskstats { __u16 version; __u32 ac_exitcode; __u8 ac_flag; __u8 ac_nice; __u64 cpu_count; __u64 cpu_delay_total; __u64 blkio_count; __u64 blkio_delay_total; __u64 swapin_count; __u64 swapin_delay_total; __u64 cpu_run_real_total; __u64 cpu_run_virtual_total; char ac_comm[32U]; __u8 ac_sched; __u8 ac_pad[3U]; __u32 ac_uid; __u32 ac_gid; __u32 ac_pid; __u32 ac_ppid; __u32 ac_btime; __u64 ac_etime; __u64 ac_utime; __u64 ac_stime; __u64 ac_minflt; __u64 ac_majflt; __u64 coremem; __u64 virtmem; __u64 hiwater_rss; __u64 hiwater_vm; __u64 read_char; __u64 write_char; __u64 read_syscalls; __u64 write_syscalls; __u64 read_bytes; __u64 write_bytes; __u64 cancelled_write_bytes; __u64 nvcsw; __u64 nivcsw; __u64 ac_utimescaled; __u64 ac_stimescaled; __u64 cpu_scaled_run_real_total; __u64 freepages_count; __u64 freepages_delay_total; } ;
58 struct mnt_namespace ;
59 struct ipc_namespace ;
60 struct cgroup_namespace ;
61 struct nsproxy { atomic_t count; struct uts_namespace *uts_ns; struct ipc_namespace *ipc_ns; struct mnt_namespace *mnt_ns; struct pid_namespace *pid_ns_for_children; struct net *net_ns; struct cgroup_namespace *cgroup_ns; } ;
86 struct uid_gid_extent { u32 first; u32 lower_first; u32 count; } ;
19 struct uid_gid_map { u32 nr_extents; struct uid_gid_extent extent[5U]; } ;
20 struct user_namespace { struct uid_gid_map uid_map; struct uid_gid_map gid_map; struct uid_gid_map projid_map; atomic_t count; struct user_namespace *parent; int level; kuid_t owner; kgid_t group; struct ns_common ns; unsigned long flags; struct key *persistent_keyring_register; struct rw_semaphore persistent_keyring_register_sem; } ;
609 struct cgroup_namespace { atomic_t count; struct ns_common ns; struct user_namespace *user_ns; struct css_set *root_cset; } ;
663 struct netprio_map { struct callback_head rcu; u32 priomap_len; u32 priomap[]; } ;
99 struct xfrm_policy ;
100 struct xfrm_state ;
116 struct request_sock ;
41 struct nlmsghdr { __u32 nlmsg_len; __u16 nlmsg_type; __u16 nlmsg_flags; __u32 nlmsg_seq; __u32 nlmsg_pid; } ;
143 struct nlattr { __u16 nla_len; __u16 nla_type; } ;
105 struct netlink_callback { struct sk_buff *skb; const struct nlmsghdr *nlh; int (*start)(struct netlink_callback *); int (*dump)(struct sk_buff *, struct netlink_callback *); int (*done)(struct netlink_callback *); void *data; struct module *module; u16 family; u16 min_dump_alloc; unsigned int prev_seq; unsigned int seq; long args[6U]; } ;
183 struct ndmsg { __u8 ndm_family; __u8 ndm_pad1; __u16 ndm_pad2; __s32 ndm_ifindex; __u16 ndm_state; __u8 ndm_flags; __u8 ndm_type; } ;
41 struct rtnl_link_stats64 { __u64 rx_packets; __u64 tx_packets; __u64 rx_bytes; __u64 tx_bytes; __u64 rx_errors; __u64 tx_errors; __u64 rx_dropped; __u64 tx_dropped; __u64 multicast; __u64 collisions; __u64 rx_length_errors; __u64 rx_over_errors; __u64 rx_crc_errors; __u64 rx_frame_errors; __u64 rx_fifo_errors; __u64 rx_missed_errors; __u64 tx_aborted_errors; __u64 tx_carrier_errors; __u64 tx_fifo_errors; __u64 tx_heartbeat_errors; __u64 tx_window_errors; __u64 rx_compressed; __u64 tx_compressed; __u64 rx_nohandler; } ;
840 struct ifla_vf_stats { __u64 rx_packets; __u64 tx_packets; __u64 rx_bytes; __u64 tx_bytes; __u64 broadcast; __u64 multicast; } ;
16 struct ifla_vf_info { __u32 vf; __u8 mac[32U]; __u32 vlan; __u32 qos; __u32 spoofchk; __u32 linkstate; __u32 min_tx_rate; __u32 max_tx_rate; __u32 rss_query_en; __u32 trusted; } ;
118 struct tc_stats { __u64 bytes; __u32 packets; __u32 drops; __u32 overlimits; __u32 bps; __u32 pps; __u32 qlen; __u32 backlog; } ;
96 struct tc_sizespec { unsigned char cell_log; unsigned char size_log; short cell_align; int overhead; unsigned int linklayer; unsigned int mpu; unsigned int mtu; unsigned int tsize; } ;
486 struct netpoll_info ;
487 struct wireless_dev ;
488 struct wpan_dev ;
489 struct mpls_dev ;
490 struct udp_tunnel_info ;
491 struct bpf_prog ;
69 enum netdev_tx { __NETDEV_TX_MIN = -2147483648, NETDEV_TX_OK = 0, NETDEV_TX_BUSY = 16 } ;
112 typedef enum netdev_tx netdev_tx_t;
131 struct net_device_stats { unsigned long rx_packets; unsigned long tx_packets; unsigned long rx_bytes; unsigned long tx_bytes; unsigned long rx_errors; unsigned long tx_errors; unsigned long rx_dropped; unsigned long tx_dropped; unsigned long multicast; unsigned long collisions; unsigned long rx_length_errors; unsigned long rx_over_errors; unsigned long rx_crc_errors; unsigned long rx_frame_errors; unsigned long rx_fifo_errors; unsigned long rx_missed_errors; unsigned long tx_aborted_errors; unsigned long tx_carrier_errors; unsigned long tx_fifo_errors; unsigned long tx_heartbeat_errors; unsigned long tx_window_errors; unsigned long rx_compressed; unsigned long tx_compressed; } ;
194 struct neigh_parms ;
215 struct netdev_hw_addr_list { struct list_head list; int count; } ;
220 struct hh_cache { u16 hh_len; u16 __pad; seqlock_t hh_lock; unsigned long hh_data[16U]; } ;
249 struct header_ops { int (*create)(struct sk_buff *, struct net_device *, unsigned short, const void *, const void *, unsigned int); int (*parse)(const struct sk_buff *, unsigned char *); int (*cache)(const struct neighbour *, struct hh_cache *, __be16 ); void (*cache_update)(struct hh_cache *, const struct net_device *, const unsigned char *); bool (*validate)(const char *, unsigned int); } ;
300 struct napi_struct { struct list_head poll_list; unsigned long state; int weight; unsigned int gro_count; int (*poll)(struct napi_struct *, int); spinlock_t poll_lock; int poll_owner; struct net_device *dev; struct sk_buff *gro_list; struct sk_buff *skb; struct hrtimer timer; struct list_head dev_list; struct hlist_node napi_hash_node; unsigned int napi_id; } ;
346 enum rx_handler_result { RX_HANDLER_CONSUMED = 0, RX_HANDLER_ANOTHER = 1, RX_HANDLER_EXACT = 2, RX_HANDLER_PASS = 3 } ;
394 typedef enum rx_handler_result rx_handler_result_t;
395 typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **);
540 struct Qdisc ;
540 struct netdev_queue { struct net_device *dev; struct Qdisc *qdisc; struct Qdisc *qdisc_sleeping; struct kobject kobj; int numa_node; unsigned long tx_maxrate; unsigned long trans_timeout; spinlock_t _xmit_lock; int xmit_lock_owner; unsigned long trans_start; unsigned long state; struct dql dql; } ;
611 struct rps_map { unsigned int len; struct callback_head rcu; u16 cpus[0U]; } ;
623 struct rps_dev_flow { u16 cpu; u16 filter; unsigned int last_qtail; } ;
635 struct rps_dev_flow_table { unsigned int mask; struct callback_head rcu; struct rps_dev_flow flows[0U]; } ;
687 struct netdev_rx_queue { struct rps_map *rps_map; struct rps_dev_flow_table *rps_flow_table; struct kobject kobj; struct net_device *dev; } ;
710 struct xps_map { unsigned int len; unsigned int alloc_len; struct callback_head rcu; u16 queues[0U]; } ;
723 struct xps_dev_maps { struct callback_head rcu; struct xps_map *cpu_map[0U]; } ;
734 struct netdev_tc_txq { u16 count; u16 offset; } ;
745 struct netdev_fcoe_hbainfo { char manufacturer[64U]; char serial_number[64U]; char hardware_version[64U]; char driver_version[64U]; char optionrom_version[64U]; char firmware_version[64U]; char model[256U]; char model_description[256U]; } ;
761 struct netdev_phys_item_id { unsigned char id[32U]; unsigned char id_len; } ;
788 struct tc_cls_u32_offload ;
789 struct tc_cls_flower_offload ;
789 struct tc_cls_matchall_offload ;
789 union __anonunion____missing_field_name_470 { u8 tc; struct tc_cls_u32_offload *cls_u32; struct tc_cls_flower_offload *cls_flower; struct tc_cls_matchall_offload *cls_mall; } ;
789 struct tc_to_netdev { unsigned int type; union __anonunion____missing_field_name_470 __annonCompField106; } ;
804 enum xdp_netdev_command { XDP_SETUP_PROG = 0, XDP_QUERY_PROG = 1 } ;
809 union __anonunion____missing_field_name_471 { struct bpf_prog *prog; bool prog_attached; } ;
809 struct netdev_xdp { enum xdp_netdev_command command; union __anonunion____missing_field_name_471 __annonCompField107; } ;
832 struct net_device_ops { int (*ndo_init)(struct net_device *); void (*ndo_uninit)(struct net_device *); int (*ndo_open)(struct net_device *); int (*ndo_stop)(struct net_device *); netdev_tx_t (*ndo_start_xmit)(struct sk_buff *, struct net_device *); netdev_features_t (*ndo_features_check)(struct sk_buff *, struct net_device *, netdev_features_t ); u16 (*ndo_select_queue)(struct net_device *, struct sk_buff *, void *, u16 (*)(struct net_device *, struct sk_buff *)); void (*ndo_change_rx_flags)(struct net_device *, int); void (*ndo_set_rx_mode)(struct net_device *); int (*ndo_set_mac_address)(struct net_device *, void *); int (*ndo_validate_addr)(struct net_device *); int (*ndo_do_ioctl)(struct net_device *, struct ifreq *, int); int (*ndo_set_config)(struct net_device *, struct ifmap *); int (*ndo_change_mtu)(struct net_device *, int); int (*ndo_neigh_setup)(struct net_device *, struct neigh_parms *); void (*ndo_tx_timeout)(struct net_device *); struct rtnl_link_stats64 * (*ndo_get_stats64)(struct net_device *, struct rtnl_link_stats64 *); struct net_device_stats * (*ndo_get_stats)(struct net_device *); int (*ndo_vlan_rx_add_vid)(struct net_device *, __be16 , u16 ); int (*ndo_vlan_rx_kill_vid)(struct net_device *, __be16 , u16 ); void (*ndo_poll_controller)(struct net_device *); int (*ndo_netpoll_setup)(struct net_device *, struct netpoll_info *); void (*ndo_netpoll_cleanup)(struct net_device *); int (*ndo_busy_poll)(struct napi_struct *); int (*ndo_set_vf_mac)(struct net_device *, int, u8 *); int (*ndo_set_vf_vlan)(struct net_device *, int, u16 , u8 ); int (*ndo_set_vf_rate)(struct net_device *, int, int, int); int (*ndo_set_vf_spoofchk)(struct net_device *, int, bool ); int (*ndo_set_vf_trust)(struct net_device *, int, bool ); int (*ndo_get_vf_config)(struct net_device *, int, struct ifla_vf_info *); int (*ndo_set_vf_link_state)(struct net_device *, int, int); int (*ndo_get_vf_stats)(struct net_device *, int, struct ifla_vf_stats *); int 
(*ndo_set_vf_port)(struct net_device *, int, struct nlattr **); int (*ndo_get_vf_port)(struct net_device *, int, struct sk_buff *); int (*ndo_set_vf_guid)(struct net_device *, int, u64 , int); int (*ndo_set_vf_rss_query_en)(struct net_device *, int, bool ); int (*ndo_setup_tc)(struct net_device *, u32 , __be16 , struct tc_to_netdev *); int (*ndo_fcoe_enable)(struct net_device *); int (*ndo_fcoe_disable)(struct net_device *); int (*ndo_fcoe_ddp_setup)(struct net_device *, u16 , struct scatterlist *, unsigned int); int (*ndo_fcoe_ddp_done)(struct net_device *, u16 ); int (*ndo_fcoe_ddp_target)(struct net_device *, u16 , struct scatterlist *, unsigned int); int (*ndo_fcoe_get_hbainfo)(struct net_device *, struct netdev_fcoe_hbainfo *); int (*ndo_fcoe_get_wwn)(struct net_device *, u64 *, int); int (*ndo_rx_flow_steer)(struct net_device *, const struct sk_buff *, u16 , u32 ); int (*ndo_add_slave)(struct net_device *, struct net_device *); int (*ndo_del_slave)(struct net_device *, struct net_device *); netdev_features_t (*ndo_fix_features)(struct net_device *, netdev_features_t ); int (*ndo_set_features)(struct net_device *, netdev_features_t ); int (*ndo_neigh_construct)(struct net_device *, struct neighbour *); void (*ndo_neigh_destroy)(struct net_device *, struct neighbour *); int (*ndo_fdb_add)(struct ndmsg *, struct nlattr **, struct net_device *, const unsigned char *, u16 , u16 ); int (*ndo_fdb_del)(struct ndmsg *, struct nlattr **, struct net_device *, const unsigned char *, u16 ); int (*ndo_fdb_dump)(struct sk_buff *, struct netlink_callback *, struct net_device *, struct net_device *, int); int (*ndo_bridge_setlink)(struct net_device *, struct nlmsghdr *, u16 ); int (*ndo_bridge_getlink)(struct sk_buff *, u32 , u32 , struct net_device *, u32 , int); int (*ndo_bridge_dellink)(struct net_device *, struct nlmsghdr *, u16 ); int (*ndo_change_carrier)(struct net_device *, bool ); int (*ndo_get_phys_port_id)(struct net_device *, struct netdev_phys_item_id *); int 
(*ndo_get_phys_port_name)(struct net_device *, char *, size_t ); void (*ndo_udp_tunnel_add)(struct net_device *, struct udp_tunnel_info *); void (*ndo_udp_tunnel_del)(struct net_device *, struct udp_tunnel_info *); void * (*ndo_dfwd_add_station)(struct net_device *, struct net_device *); void (*ndo_dfwd_del_station)(struct net_device *, void *); netdev_tx_t (*ndo_dfwd_start_xmit)(struct sk_buff *, struct net_device *, void *); int (*ndo_get_lock_subclass)(struct net_device *); int (*ndo_set_tx_maxrate)(struct net_device *, int, u32 ); int (*ndo_get_iflink)(const struct net_device *); int (*ndo_change_proto_down)(struct net_device *, bool ); int (*ndo_fill_metadata_dst)(struct net_device *, struct sk_buff *); void (*ndo_set_rx_headroom)(struct net_device *, int); int (*ndo_xdp)(struct net_device *, struct netdev_xdp *); } ;
1354 struct __anonstruct_adj_list_472 { struct list_head upper; struct list_head lower; } ;
1354 struct __anonstruct_all_adj_list_473 { struct list_head upper; struct list_head lower; } ;
1354 struct iw_handler_def ;
1354 struct iw_public_data ;
1354 struct switchdev_ops ;
1354 struct l3mdev_ops ;
1354 struct ndisc_ops ;
1354 struct vlan_info ;
1354 struct tipc_bearer ;
1354 struct in_device ;
1354 struct dn_dev ;
1354 struct inet6_dev ;
1354 struct tcf_proto ;
1354 struct cpu_rmap ;
1354 struct pcpu_lstats ;
1354 struct pcpu_sw_netstats ;
1354 struct pcpu_dstats ;
1354 struct pcpu_vstats ;
1354 union __anonunion____missing_field_name_474 { void *ml_priv; struct pcpu_lstats *lstats; struct pcpu_sw_netstats *tstats; struct pcpu_dstats *dstats; struct pcpu_vstats *vstats; } ;
1354 struct garp_port ;
1354 struct mrp_port ;
1354 struct rtnl_link_ops ;
1354 struct net_device { char name[16U]; struct hlist_node name_hlist; char *ifalias; unsigned long mem_end; unsigned long mem_start; unsigned long base_addr; int irq; atomic_t carrier_changes; unsigned long state; struct list_head dev_list; struct list_head napi_list; struct list_head unreg_list; struct list_head close_list; struct list_head ptype_all; struct list_head ptype_specific; struct __anonstruct_adj_list_472 adj_list; struct __anonstruct_all_adj_list_473 all_adj_list; netdev_features_t features; netdev_features_t hw_features; netdev_features_t wanted_features; netdev_features_t vlan_features; netdev_features_t hw_enc_features; netdev_features_t mpls_features; netdev_features_t gso_partial_features; int ifindex; int group; struct net_device_stats stats; atomic_long_t rx_dropped; atomic_long_t tx_dropped; atomic_long_t rx_nohandler; const struct iw_handler_def *wireless_handlers; struct iw_public_data *wireless_data; const struct net_device_ops *netdev_ops; const struct ethtool_ops *ethtool_ops; const struct switchdev_ops *switchdev_ops; const struct l3mdev_ops *l3mdev_ops; const struct ndisc_ops *ndisc_ops; const struct header_ops *header_ops; unsigned int flags; unsigned int priv_flags; unsigned short gflags; unsigned short padded; unsigned char operstate; unsigned char link_mode; unsigned char if_port; unsigned char dma; unsigned int mtu; unsigned short type; unsigned short hard_header_len; unsigned short needed_headroom; unsigned short needed_tailroom; unsigned char perm_addr[32U]; unsigned char addr_assign_type; unsigned char addr_len; unsigned short neigh_priv_len; unsigned short dev_id; unsigned short dev_port; spinlock_t addr_list_lock; unsigned char name_assign_type; bool uc_promisc; struct netdev_hw_addr_list uc; struct netdev_hw_addr_list mc; struct netdev_hw_addr_list dev_addrs; struct kset *queues_kset; unsigned int promiscuity; unsigned int allmulti; struct vlan_info *vlan_info; struct dsa_switch_tree *dsa_ptr; struct tipc_bearer *tipc_ptr; 
void *atalk_ptr; struct in_device *ip_ptr; struct dn_dev *dn_ptr; struct inet6_dev *ip6_ptr; void *ax25_ptr; struct wireless_dev *ieee80211_ptr; struct wpan_dev *ieee802154_ptr; struct mpls_dev *mpls_ptr; unsigned long last_rx; unsigned char *dev_addr; struct netdev_rx_queue *_rx; unsigned int num_rx_queues; unsigned int real_num_rx_queues; unsigned long gro_flush_timeout; rx_handler_func_t *rx_handler; void *rx_handler_data; struct tcf_proto *ingress_cl_list; struct netdev_queue *ingress_queue; struct list_head nf_hooks_ingress; unsigned char broadcast[32U]; struct cpu_rmap *rx_cpu_rmap; struct hlist_node index_hlist; struct netdev_queue *_tx; unsigned int num_tx_queues; unsigned int real_num_tx_queues; struct Qdisc *qdisc; unsigned long tx_queue_len; spinlock_t tx_global_lock; int watchdog_timeo; struct xps_dev_maps *xps_maps; struct tcf_proto *egress_cl_list; u32 offload_fwd_mark; struct timer_list watchdog_timer; int *pcpu_refcnt; struct list_head todo_list; struct list_head link_watch_list; unsigned char reg_state; bool dismantle; unsigned short rtnl_link_state; void (*destructor)(struct net_device *); struct netpoll_info *npinfo; possible_net_t nd_net; union __anonunion____missing_field_name_474 __annonCompField108; struct garp_port *garp_port; struct mrp_port *mrp_port; struct device dev; const struct attribute_group *sysfs_groups[4U]; const struct attribute_group *sysfs_rx_queue_group; const struct rtnl_link_ops *rtnl_link_ops; unsigned int gso_max_size; u16 gso_max_segs; const struct dcbnl_rtnl_ops *dcbnl_ops; u8 num_tc; struct netdev_tc_txq tc_to_txq[16U]; u8 prio_tc_map[16U]; unsigned int fcoe_ddp_xid; struct netprio_map *priomap; struct phy_device *phydev; struct lock_class_key *qdisc_tx_busylock; struct lock_class_key *qdisc_running_key; bool proto_down; } ;
2165 struct packet_type { __be16 type; struct net_device *dev; int (*func)(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *); bool (*id_match)(struct packet_type *, struct sock *); void *af_packet_priv; struct list_head list; } ;
2195 struct pcpu_sw_netstats { u64 rx_packets; u64 rx_bytes; u64 tx_packets; u64 tx_bytes; struct u64_stats_sync syncp; } ;
103 struct page_counter { atomic_long_t count; unsigned long limit; struct page_counter *parent; unsigned long watermark; unsigned long failcnt; } ;
33 struct eventfd_ctx ;
41 struct vmpressure { unsigned long scanned; unsigned long reclaimed; unsigned long tree_scanned; unsigned long tree_reclaimed; struct spinlock sr_lock; struct list_head events; struct mutex events_lock; struct work_struct work; } ;
44 struct fprop_global { struct percpu_counter events; unsigned int period; seqcount_t sequence; } ;
72 struct fprop_local_percpu { struct percpu_counter events; unsigned int period; raw_spinlock_t lock; } ;
32 typedef int congested_fn(void *, int);
41 struct bdi_writeback_congested { unsigned long state; atomic_t refcnt; struct backing_dev_info *bdi; int blkcg_id; struct rb_node rb_node; } ;
60 union __anonunion____missing_field_name_479 { struct work_struct release_work; struct callback_head rcu; } ;
60 struct bdi_writeback { struct backing_dev_info *bdi; unsigned long state; unsigned long last_old_flush; struct list_head b_dirty; struct list_head b_io; struct list_head b_more_io; struct list_head b_dirty_time; spinlock_t list_lock; struct percpu_counter stat[4U]; struct bdi_writeback_congested *congested; unsigned long bw_time_stamp; unsigned long dirtied_stamp; unsigned long written_stamp; unsigned long write_bandwidth; unsigned long avg_write_bandwidth; unsigned long dirty_ratelimit; unsigned long balanced_dirty_ratelimit; struct fprop_local_percpu completions; int dirty_exceeded; spinlock_t work_lock; struct list_head work_list; struct delayed_work dwork; struct list_head bdi_node; struct percpu_ref refcnt; struct fprop_local_percpu memcg_completions; struct cgroup_subsys_state *memcg_css; struct cgroup_subsys_state *blkcg_css; struct list_head memcg_node; struct list_head blkcg_node; union __anonunion____missing_field_name_479 __annonCompField109; } ;
134 struct backing_dev_info { struct list_head bdi_list; unsigned long ra_pages; unsigned int capabilities; congested_fn *congested_fn; void *congested_data; char *name; unsigned int min_ratio; unsigned int max_ratio; unsigned int max_prop_frac; atomic_long_t tot_write_bandwidth; struct bdi_writeback wb; struct list_head wb_list; struct radix_tree_root cgwb_tree; struct rb_root cgwb_congested_tree; atomic_t usage_cnt; wait_queue_head_t wb_waitq; struct device *dev; struct device *owner; struct timer_list laptop_mode_wb_timer; struct dentry *debug_dir; struct dentry *debug_stats; } ;
14 enum writeback_sync_modes { WB_SYNC_NONE = 0, WB_SYNC_ALL = 1 } ;
31 struct writeback_control { long nr_to_write; long pages_skipped; loff_t range_start; loff_t range_end; enum writeback_sync_modes sync_mode; unsigned char for_kupdate; unsigned char for_background; unsigned char tagged_writepages; unsigned char for_reclaim; unsigned char range_cyclic; unsigned char for_sync; struct bdi_writeback *wb; struct inode *inode; int wb_id; int wb_lcand_id; int wb_tcand_id; size_t wb_bytes; size_t wb_lcand_bytes; size_t wb_tcand_bytes; } ;
101 struct wb_domain { spinlock_t lock; struct fprop_global completions; struct timer_list period_timer; unsigned long period_time; unsigned long dirty_limit_tstamp; unsigned long dirty_limit; } ;
12 typedef void * mempool_alloc_t(gfp_t , void *);
13 typedef void mempool_free_t(void *, void *);
14 struct mempool_s { spinlock_t lock; int min_nr; int curr_nr; void **elements; void *pool_data; mempool_alloc_t *alloc; mempool_free_t *free; wait_queue_head_t wait; } ;
25 typedef struct mempool_s mempool_t;
79 union __anonunion____missing_field_name_480 { struct list_head q_node; struct kmem_cache *__rcu_icq_cache; } ;
79 union __anonunion____missing_field_name_481 { struct hlist_node ioc_node; struct callback_head __rcu_head; } ;
79 struct io_cq { struct request_queue *q; struct io_context *ioc; union __anonunion____missing_field_name_480 __annonCompField110; union __anonunion____missing_field_name_481 __annonCompField111; unsigned int flags; } ;
92 struct io_context { atomic_long_t refcount; atomic_t active_ref; atomic_t nr_tasks; spinlock_t lock; unsigned short ioprio; int nr_batch_requests; unsigned long last_waited; struct radix_tree_root icq_tree; struct io_cq *icq_hint; struct hlist_head icq_list; struct work_struct release_work; } ;
295 struct bio_integrity_payload { struct bio *bip_bio; struct bvec_iter bip_iter; bio_end_io_t *bip_end_io; unsigned short bip_slab; unsigned short bip_vcnt; unsigned short bip_max_vcnt; unsigned short bip_flags; struct work_struct bip_work; struct bio_vec *bip_vec; struct bio_vec bip_inline_vecs[0U]; } ;
529 struct bio_list { struct bio *head; struct bio *tail; } ;
661 struct bio_set { struct kmem_cache *bio_slab; unsigned int front_pad; mempool_t *bio_pool; mempool_t *bvec_pool; mempool_t *bio_integrity_pool; mempool_t *bvec_integrity_pool; spinlock_t rescue_lock; struct bio_list rescue_list; struct work_struct rescue_work; struct workqueue_struct *rescue_workqueue; } ;
87 struct mem_cgroup_id { int id; atomic_t ref; } ;
104 struct mem_cgroup_stat_cpu { long count[11U]; unsigned long events[8U]; unsigned long nr_page_events; unsigned long targets[3U]; } ;
111 struct mem_cgroup_reclaim_iter { struct mem_cgroup *position; unsigned int generation; } ;
117 struct mem_cgroup_per_node { struct lruvec lruvec; unsigned long lru_size[5U]; struct mem_cgroup_reclaim_iter iter[13U]; struct rb_node tree_node; unsigned long usage_in_excess; bool on_tree; struct mem_cgroup *memcg; } ;
133 struct mem_cgroup_threshold { struct eventfd_ctx *eventfd; unsigned long threshold; } ;
139 struct mem_cgroup_threshold_ary { int current_threshold; unsigned int size; struct mem_cgroup_threshold entries[0U]; } ;
149 struct mem_cgroup_thresholds { struct mem_cgroup_threshold_ary *primary; struct mem_cgroup_threshold_ary *spare; } ;
160 enum memcg_kmem_state { KMEM_NONE = 0, KMEM_ALLOCATED = 1, KMEM_ONLINE = 2 } ;
166 struct mem_cgroup { struct cgroup_subsys_state css; struct mem_cgroup_id id; struct page_counter memory; struct page_counter swap; struct page_counter memsw; struct page_counter kmem; struct page_counter tcpmem; unsigned long low; unsigned long high; struct work_struct high_work; unsigned long soft_limit; struct vmpressure vmpressure; bool use_hierarchy; bool oom_lock; int under_oom; int swappiness; int oom_kill_disable; struct cgroup_file events_file; struct mutex thresholds_lock; struct mem_cgroup_thresholds thresholds; struct mem_cgroup_thresholds memsw_thresholds; struct list_head oom_notify; unsigned long move_charge_at_immigrate; atomic_t moving_account; spinlock_t move_lock; struct task_struct *move_lock_task; unsigned long move_lock_flags; struct mem_cgroup_stat_cpu *stat; unsigned long socket_pressure; bool tcpmem_active; int tcpmem_pressure; int kmemcg_id; enum memcg_kmem_state kmem_state; int last_scanned_node; nodemask_t scan_nodes; atomic_t numainfo_events; atomic_t numainfo_updating; struct list_head cgwb_list; struct wb_domain cgwb_domain; struct list_head event_list; spinlock_t event_list_lock; struct mem_cgroup_per_node *nodeinfo[0U]; } ;
27 struct gnet_stats_basic_packed { __u64 bytes; __u32 packets; } ;
41 struct gnet_stats_rate_est64 { __u64 bps; __u64 pps; } ;
51 struct gnet_stats_queue { __u32 qlen; __u32 backlog; __u32 drops; __u32 requeues; __u32 overlimits; } ;
519 struct tcmsg { unsigned char tcm_family; unsigned char tcm__pad1; unsigned short tcm__pad2; int tcm_ifindex; __u32 tcm_handle; __u32 tcm_parent; __u32 tcm_info; } ;
122 struct gnet_stats_basic_cpu { struct gnet_stats_basic_packed bstats; struct u64_stats_sync syncp; } ;
13 struct gnet_dump { spinlock_t *lock; struct sk_buff *skb; struct nlattr *tail; int compat_tc_stats; int compat_xstats; int padattr; void *xstats; int xstats_len; struct tc_stats tc_stats; } ;
87 struct nla_policy { u16 type; u16 len; } ;
25 struct rtnl_link_ops { struct list_head list; const char *kind; size_t priv_size; void (*setup)(struct net_device *); int maxtype; const struct nla_policy *policy; int (*validate)(struct nlattr **, struct nlattr **); int (*newlink)(struct net *, struct net_device *, struct nlattr **, struct nlattr **); int (*changelink)(struct net_device *, struct nlattr **, struct nlattr **); void (*dellink)(struct net_device *, struct list_head *); size_t (*get_size)(const struct net_device *); int (*fill_info)(struct sk_buff *, const struct net_device *); size_t (*get_xstats_size)(const struct net_device *); int (*fill_xstats)(struct sk_buff *, const struct net_device *); unsigned int (*get_num_tx_queues)(); unsigned int (*get_num_rx_queues)(); int slave_maxtype; const struct nla_policy *slave_policy; int (*slave_validate)(struct nlattr **, struct nlattr **); int (*slave_changelink)(struct net_device *, struct net_device *, struct nlattr **, struct nlattr **); size_t (*get_slave_size)(const struct net_device *, const struct net_device *); int (*fill_slave_info)(struct sk_buff *, const struct net_device *, const struct net_device *); struct net * (*get_link_net)(const struct net_device *); size_t (*get_linkxstats_size)(const struct net_device *, int); int (*fill_linkxstats)(struct sk_buff *, const struct net_device *, int *, int); } ;
158 struct Qdisc_ops ;
159 struct qdisc_walker ;
160 struct tcf_walker ;
30 struct qdisc_size_table { struct callback_head rcu; struct list_head list; struct tc_sizespec szopts; int refcnt; u16 data[]; } ;
38 struct Qdisc { int (*enqueue)(struct sk_buff *, struct Qdisc *, struct sk_buff **); struct sk_buff * (*dequeue)(struct Qdisc *); unsigned int flags; u32 limit; const struct Qdisc_ops *ops; struct qdisc_size_table *stab; struct list_head list; u32 handle; u32 parent; void *u32_node; struct netdev_queue *dev_queue; struct gnet_stats_rate_est64 rate_est; struct gnet_stats_basic_cpu *cpu_bstats; struct gnet_stats_queue *cpu_qstats; struct sk_buff *gso_skb; struct sk_buff_head q; struct gnet_stats_basic_packed bstats; seqcount_t running; struct gnet_stats_queue qstats; unsigned long state; struct Qdisc *next_sched; struct sk_buff *skb_bad_txq; struct callback_head callback_head; int padded; atomic_t refcnt; spinlock_t busylock; } ;
126 struct Qdisc_class_ops { struct netdev_queue * (*select_queue)(struct Qdisc *, struct tcmsg *); int (*graft)(struct Qdisc *, unsigned long, struct Qdisc *, struct Qdisc **); struct Qdisc * (*leaf)(struct Qdisc *, unsigned long); void (*qlen_notify)(struct Qdisc *, unsigned long); unsigned long int (*get)(struct Qdisc *, u32 ); void (*put)(struct Qdisc *, unsigned long); int (*change)(struct Qdisc *, u32 , u32 , struct nlattr **, unsigned long *); int (*delete)(struct Qdisc *, unsigned long); void (*walk)(struct Qdisc *, struct qdisc_walker *); struct tcf_proto ** (*tcf_chain)(struct Qdisc *, unsigned long); bool (*tcf_cl_offload)(u32 ); unsigned long int (*bind_tcf)(struct Qdisc *, unsigned long, u32 ); void (*unbind_tcf)(struct Qdisc *, unsigned long); int (*dump)(struct Qdisc *, unsigned long, struct sk_buff *, struct tcmsg *); int (*dump_stats)(struct Qdisc *, unsigned long, struct gnet_dump *); } ;
158 struct Qdisc_ops { struct Qdisc_ops *next; const struct Qdisc_class_ops *cl_ops; char id[16U]; int priv_size; int (*enqueue)(struct sk_buff *, struct Qdisc *, struct sk_buff **); struct sk_buff * (*dequeue)(struct Qdisc *); struct sk_buff * (*peek)(struct Qdisc *); int (*init)(struct Qdisc *, struct nlattr *); void (*reset)(struct Qdisc *); void (*destroy)(struct Qdisc *); int (*change)(struct Qdisc *, struct nlattr *); void (*attach)(struct Qdisc *); int (*dump)(struct Qdisc *, struct sk_buff *); int (*dump_stats)(struct Qdisc *, struct gnet_dump *); struct module *owner; } ;
183 struct tcf_result { unsigned long class; u32 classid; } ;
189 struct tcf_proto_ops { struct list_head head; char kind[16U]; int (*classify)(struct sk_buff *, const struct tcf_proto *, struct tcf_result *); int (*init)(struct tcf_proto *); bool (*destroy)(struct tcf_proto *, bool ); unsigned long int (*get)(struct tcf_proto *, u32 ); int (*change)(struct net *, struct sk_buff *, struct tcf_proto *, unsigned long, u32 , struct nlattr **, unsigned long *, bool ); int (*delete)(struct tcf_proto *, unsigned long); void (*walk)(struct tcf_proto *, struct tcf_walker *); int (*dump)(struct net *, struct tcf_proto *, unsigned long, struct sk_buff *, struct tcmsg *); struct module *owner; } ;
214 struct tcf_proto { struct tcf_proto *next; void *root; int (*classify)(struct sk_buff *, const struct tcf_proto *, struct tcf_result *); __be16 protocol; u32 prio; u32 classid; struct Qdisc *q; void *data; const struct tcf_proto_ops *ops; struct callback_head rcu; } ;
806 struct sock_filter { __u16 code; __u8 jt; __u8 jf; __u32 k; } ;
49 struct bpf_insn { __u8 code; unsigned char dst_reg; unsigned char src_reg; __s16 off; __s32 imm; } ;
88 enum bpf_prog_type { BPF_PROG_TYPE_UNSPEC = 0, BPF_PROG_TYPE_SOCKET_FILTER = 1, BPF_PROG_TYPE_KPROBE = 2, BPF_PROG_TYPE_SCHED_CLS = 3, BPF_PROG_TYPE_SCHED_ACT = 4, BPF_PROG_TYPE_TRACEPOINT = 5, BPF_PROG_TYPE_XDP = 6 } ;
472 struct bpf_prog_aux ;
323 struct sock_fprog_kern { u16 len; struct sock_filter *filter; } ;
334 union __anonunion____missing_field_name_505 { struct sock_filter insns[0U]; struct bpf_insn insnsi[0U]; } ;
334 struct bpf_prog { u16 pages; unsigned char jited; unsigned char gpl_compatible; unsigned char cb_access; unsigned char dst_needed; u32 len; enum bpf_prog_type type; struct bpf_prog_aux *aux; struct sock_fprog_kern *orig_prog; unsigned int (*bpf_func)(const struct sk_buff *, const struct bpf_insn *); union __anonunion____missing_field_name_505 __annonCompField118; } ;
355 struct sk_filter { atomic_t refcnt; struct callback_head rcu; struct bpf_prog *prog; } ;
138 struct pollfd { int fd; short events; short revents; } ;
32 struct poll_table_struct { void (*_qproc)(struct file *, wait_queue_head_t *, struct poll_table_struct *); unsigned long _key; } ;
187 struct neigh_table ;
187 struct neigh_parms { possible_net_t net; struct net_device *dev; struct list_head list; int (*neigh_setup)(struct neighbour *); void (*neigh_cleanup)(struct neighbour *); struct neigh_table *tbl; void *sysctl_table; int dead; atomic_t refcnt; struct callback_head callback_head; int reachable_time; int data[13U]; unsigned long data_state[1U]; } ;
110 struct neigh_statistics { unsigned long allocs; unsigned long destroys; unsigned long hash_grows; unsigned long res_failed; unsigned long lookups; unsigned long hits; unsigned long rcv_probes_mcast; unsigned long rcv_probes_ucast; unsigned long periodic_gc_runs; unsigned long forced_gc_runs; unsigned long unres_discards; unsigned long table_fulls; } ;
130 struct neigh_ops ;
130 struct neighbour { struct neighbour *next; struct neigh_table *tbl; struct neigh_parms *parms; unsigned long confirmed; unsigned long updated; rwlock_t lock; atomic_t refcnt; struct sk_buff_head arp_queue; unsigned int arp_queue_len_bytes; struct timer_list timer; unsigned long used; atomic_t probes; __u8 flags; __u8 nud_state; __u8 type; __u8 dead; seqlock_t ha_lock; unsigned char ha[32U]; struct hh_cache hh; int (*output)(struct neighbour *, struct sk_buff *); const struct neigh_ops *ops; struct callback_head rcu; struct net_device *dev; u8 primary_key[0U]; } ;
159 struct neigh_ops { int family; void (*solicit)(struct neighbour *, struct sk_buff *); void (*error_report)(struct neighbour *, struct sk_buff *); int (*output)(struct neighbour *, struct sk_buff *); int (*connected_output)(struct neighbour *, struct sk_buff *); } ;
167 struct pneigh_entry { struct pneigh_entry *next; possible_net_t net; struct net_device *dev; u8 flags; u8 key[0U]; } ;
175 struct neigh_hash_table { struct neighbour **hash_buckets; unsigned int hash_shift; __u32 hash_rnd[4U]; struct callback_head rcu; } ;
188 struct neigh_table { int family; int entry_size; int key_len; __be16 protocol; __u32 (*hash)(const void *, const struct net_device *, __u32 *); bool (*key_eq)(const struct neighbour *, const void *); int (*constructor)(struct neighbour *); int (*pconstructor)(struct pneigh_entry *); void (*pdestructor)(struct pneigh_entry *); void (*proxy_redo)(struct sk_buff *); char *id; struct neigh_parms parms; struct list_head parms_list; int gc_interval; int gc_thresh1; int gc_thresh2; int gc_thresh3; unsigned long last_flush; struct delayed_work gc_work; struct timer_list proxy_timer; struct sk_buff_head proxy_queue; atomic_t entries; rwlock_t lock; unsigned long last_rand; struct neigh_statistics *stats; struct neigh_hash_table *nht; struct pneigh_entry **phash_buckets; } ;
520 struct lwtunnel_state ;
520 struct dn_route ;
520 union __anonunion____missing_field_name_521 { struct dst_entry *next; struct rtable *rt_next; struct rt6_info *rt6_next; struct dn_route *dn_next; } ;
520 struct dst_entry { struct callback_head callback_head; struct dst_entry *child; struct net_device *dev; struct dst_ops *ops; unsigned long _metrics; unsigned long expires; struct dst_entry *path; struct dst_entry *from; struct xfrm_state *xfrm; int (*input)(struct sk_buff *); int (*output)(struct net *, struct sock *, struct sk_buff *); unsigned short flags; unsigned short pending_confirm; short error; short obsolete; unsigned short header_len; unsigned short trailer_len; __u32 tclassid; long __pad_to_align_refcnt[2U]; atomic_t __refcnt; int __use; unsigned long lastuse; struct lwtunnel_state *lwtstate; union __anonunion____missing_field_name_521 __annonCompField119; } ;
110 struct __anonstruct_socket_lock_t_522 { spinlock_t slock; int owned; wait_queue_head_t wq; struct lockdep_map dep_map; } ;
110 typedef struct __anonstruct_socket_lock_t_522 socket_lock_t;
110 struct proto ;
116 typedef __u32 __portpair;
117 typedef __u64 __addrpair;
118 struct __anonstruct____missing_field_name_524 { __be32 skc_daddr; __be32 skc_rcv_saddr; } ;
118 union __anonunion____missing_field_name_523 { __addrpair skc_addrpair; struct __anonstruct____missing_field_name_524 __annonCompField120; } ;
118 union __anonunion____missing_field_name_525 { unsigned int skc_hash; __u16 skc_u16hashes[2U]; } ;
118 struct __anonstruct____missing_field_name_527 { __be16 skc_dport; __u16 skc_num; } ;
118 union __anonunion____missing_field_name_526 { __portpair skc_portpair; struct __anonstruct____missing_field_name_527 __annonCompField123; } ;
118 union __anonunion____missing_field_name_528 { struct hlist_node skc_bind_node; struct hlist_node skc_portaddr_node; } ;
118 struct inet_timewait_death_row ;
118 union __anonunion____missing_field_name_529 { unsigned long skc_flags; struct sock *skc_listener; struct inet_timewait_death_row *skc_tw_dr; } ;
118 union __anonunion____missing_field_name_530 { struct hlist_node skc_node; struct hlist_nulls_node skc_nulls_node; } ;
118 union __anonunion____missing_field_name_531 { int skc_incoming_cpu; u32 skc_rcv_wnd; u32 skc_tw_rcv_nxt; } ;
118 union __anonunion____missing_field_name_532 { u32 skc_rxhash; u32 skc_window_clamp; u32 skc_tw_snd_nxt; } ;
118 struct sock_common { union __anonunion____missing_field_name_523 __annonCompField121; union __anonunion____missing_field_name_525 __annonCompField122; union __anonunion____missing_field_name_526 __annonCompField124; unsigned short skc_family; volatile unsigned char skc_state; unsigned char skc_reuse; unsigned char skc_reuseport; unsigned char skc_ipv6only; unsigned char skc_net_refcnt; int skc_bound_dev_if; union __anonunion____missing_field_name_528 __annonCompField125; struct proto *skc_prot; possible_net_t skc_net; struct in6_addr skc_v6_daddr; struct in6_addr skc_v6_rcv_saddr; atomic64_t skc_cookie; union __anonunion____missing_field_name_529 __annonCompField126; int skc_dontcopy_begin[0U]; union __anonunion____missing_field_name_530 __annonCompField127; int skc_tx_queue_mapping; union __anonunion____missing_field_name_531 __annonCompField128; atomic_t skc_refcnt; int skc_dontcopy_end[0U]; union __anonunion____missing_field_name_532 __annonCompField129; } ;
230 struct __anonstruct_sk_backlog_533 { atomic_t rmem_alloc; int len; struct sk_buff *head; struct sk_buff *tail; } ;
230 union __anonunion____missing_field_name_534 { struct socket_wq *sk_wq; struct socket_wq *sk_wq_raw; } ;
230 struct sock_reuseport ;
230 struct sock { struct sock_common __sk_common; socket_lock_t sk_lock; struct sk_buff_head sk_receive_queue; struct __anonstruct_sk_backlog_533 sk_backlog; int sk_forward_alloc; __u32 sk_txhash; unsigned int sk_napi_id; unsigned int sk_ll_usec; atomic_t sk_drops; int sk_rcvbuf; struct sk_filter *sk_filter; union __anonunion____missing_field_name_534 __annonCompField130; struct xfrm_policy *sk_policy[2U]; struct dst_entry *sk_rx_dst; struct dst_entry *sk_dst_cache; atomic_t sk_wmem_alloc; atomic_t sk_omem_alloc; int sk_sndbuf; struct sk_buff_head sk_write_queue; unsigned char sk_padding; unsigned char sk_no_check_tx; unsigned char sk_no_check_rx; unsigned char sk_userlocks; unsigned char sk_protocol; unsigned short sk_type; int sk_wmem_queued; gfp_t sk_allocation; u32 sk_pacing_rate; u32 sk_max_pacing_rate; netdev_features_t sk_route_caps; netdev_features_t sk_route_nocaps; int sk_gso_type; unsigned int sk_gso_max_size; u16 sk_gso_max_segs; int sk_rcvlowat; unsigned long sk_lingertime; struct sk_buff_head sk_error_queue; struct proto *sk_prot_creator; rwlock_t sk_callback_lock; int sk_err; int sk_err_soft; u32 sk_ack_backlog; u32 sk_max_ack_backlog; __u32 sk_priority; __u32 sk_mark; struct pid *sk_peer_pid; const struct cred *sk_peer_cred; long sk_rcvtimeo; long sk_sndtimeo; struct timer_list sk_timer; ktime_t sk_stamp; u16 sk_tsflags; u8 sk_shutdown; u32 sk_tskey; struct socket *sk_socket; void *sk_user_data; struct page_frag sk_frag; struct sk_buff *sk_send_head; __s32 sk_peek_off; int sk_write_pending; void *sk_security; struct sock_cgroup_data sk_cgrp_data; struct mem_cgroup *sk_memcg; void (*sk_state_change)(struct sock *); void (*sk_data_ready)(struct sock *); void (*sk_write_space)(struct sock *); void (*sk_error_report)(struct sock *); int (*sk_backlog_rcv)(struct sock *, struct sk_buff *); void (*sk_destruct)(struct sock *); struct sock_reuseport *sk_reuseport_cb; struct callback_head sk_rcu; } ;
948 struct request_sock_ops ;
949 struct timewait_sock_ops ;
950 struct inet_hashinfo ;
951 struct raw_hashinfo ;
965 struct udp_table ;
965 union __anonunion_h_545 { struct inet_hashinfo *hashinfo; struct udp_table *udp_table; struct raw_hashinfo *raw_hash; } ;
965 struct proto { void (*close)(struct sock *, long); int (*connect)(struct sock *, struct sockaddr *, int); int (*disconnect)(struct sock *, int); struct sock * (*accept)(struct sock *, int, int *); int (*ioctl)(struct sock *, int, unsigned long); int (*init)(struct sock *); void (*destroy)(struct sock *); void (*shutdown)(struct sock *, int); int (*setsockopt)(struct sock *, int, int, char *, unsigned int); int (*getsockopt)(struct sock *, int, int, char *, int *); int (*compat_setsockopt)(struct sock *, int, int, char *, unsigned int); int (*compat_getsockopt)(struct sock *, int, int, char *, int *); int (*compat_ioctl)(struct sock *, unsigned int, unsigned long); int (*sendmsg)(struct sock *, struct msghdr *, size_t ); int (*recvmsg)(struct sock *, struct msghdr *, size_t , int, int, int *); int (*sendpage)(struct sock *, struct page *, int, size_t , int); int (*bind)(struct sock *, struct sockaddr *, int); int (*backlog_rcv)(struct sock *, struct sk_buff *); void (*release_cb)(struct sock *); int (*hash)(struct sock *); void (*unhash)(struct sock *); void (*rehash)(struct sock *); int (*get_port)(struct sock *, unsigned short); void (*clear_sk)(struct sock *, int); unsigned int inuse_idx; bool (*stream_memory_free)(const struct sock *); void (*enter_memory_pressure)(struct sock *); atomic_long_t *memory_allocated; struct percpu_counter *sockets_allocated; int *memory_pressure; long *sysctl_mem; int *sysctl_wmem; int *sysctl_rmem; int max_header; bool no_autobind; struct kmem_cache *slab; unsigned int obj_size; int slab_flags; struct percpu_counter *orphan_count; struct request_sock_ops *rsk_prot; struct timewait_sock_ops *twsk_prot; union __anonunion_h_545 h; struct module *owner; char name[32U]; struct list_head node; int (*diag_destroy)(struct sock *, int); } ;
174 struct request_sock_ops { int family; int obj_size; struct kmem_cache *slab; char *slab_name; int (*rtx_syn_ack)(const struct sock *, struct request_sock *); void (*send_ack)(const struct sock *, struct sk_buff *, struct request_sock *); void (*send_reset)(const struct sock *, struct sk_buff *); void (*destructor)(struct request_sock *); void (*syn_ack_timeout)(const struct request_sock *); } ;
46 struct request_sock { struct sock_common __req_common; struct request_sock *dl_next; u16 mss; u8 num_retrans; unsigned char cookie_ts; unsigned char num_timeout; u32 ts_recent; struct timer_list rsk_timer; const struct request_sock_ops *rsk_ops; struct sock *sk; u32 *saved_syn; u32 secid; u32 peer_secid; } ;
18 struct fib_rule_hdr { __u8 family; __u8 dst_len; __u8 src_len; __u8 tos; __u8 table; __u8 res1; __u8 res2; __u8 action; __u32 flags; } ;
68 struct fib_rule { struct list_head list; int iifindex; int oifindex; u32 mark; u32 mark_mask; u32 flags; u32 table; u8 action; u8 l3mdev; u32 target; __be64 tun_id; struct fib_rule *ctarget; struct net *fr_net; atomic_t refcnt; u32 pref; int suppress_ifgroup; int suppress_prefixlen; char iifname[16U]; char oifname[16U]; struct callback_head rcu; } ;
35 struct fib_lookup_arg { void *lookup_ptr; void *result; struct fib_rule *rule; u32 table; int flags; } ;
43 struct fib_rules_ops { int family; struct list_head list; int rule_size; int addr_size; int unresolved_rules; int nr_goto_rules; int (*action)(struct fib_rule *, struct flowi *, int, struct fib_lookup_arg *); bool (*suppress)(struct fib_rule *, struct fib_lookup_arg *); int (*match)(struct fib_rule *, struct flowi *, int); int (*configure)(struct fib_rule *, struct sk_buff *, struct fib_rule_hdr *, struct nlattr **); int (*delete)(struct fib_rule *); int (*compare)(struct fib_rule *, struct fib_rule_hdr *, struct nlattr **); int (*fill)(struct fib_rule *, struct sk_buff *, struct fib_rule_hdr *); size_t (*nlmsg_payload)(struct fib_rule *); void (*flush_cache)(struct fib_rules_ops *); int nlgroup; const struct nla_policy *policy; struct list_head rules_list; struct module *owner; struct net *fro_net; struct callback_head rcu; } ;
140 struct l3mdev_ops { u32 (*l3mdev_fib_table)(const struct net_device *); struct sk_buff * (*l3mdev_l3_rcv)(struct net_device *, struct sk_buff *, u16 ); struct rtable * (*l3mdev_get_rtable)(const struct net_device *, const struct flowi4 *); int (*l3mdev_get_saddr)(struct net_device *, struct flowi4 *); struct dst_entry * (*l3mdev_get_rt6_dst)(const struct net_device *, struct flowi6 *); int (*l3mdev_get_saddr6)(struct net_device *, const struct sock *, struct flowi6 *); } ;
328 struct timewait_sock_ops { struct kmem_cache *twsk_slab; char *twsk_slab_name; unsigned int twsk_obj_size; int (*twsk_unique)(struct sock *, struct sock *, void *); void (*twsk_destructor)(struct sock *); } ;
39 struct inet_timewait_death_row { atomic_t tw_count; struct inet_hashinfo *hashinfo; int sysctl_tw_recycle; int sysctl_max_tw_buckets; } ;
100 struct ip6_sf_list { struct ip6_sf_list *sf_next; struct in6_addr sf_addr; unsigned long sf_count[2U]; unsigned char sf_gsresp; unsigned char sf_oldin; unsigned char sf_crcount; } ;
109 struct ifmcaddr6 { struct in6_addr mca_addr; struct inet6_dev *idev; struct ifmcaddr6 *next; struct ip6_sf_list *mca_sources; struct ip6_sf_list *mca_tomb; unsigned int mca_sfmode; unsigned char mca_crcount; unsigned long mca_sfcount[2U]; struct timer_list mca_timer; unsigned int mca_flags; int mca_users; atomic_t mca_refcnt; spinlock_t mca_lock; unsigned long mca_cstamp; unsigned long mca_tstamp; } ;
141 struct ifacaddr6 { struct in6_addr aca_addr; struct inet6_dev *aca_idev; struct rt6_info *aca_rt; struct ifacaddr6 *aca_next; int aca_users; atomic_t aca_refcnt; unsigned long aca_cstamp; unsigned long aca_tstamp; } ;
152 struct ipv6_devstat { struct proc_dir_entry *proc_dir_entry; struct ipstats_mib *ipv6; struct icmpv6_mib_device *icmpv6dev; struct icmpv6msg_mib_device *icmpv6msgdev; } ;
163 struct inet6_dev { struct net_device *dev; struct list_head addr_list; struct ifmcaddr6 *mc_list; struct ifmcaddr6 *mc_tomb; spinlock_t mc_lock; unsigned char mc_qrv; unsigned char mc_gq_running; unsigned char mc_ifc_count; unsigned char mc_dad_count; unsigned long mc_v1_seen; unsigned long mc_qi; unsigned long mc_qri; unsigned long mc_maxdelay; struct timer_list mc_gq_timer; struct timer_list mc_ifc_timer; struct timer_list mc_dad_timer; struct ifacaddr6 *ac_list; rwlock_t lock; atomic_t refcnt; __u32 if_flags; int dead; u8 rndid[8U]; struct timer_list regen_timer; struct list_head tempaddr_list; struct in6_addr token; struct neigh_parms *nd_parms; struct ipv6_devconf cnf; struct ipv6_devstat stats; struct timer_list rs_timer; __u8 rs_probes; __u8 addr_gen_mode; unsigned long tstamp; struct callback_head rcu; } ;
47 struct prefix_info ;
98 struct nd_opt_hdr { __u8 nd_opt_type; __u8 nd_opt_len; } ;
103 struct ndisc_options { struct nd_opt_hdr *nd_opt_array[6U]; struct nd_opt_hdr *nd_opts_ri; struct nd_opt_hdr *nd_opts_ri_end; struct nd_opt_hdr *nd_useropts; struct nd_opt_hdr *nd_useropts_end; struct nd_opt_hdr *nd_802154_opt_array[3U]; } ;
134 struct ndisc_ops { int (*is_useropt)(u8 ); int (*parse_options)(const struct net_device *, struct nd_opt_hdr *, struct ndisc_options *); void (*update)(const struct net_device *, struct neighbour *, u32 , u8 , const struct ndisc_options *); int (*opt_addr_space)(const struct net_device *, u8 , struct neighbour *, u8 *, u8 **); void (*fill_addr_option)(const struct net_device *, struct sk_buff *, u8 , const u8 *); void (*prefix_rcv_add_addr)(struct net *, struct net_device *, const struct prefix_info *, struct inet6_dev *, struct in6_addr *, int, u32 , bool , bool , __u32 , u32 , bool ); } ;
/* --- IPv4 peer cache, FIB table and routing-table entry types. --- */
37 struct ipv4_addr_key { __be32 addr; int vif; } ;
23 union __anonunion____missing_field_name_583 { struct ipv4_addr_key a4; struct in6_addr a6; u32 key[4U]; } ;
23 struct inetpeer_addr { union __anonunion____missing_field_name_583 __annonCompField133; __u16 family; } ;
34 union __anonunion____missing_field_name_584 { struct list_head gc_list; struct callback_head gc_rcu; } ;
34 struct __anonstruct____missing_field_name_586 { atomic_t rid; } ;
34 union __anonunion____missing_field_name_585 { struct __anonstruct____missing_field_name_586 __annonCompField135; struct callback_head rcu; struct inet_peer *gc_next; } ;
/* AVL-tree node of the long-lived per-destination peer cache. */
34 struct inet_peer { struct inet_peer *avl_left; struct inet_peer *avl_right; struct inetpeer_addr daddr; __u32 avl_height; u32 metrics[16U]; u32 rate_tokens; unsigned long rate_last; union __anonunion____missing_field_name_584 __annonCompField134; union __anonunion____missing_field_name_585 __annonCompField136; __u32 dtime; atomic_t refcnt; } ;
65 struct inet_peer_base { struct inet_peer *root; seqlock_t lock; int total; } ;
/* NOTE(review): tb_data appears both as a pointer and as a zero-length trailing
 * array here — presumably an artifact of the verifier flattening the kernel's
 * flexible-array idiom; confirm against the original fib_table definition. */
174 struct fib_table { struct hlist_node tb_hlist; u32 tb_id; int tb_num_default; struct callback_head rcu; unsigned long *tb_data; unsigned long __data[0U]; } ;
48 struct uncached_list ;
49 struct rtable { struct dst_entry dst; int rt_genid; unsigned int rt_flags; __u16 rt_type; __u8 rt_is_input; __u8 rt_uses_gateway; int rt_iif; __be32 rt_gateway; u32 rt_pmtu; u32 rt_table_id; struct list_head rt_uncached; struct uncached_list *rt_uncached_list; } ;
213 struct in_ifaddr ;
/* --- MMU-notifier types: callbacks invoked when a process address space
 * changes, used by on-demand-paging RDMA memory registration. --- */
583 struct mmu_notifier ;
584 struct mmu_notifier_ops ;
585 struct mmu_notifier_mm { struct hlist_head list; spinlock_t lock; } ;
26 struct mmu_notifier_ops { void (*release)(struct mmu_notifier *, struct mm_struct *); int (*clear_flush_young)(struct mmu_notifier *, struct mm_struct *, unsigned long, unsigned long); int (*clear_young)(struct mmu_notifier *, struct mm_struct *, unsigned long, unsigned long); int (*test_young)(struct mmu_notifier *, struct mm_struct *, unsigned long); void (*change_pte)(struct mmu_notifier *, struct mm_struct *, unsigned long, pte_t ); void (*invalidate_page)(struct mmu_notifier *, struct mm_struct *, unsigned long); void (*invalidate_range_start)(struct mmu_notifier *, struct mm_struct *, unsigned long, unsigned long); void (*invalidate_range_end)(struct mmu_notifier *, struct mm_struct *, unsigned long, unsigned long); void (*invalidate_range)(struct mmu_notifier *, struct mm_struct *, unsigned long, unsigned long); } ;
180 struct mmu_notifier { struct hlist_node hlist; const struct mmu_notifier_ops *ops; } ;
/* --- InfiniBand core basic types: GIDs, device/port attributes, events. --- */
66 struct __anonstruct_global_594 { __be64 subnet_prefix; __be64 interface_id; } ;
/* 128-bit global identifier, viewable as raw bytes or prefix+interface id. */
66 union ib_gid { u8 raw[16U]; struct __anonstruct_global_594 global; } ;
76 enum ib_gid_type { IB_GID_TYPE_IB = 0, IB_GID_TYPE_ROCE = 0, IB_GID_TYPE_ROCE_UDP_ENCAP = 1, IB_GID_TYPE_SIZE = 2 } ;
83 struct ib_gid_attr { enum ib_gid_type gid_type; struct net_device *ndev; } ;
151 enum rdma_link_layer { IB_LINK_LAYER_UNSPECIFIED = 0, IB_LINK_LAYER_INFINIBAND = 1, IB_LINK_LAYER_ETHERNET = 2 } ;
205 enum ib_atomic_cap { IB_ATOMIC_NONE = 0, IB_ATOMIC_HCA = 1, IB_ATOMIC_GLOB = 2 } ;
223 struct __anonstruct_per_transport_caps_595 { uint32_t rc_odp_caps; uint32_t uc_odp_caps; uint32_t ud_odp_caps; } ;
223 struct ib_odp_caps { uint64_t general_caps; struct __anonstruct_per_transport_caps_595 per_transport_caps; } ;
268 struct ib_cq_init_attr { unsigned int cqe; int comp_vector; u32 flags; } ;
274 struct ib_device_attr { u64 fw_ver; __be64 sys_image_guid; u64 max_mr_size; u64 page_size_cap; u32 vendor_id; u32 vendor_part_id; u32 hw_ver; int max_qp; int max_qp_wr; u64 device_cap_flags; int max_sge; int max_sge_rd; int max_cq; int max_cqe; int max_mr; int max_pd; int max_qp_rd_atom; int max_ee_rd_atom; int max_res_rd_atom; int max_qp_init_rd_atom; int max_ee_init_rd_atom; enum ib_atomic_cap atomic_cap; enum ib_atomic_cap masked_atomic_cap; int max_ee; int max_rdd; int max_mw; int max_raw_ipv6_qp; int max_raw_ethy_qp; int max_mcast_grp; int max_mcast_qp_attach; int max_total_mcast_qp_attach; int max_ah; int max_fmr; int max_map_per_fmr; int max_srq; int max_srq_wr; int max_srq_sge; unsigned int max_fast_reg_page_list_len; u16 max_pkeys; u8 local_ca_ack_delay; int sig_prot_cap; int sig_guard_cap; struct ib_odp_caps odp_caps; uint64_t timestamp_mask; uint64_t hca_core_clock; } ;
322 enum ib_mtu { IB_MTU_256 = 1, IB_MTU_512 = 2, IB_MTU_1024 = 3, IB_MTU_2048 = 4, IB_MTU_4096 = 5 } ;
342 enum ib_port_state { IB_PORT_NOP = 0, IB_PORT_DOWN = 1, IB_PORT_INIT = 2, IB_PORT_ARMED = 3, IB_PORT_ACTIVE = 4, IB_PORT_ACTIVE_DEFER = 5 } ;
/* NOTE(review): "const const char **" is a doubled qualifier — presumably the
 * verifier's flattening of the kernel's "const char * const *names"; redundant
 * const is legal C, so this is cosmetic only. */
405 struct rdma_hw_stats { unsigned long timestamp; unsigned long lifespan; const const char **names; int num_counters; u64 value[]; } ;
454 struct ib_port_attr { u64 subnet_prefix; enum ib_port_state state; enum ib_mtu max_mtu; enum ib_mtu active_mtu; int gid_tbl_len; u32 port_cap_flags; u32 max_msg_sz; u32 bad_pkey_cntr; u32 qkey_viol_cntr; u16 pkey_tbl_len; u16 lid; u16 sm_lid; u8 lmc; u8 max_vl_num; u8 sm_sl; u8 subnet_timeout; u8 init_type_reply; u8 active_width; u8 active_speed; u8 phys_state; bool grh_required; } ;
527 struct ib_device_modify { u64 sys_image_guid; char node_desc[64U]; } ;
538 struct ib_port_modify { u32 set_port_cap_mask; u32 clr_port_cap_mask; u8 init_type; } ;
544 enum ib_event_type { IB_EVENT_CQ_ERR = 0, IB_EVENT_QP_FATAL = 1, IB_EVENT_QP_REQ_ERR = 2, IB_EVENT_QP_ACCESS_ERR = 3, IB_EVENT_COMM_EST = 4, IB_EVENT_SQ_DRAINED = 5, IB_EVENT_PATH_MIG = 6, IB_EVENT_PATH_MIG_ERR = 7, IB_EVENT_DEVICE_FATAL = 8, IB_EVENT_PORT_ACTIVE = 9, IB_EVENT_PORT_ERR = 10, IB_EVENT_LID_CHANGE = 11, IB_EVENT_PKEY_CHANGE = 12, IB_EVENT_SM_CHANGE = 13, IB_EVENT_SRQ_ERR = 14, IB_EVENT_SRQ_LIMIT_REACHED = 15, IB_EVENT_QP_LAST_WQE_REACHED = 16, IB_EVENT_CLIENT_REREGISTER = 17, IB_EVENT_GID_CHANGE = 18, IB_EVENT_WQ_FATAL = 19 } ;
/* --- IB verbs forward declarations, completion/work-request enums and the
 * attribute structures used to create and modify CQs, SRQs and QPs. --- */
569 struct ib_device ;
569 struct ib_cq ;
569 struct ib_qp ;
569 struct ib_srq ;
569 struct ib_wq ;
569 union __anonunion_element_596 { struct ib_cq *cq; struct ib_qp *qp; struct ib_srq *srq; struct ib_wq *wq; u8 port_num; } ;
/* Asynchronous event: which object it concerns plus the event type. */
569 struct ib_event { struct ib_device *device; union __anonunion_element_596 element; enum ib_event_type event; } ;
581 struct ib_event_handler { struct ib_device *device; void (*handler)(struct ib_event_handler *, struct ib_event *); struct list_head list; } ;
587 struct ib_global_route { union ib_gid dgid; u32 flow_label; u8 sgid_index; u8 hop_limit; u8 traffic_class; } ;
602 struct ib_grh { __be32 version_tclass_flow; __be16 paylen; u8 next_hdr; u8 hop_limit; union ib_gid sgid; union ib_gid dgid; } ;
669 enum ib_mr_type { IB_MR_TYPE_MEM_REG = 0, IB_MR_TYPE_SIGNATURE = 1, IB_MR_TYPE_SG_GAPS = 2 } ;
758 enum ib_sig_err_type { IB_SIG_BAD_GUARD = 0, IB_SIG_BAD_REFTAG = 1, IB_SIG_BAD_APPTAG = 2 } ;
764 struct ib_sig_err { enum ib_sig_err_type err_type; u32 expected; u32 actual; u64 sig_err_offset; u32 key; } ;
779 struct ib_mr_status { u32 fail_status; struct ib_sig_err sig_err; } ;
799 struct ib_ah_attr { struct ib_global_route grh; u16 dlid; u8 sl; u8 src_path_bits; u8 static_rate; u8 ah_flags; u8 port_num; u8 dmac[6U]; } ;
810 enum ib_wc_status { IB_WC_SUCCESS = 0, IB_WC_LOC_LEN_ERR = 1, IB_WC_LOC_QP_OP_ERR = 2, IB_WC_LOC_EEC_OP_ERR = 3, IB_WC_LOC_PROT_ERR = 4, IB_WC_WR_FLUSH_ERR = 5, IB_WC_MW_BIND_ERR = 6, IB_WC_BAD_RESP_ERR = 7, IB_WC_LOC_ACCESS_ERR = 8, IB_WC_REM_INV_REQ_ERR = 9, IB_WC_REM_ACCESS_ERR = 10, IB_WC_REM_OP_ERR = 11, IB_WC_RETRY_EXC_ERR = 12, IB_WC_RNR_RETRY_EXC_ERR = 13, IB_WC_LOC_RDD_VIOL_ERR = 14, IB_WC_REM_INV_RD_REQ_ERR = 15, IB_WC_REM_ABORT_ERR = 16, IB_WC_INV_EECN_ERR = 17, IB_WC_INV_EEC_STATE_ERR = 18, IB_WC_FATAL_ERR = 19, IB_WC_RESP_TIMEOUT_ERR = 20, IB_WC_GENERAL_ERR = 21 } ;
837 enum ib_wc_opcode { IB_WC_SEND = 0, IB_WC_RDMA_WRITE = 1, IB_WC_RDMA_READ = 2, IB_WC_COMP_SWAP = 3, IB_WC_FETCH_ADD = 4, IB_WC_LSO = 5, IB_WC_LOCAL_INV = 6, IB_WC_REG_MR = 7, IB_WC_MASKED_COMP_SWAP = 8, IB_WC_MASKED_FETCH_ADD = 9, IB_WC_RECV = 128, IB_WC_RECV_RDMA_WITH_IMM = 129 } ;
862 struct ib_cqe ;
862 union __anonunion____missing_field_name_599 { u64 wr_id; struct ib_cqe *wr_cqe; } ;
862 union __anonunion_ex_600 { __be32 imm_data; u32 invalidate_rkey; } ;
/* Work completion entry returned by poll_cq. */
862 struct ib_wc { union __anonunion____missing_field_name_599 __annonCompField139; enum ib_wc_status status; enum ib_wc_opcode opcode; u32 vendor_err; u32 byte_len; struct ib_qp *qp; union __anonunion_ex_600 ex; u32 src_qp; int wc_flags; u16 pkey_index; u16 slid; u8 sl; u8 dlid_path_bits; u8 port_num; u8 smac[6U]; u16 vlan_id; u8 network_hdr_type; } ;
892 enum ib_cq_notify_flags { IB_CQ_SOLICITED = 1, IB_CQ_NEXT_COMP = 2, IB_CQ_SOLICITED_MASK = 3, IB_CQ_REPORT_MISSED_EVENTS = 4 } ;
899 enum ib_srq_type { IB_SRQT_BASIC = 0, IB_SRQT_XRC = 1 } ;
904 enum ib_srq_attr_mask { IB_SRQ_MAX_WR = 1, IB_SRQ_LIMIT = 2 } ;
909 struct ib_srq_attr { u32 max_wr; u32 max_sge; u32 srq_limit; } ;
915 struct ib_xrcd ;
915 struct __anonstruct_xrc_602 { struct ib_xrcd *xrcd; struct ib_cq *cq; } ;
915 union __anonunion_ext_601 { struct __anonstruct_xrc_602 xrc; } ;
915 struct ib_srq_init_attr { void (*event_handler)(struct ib_event *, void *); void *srq_context; struct ib_srq_attr attr; enum ib_srq_type srq_type; union __anonunion_ext_601 ext; } ;
929 struct ib_qp_cap { u32 max_send_wr; u32 max_recv_wr; u32 max_send_sge; u32 max_recv_sge; u32 max_inline_data; u32 max_rdma_ctxs; } ;
944 enum ib_sig_type { IB_SIGNAL_ALL_WR = 0, IB_SIGNAL_REQ_WR = 1 } ;
949 enum ib_qp_type { IB_QPT_SMI = 0, IB_QPT_GSI = 1, IB_QPT_RC = 2, IB_QPT_UC = 3, IB_QPT_UD = 4, IB_QPT_RAW_IPV6 = 5, IB_QPT_RAW_ETHERTYPE = 6, IB_QPT_RAW_PACKET = 8, IB_QPT_XRC_INI = 9, IB_QPT_XRC_TGT = 10, IB_QPT_MAX = 11, IB_QPT_RESERVED1 = 4096, IB_QPT_RESERVED2 = 4097, IB_QPT_RESERVED3 = 4098, IB_QPT_RESERVED4 = 4099, IB_QPT_RESERVED5 = 4100, IB_QPT_RESERVED6 = 4101, IB_QPT_RESERVED7 = 4102, IB_QPT_RESERVED8 = 4103, IB_QPT_RESERVED9 = 4104, IB_QPT_RESERVED10 = 4105 } ;
/* NOTE(review): IB_QP_CREATE_RESERVED_END = -2147483648 is how the verifier
 * renders the kernel's (1 << 31) flag once forced through a signed int. */
973 enum ib_qp_create_flags { IB_QP_CREATE_IPOIB_UD_LSO = 1, IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = 2, IB_QP_CREATE_CROSS_CHANNEL = 4, IB_QP_CREATE_MANAGED_SEND = 8, IB_QP_CREATE_MANAGED_RECV = 16, IB_QP_CREATE_NETIF_QP = 32, IB_QP_CREATE_SIGNATURE_EN = 64, IB_QP_CREATE_USE_GFP_NOIO = 128, IB_QP_CREATE_SCATTER_FCS = 256, IB_QP_CREATE_RESERVED_START = 67108864, IB_QP_CREATE_RESERVED_END = -2147483648 } ;
987 struct ib_rwq_ind_table ;
987 struct ib_qp_init_attr { void (*event_handler)(struct ib_event *, void *); void *qp_context; struct ib_cq *send_cq; struct ib_cq *recv_cq; struct ib_srq *srq; struct ib_xrcd *xrcd; struct ib_qp_cap cap; enum ib_sig_type sq_sig_type; enum ib_qp_type qp_type; enum ib_qp_create_flags create_flags; u8 port_num; struct ib_rwq_ind_table *rwq_ind_tbl; } ;
1092 enum ib_qp_state { IB_QPS_RESET = 0, IB_QPS_INIT = 1, IB_QPS_RTR = 2, IB_QPS_RTS = 3, IB_QPS_SQD = 4, IB_QPS_SQE = 5, IB_QPS_ERR = 6 } ;
1102 enum ib_mig_state { IB_MIG_MIGRATED = 0, IB_MIG_REARM = 1, IB_MIG_ARMED = 2 } ;
1108 enum ib_mw_type { IB_MW_TYPE_1 = 1, IB_MW_TYPE_2 = 2 } ;
1113 struct ib_qp_attr { enum ib_qp_state qp_state; enum ib_qp_state cur_qp_state; enum ib_mtu path_mtu; enum ib_mig_state path_mig_state; u32 qkey; u32 rq_psn; u32 sq_psn; u32 dest_qp_num; int qp_access_flags; struct ib_qp_cap cap; struct ib_ah_attr ah_attr; struct ib_ah_attr alt_ah_attr; u16 pkey_index; u16 alt_pkey_index; u8 en_sqd_async_notify; u8 sq_draining; u8 max_rd_atomic; u8 max_dest_rd_atomic; u8 min_rnr_timer; u8 port_num; u8 timeout; u8 retry_cnt; u8 rnr_retry; u8 alt_port_num; u8 alt_timeout; } ;
1141 enum ib_wr_opcode { IB_WR_RDMA_WRITE = 0, IB_WR_RDMA_WRITE_WITH_IMM = 1, IB_WR_SEND = 2, IB_WR_SEND_WITH_IMM = 3, IB_WR_RDMA_READ = 4, IB_WR_ATOMIC_CMP_AND_SWP = 5, IB_WR_ATOMIC_FETCH_AND_ADD = 6, IB_WR_LSO = 7, IB_WR_SEND_WITH_INV = 8, IB_WR_RDMA_READ_WITH_INV = 9, IB_WR_LOCAL_INV = 10, IB_WR_REG_MR = 11, IB_WR_MASKED_ATOMIC_CMP_AND_SWP = 12, IB_WR_MASKED_ATOMIC_FETCH_AND_ADD = 13, IB_WR_REG_SIG_MR = 14, IB_WR_RESERVED1 = 240, IB_WR_RESERVED2 = 241, IB_WR_RESERVED3 = 242, IB_WR_RESERVED4 = 243, IB_WR_RESERVED5 = 244, IB_WR_RESERVED6 = 245, IB_WR_RESERVED7 = 246, IB_WR_RESERVED8 = 247, IB_WR_RESERVED9 = 248, IB_WR_RESERVED10 = 249 } ;
1179 struct ib_sge { u64 addr; u32 length; u32 lkey; } ;
1190 struct ib_cqe { void (*done)(struct ib_cq *, struct ib_wc *); } ;
1194 union __anonunion____missing_field_name_603 { u64 wr_id; struct ib_cqe *wr_cqe; } ;
1194 union __anonunion_ex_604 { __be32 imm_data; u32 invalidate_rkey; } ;
/* Base send work request; opcode-specific variants extend this in the kernel. */
1194 struct ib_send_wr { struct ib_send_wr *next; union __anonunion____missing_field_name_603 __annonCompField140; struct ib_sge *sg_list; int num_sge; enum ib_wr_opcode opcode; int send_flags; union __anonunion_ex_604 ex; } ;
/* --- IB resource objects: user context, protection domain, address handle,
 * completion queue, shared receive queue, work queue. --- */
1237 struct ib_ah ;
1254 struct ib_mr ;
1279 union __anonunion____missing_field_name_605 { u64 wr_id; struct ib_cqe *wr_cqe; } ;
1279 struct ib_recv_wr { struct ib_recv_wr *next; union __anonunion____missing_field_name_605 __annonCompField141; struct ib_sge *sg_list; int num_sge; } ;
1305 struct ib_fmr_attr { int max_pages; int max_maps; u8 page_shift; } ;
1315 struct ib_umem ;
/* Per-process verbs context: lists of every object the process owns, plus
 * on-demand-paging state (umem tree, mmu_notifier hook). */
1316 struct ib_ucontext { struct ib_device *device; struct list_head pd_list; struct list_head mr_list; struct list_head mw_list; struct list_head cq_list; struct list_head qp_list; struct list_head srq_list; struct list_head ah_list; struct list_head xrcd_list; struct list_head rule_list; struct list_head wq_list; struct list_head rwq_ind_tbl_list; int closing; struct pid *tgid; struct rb_root umem_tree; struct rw_semaphore umem_rwsem; void (*invalidate_range)(struct ib_umem *, unsigned long, unsigned long); struct mmu_notifier mn; atomic_t notifier_count; struct list_head no_private_counters; int odp_mrs_count; } ;
1350 struct ib_uobject { u64 user_handle; struct ib_ucontext *context; void *object; struct list_head list; int id; struct kref ref; struct rw_semaphore mutex; struct callback_head rcu; int live; } ;
1363 struct ib_udata { const void *inbuf; void *outbuf; size_t inlen; size_t outlen; } ;
1370 struct ib_pd { u32 local_dma_lkey; struct ib_device *device; struct ib_uobject *uobject; atomic_t usecnt; struct ib_mr *local_mr; } ;
1378 struct ib_xrcd { struct ib_device *device; atomic_t usecnt; struct inode *inode; struct mutex tgt_qp_mutex; struct list_head tgt_qp_list; } ;
1387 struct ib_ah { struct ib_device *device; struct ib_pd *pd; struct ib_uobject *uobject; } ;
1395 enum ib_poll_context { IB_POLL_DIRECT = 0, IB_POLL_SOFTIRQ = 1, IB_POLL_WORKQUEUE = 2 } ;
1401 union __anonunion____missing_field_name_606 { struct irq_poll iop; struct work_struct work; } ;
1401 struct ib_cq { struct ib_device *device; struct ib_uobject *uobject; void (*comp_handler)(struct ib_cq *, void *); void (*event_handler)(struct ib_event *, void *); void *cq_context; int cqe; atomic_t usecnt; enum ib_poll_context poll_ctx; struct ib_wc *wc; union __anonunion____missing_field_name_606 __annonCompField142; } ;
1417 struct __anonstruct_xrc_608 { struct ib_xrcd *xrcd; struct ib_cq *cq; u32 srq_num; } ;
1417 union __anonunion_ext_607 { struct __anonstruct_xrc_608 xrc; } ;
1417 struct ib_srq { struct ib_device *device; struct ib_pd *pd; struct ib_uobject *uobject; void (*event_handler)(struct ib_event *, void *); void *srq_context; enum ib_srq_type srq_type; atomic_t usecnt; union __anonunion_ext_607 ext; } ;
1435 enum ib_wq_type { IB_WQT_RQ = 0 } ;
1439 enum ib_wq_state { IB_WQS_RESET = 0, IB_WQS_RDY = 1, IB_WQS_ERR = 2 } ;
1445 struct ib_wq { struct ib_device *device; struct ib_uobject *uobject; void *wq_context; void (*event_handler)(struct ib_event *, void *); struct ib_pd *pd; struct ib_cq *cq; u32 wq_num; enum ib_wq_state state; enum ib_wq_type wq_type; atomic_t usecnt; } ;
1458 struct ib_wq_init_attr { void *wq_context; enum ib_wq_type wq_type; u32 max_wr; u32 max_sge; struct ib_cq *cq; void (*event_handler)(struct ib_event *, void *); } ;
1472 struct ib_wq_attr { enum ib_wq_state wq_state; enum ib_wq_state curr_wq_state; } ;
1477 struct ib_rwq_ind_table { struct ib_device *device; struct ib_uobject *uobject; atomic_t usecnt; u32 ind_tbl_num; u32 log_ind_tbl_size; struct ib_wq **ind_tbl; } ;
1486 struct ib_rwq_ind_table_init_attr { u32 log_ind_tbl_size; struct ib_wq **ind_tbl; } ;
/* --- Queue pair, memory registration objects, flow steering, and the
 * device-level cache / DMA-mapping callback tables. --- */
1492 struct ib_qp { struct ib_device *device; struct ib_pd *pd; struct ib_cq *send_cq; struct ib_cq *recv_cq; spinlock_t mr_lock; int mrs_used; struct list_head rdma_mrs; struct list_head sig_mrs; struct ib_srq *srq; struct ib_xrcd *xrcd; struct list_head xrcd_list; atomic_t usecnt; struct list_head open_list; struct ib_qp *real_qp; struct ib_uobject *uobject; void (*event_handler)(struct ib_event *, void *); void *qp_context; u32 qp_num; u32 max_write_sge; u32 max_read_sge; enum ib_qp_type qp_type; struct ib_rwq_ind_table *rwq_ind_tbl; } ;
1523 union __anonunion____missing_field_name_609 { struct ib_uobject *uobject; struct list_head qp_entry; } ;
/* Memory region: lkey/rkey pair covering [iova, iova+length). */
1523 struct ib_mr { struct ib_device *device; struct ib_pd *pd; u32 lkey; u32 rkey; u64 iova; u32 length; unsigned int page_size; bool need_inval; union __anonunion____missing_field_name_609 __annonCompField143; } ;
1538 struct ib_mw { struct ib_device *device; struct ib_pd *pd; struct ib_uobject *uobject; u32 rkey; enum ib_mw_type type; } ;
1546 struct ib_fmr { struct ib_device *device; struct ib_pd *pd; struct list_head list; u32 lkey; u32 rkey; } ;
1554 enum ib_flow_attr_type { IB_FLOW_ATTR_NORMAL = 0, IB_FLOW_ATTR_ALL_DEFAULT = 1, IB_FLOW_ATTR_MC_DEFAULT = 2, IB_FLOW_ATTR_SNIFFER = 3 } ;
1675 struct ib_flow_attr { enum ib_flow_attr_type type; u16 size; u16 priority; u32 flags; u8 num_of_specs; u8 port; } ;
1684 struct ib_flow { struct ib_qp *qp; struct ib_uobject *uobject; } ;
1693 struct ib_mad_hdr ;
1707 struct ib_pkey_cache ;
1707 struct ib_gid_table ;
1707 struct ib_cache { rwlock_t lock; struct ib_event_handler event_handler; struct ib_pkey_cache **pkey_cache; struct ib_gid_table **gid_cache; u8 *lmc_cache; } ;
/* Optional per-device override table for DMA-mapping primitives. */
1719 struct ib_dma_mapping_ops { int (*mapping_error)(struct ib_device *, u64 ); u64 (*map_single)(struct ib_device *, void *, size_t , enum dma_data_direction ); void (*unmap_single)(struct ib_device *, u64 , size_t , enum dma_data_direction ); u64 (*map_page)(struct ib_device *, struct page *, unsigned long, size_t , enum dma_data_direction ); void (*unmap_page)(struct ib_device *, u64 , size_t , enum dma_data_direction ); int (*map_sg)(struct ib_device *, struct scatterlist *, int, enum dma_data_direction ); void (*unmap_sg)(struct ib_device *, struct scatterlist *, int, enum dma_data_direction ); void (*sync_single_for_cpu)(struct ib_device *, u64 , size_t , enum dma_data_direction ); void (*sync_single_for_device)(struct ib_device *, u64 , size_t , enum dma_data_direction ); void * (*alloc_coherent)(struct ib_device *, size_t , u64 *, gfp_t ); void (*free_coherent)(struct ib_device *, size_t , void *, u64 ); } ;
/* --- struct ib_device: the full provider-callback table every RDMA driver
 * implements. The verifier split this single declaration across three
 * physical lines; the breaks below are preserved exactly as emitted. --- */
1756 struct iw_cm_verbs ;
1757 struct ib_port_immutable { int pkey_tbl_len; int gid_tbl_len; u32 core_cap_flags; u32 max_mad_size; } ;
/* ldv_40285: verifier-generated name for the device registration-state enum. */
1767 enum ldv_40285 { IB_DEV_UNINITIALIZED = 0, IB_DEV_REGISTERED = 1, IB_DEV_UNREGISTERED = 2 } ;
1773 struct ib_device { struct device *dma_device; char name[64U]; struct list_head event_handler_list; spinlock_t event_handler_lock; spinlock_t client_data_lock; struct list_head core_list; struct list_head client_data_list; struct ib_cache cache; struct ib_port_immutable *port_immutable; int num_comp_vectors; struct iw_cm_verbs *iwcm; struct rdma_hw_stats * (*alloc_hw_stats)(struct ib_device *, u8 ); int (*get_hw_stats)(struct ib_device *, struct rdma_hw_stats *, u8 , int); int (*query_device)(struct ib_device *, struct ib_device_attr *, struct ib_udata *); int (*query_port)(struct ib_device *, u8 , struct ib_port_attr *); enum rdma_link_layer (*get_link_layer)(struct ib_device *, u8 ); struct net_device * (*get_netdev)(struct ib_device *, u8 ); int (*query_gid)(struct ib_device *, u8 , int, union ib_gid *); int (*add_gid)(struct ib_device *, u8 , unsigned int, const union ib_gid *, const struct ib_gid_attr *, void **); int (*del_gid)(struct ib_device *, u8 , unsigned int, void **); int (*query_pkey)(struct ib_device *, u8 , u16 , u16 *); int (*modify_device)(struct ib_device *, int, struct ib_device_modify *); int (*modify_port)(struct ib_device *, u8 , int, struct ib_port_modify *); struct ib_ucontext * (*alloc_ucontext)(struct ib_device *, struct ib_udata *); int (*dealloc_ucontext)(struct ib_ucontext *); int (*mmap)(struct ib_ucontext *, struct vm_area_struct *); struct ib_pd * (*alloc_pd)(struct ib_device *, struct ib_ucontext *, struct ib_udata *); int (*dealloc_pd)(struct ib_pd *); struct ib_ah * (*create_ah)(struct ib_pd *, struct ib_ah_attr *); int (*modify_ah)(struct ib_ah *, struct ib_ah_attr *); int (*query_ah)(struct ib_ah *, struct ib_ah_attr *); int (*destroy_ah)(struct ib_ah *); struct ib_srq * (*create_srq)(struct ib_pd *, struct ib_srq_init_attr *, struct ib_udata *); int (*modify_srq)(struct ib_srq *, struct ib_srq_attr *, enum ib_srq_attr_mask , struct ib_udata *); int (*query_srq)(struct ib_srq *, struct ib_srq_attr *); int 
(*destroy_srq)(struct ib_srq *); int (*post_srq_recv)(struct ib_srq *, struct ib_recv_wr *, struct ib_recv_wr **); struct ib_qp * (*create_qp)(struct ib_pd *, struct ib_qp_init_attr *, struct ib_udata *); int (*modify_qp)(struct ib_qp *, struct ib_qp_attr *, int, struct ib_udata *); int (*query_qp)(struct ib_qp *, struct ib_qp_attr *, int, struct ib_qp_init_attr *); int (*destroy_qp)(struct ib_qp *); int (*post_send)(struct ib_qp *, struct ib_send_wr *, struct ib_send_wr **); int (*post_recv)(struct ib_qp *, struct ib_recv_wr *, struct ib_recv_wr **); struct ib_cq * (*create_cq)(struct ib_device *, const struct ib_cq_init_attr *, struct ib_ucontext *, struct ib_udata *); int (*modify_cq)(struct ib_cq *, u16 , u16 ); int (*destroy_cq)(struct ib_cq *); int (*resize_cq)(struct ib_cq *, int, struct ib_udata *); int (*poll_cq)(struct ib_cq *, int, struct ib_wc *); int (*peek_cq)(struct ib_cq *, int); int (*req_notify_cq)(struct ib_cq *, enum ib_cq_notify_flags ); int (*req_ncomp_notif)(struct ib_cq *, int); struct ib_mr * (*get_dma_mr)(struct ib_pd *, int); struct ib_mr * (*reg_user_mr)(struct ib_pd *, u64 , u64 , u64 , int, struct ib_udata *); int (*rereg_user_mr)(struct ib_mr *, int, u64 , u64 , u64 , int, struct ib_pd *, struct ib_udata *); int (*dereg_mr)(struct ib_mr *); struct ib_mr * (*alloc_mr)(struct ib_pd *, enum ib_mr_type , u32 ); int (*map_mr_sg)(struct ib_mr *, struct scatterlist *, int, unsigned int *); struct ib_mw * (*alloc_mw)(struct ib_pd *, enum ib_mw_type , struct ib_udata *); int (*dealloc_mw)(struct ib_mw *); struct ib_fmr * (*alloc_fmr)(struct ib_pd *, int, struct ib_fmr_attr *); int (*map_phys_fmr)(struct ib_fmr *, u64 *, int, u64 ); int (*unmap_fmr)(struct list_head *); int (*dealloc_fmr)(struct ib_fmr *); int (*attach_mcast)(struct ib_qp *, union ib_gid *, u16 ); int (*detach_mcast)(struct ib_qp *, union ib_gid *, u16 ); int (*process_mad)(struct ib_device *, int, u8 , const struct ib_wc *, const struct ib_grh *, const struct ib_mad_hdr *, 
size_t , struct ib_mad_hdr *, size_t *, u16 *); struct ib_xrcd * (*alloc_xrcd)(struct ib_device *, struct ib_ucontext *, struct ib_udata *); int (*dealloc_xrcd)(struct ib_xrcd *); struct ib_flow * (*create_flow)(struct ib_qp *, struct ib_flow_attr *, int); int (*destroy_flow)(struct ib_flow *); int (*check_mr_status)(struct ib_mr *, u32 , struct ib_mr_status *); void (*disassociate_ucontext)(struct ib_ucontext *); void (*drain_rq)(struct ib_qp *); void (*drain_sq)(struct ib_qp *); int (*set_vf_link_state)(struct ib_device *, int, u8 , int); int (*get_vf_config)(struct ib_device *, int, u8 , struct ifla_vf_info *); int (*get_vf_stats)(struct ib_device *, int, u8 , struct ifla_vf_stats *); int (*set_vf_guid)(struct ib_device *, int, u8 , u64 , int); struct ib_wq * (*create_wq)(struct ib_pd *, struct ib_wq_init_attr *, struct ib_udata *); int (*destroy_wq)(struct ib_wq *); int (*modify_wq)(struct ib_wq *, struct ib_wq_attr *, u32 , struct ib_udata *); struct ib_rwq_ind_table * (*create_rwq_ind_table)(struct ib_device *, struct ib_rwq_ind_table_init_attr *, struct ib_udata *); int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *); struct ib_dma_mapping_ops *dma_ops; struct module *owner; struct device dev; struct kobject *ports_parent; struct list_head port_list; enum ldv_40285 reg_state; int uverbs_abi_ver; u64 uverbs_cmd_mask; u64 uverbs_ex_cmd_mask; char node_desc[64U]; __be64 node_guid; u32 local_dma_lkey; unsigned char is_switch; u8 node_type; u8 phys_port_cnt; struct ib_device_attr attrs; struct attribute_group *hw_stats_ag; struct rdma_hw_stats *hw_stats; int (*get_port_immutable)(struct ib_device *, u8 , struct ib_port_immutable *); void (*get_dev_fw_str)(struct ib_device *, char *, size_t ); } ;
/* --- MAD header, pinned user-memory descriptor, and IPv4 per-device types. --- */
249 struct ib_mad_hdr { u8 base_version; u8 mgmt_class; u8 class_version; u8 method; __be16 status; __be16 class_specific; __be64 tid; __be16 attr_id; __be16 resv; __be32 attr_mod; } ;
148 struct ib_umem_odp ;
/* User memory pinned for RDMA: scatter-gather table over the user pages. */
149 struct ib_umem { struct ib_ucontext *context; size_t length; unsigned long address; int page_size; int writable; int hugetlb; struct work_struct work; struct pid *pid; struct mm_struct *mm; unsigned long diff; struct ib_umem_odp *odp_data; struct sg_table sg_head; int nmap; int npages; } ;
165 struct ipv4_devconf { void *sysctl; int data[31U]; unsigned long state[1U]; } ;
20 struct ip_mc_list ;
20 struct in_device { struct net_device *dev; atomic_t refcnt; int dead; struct in_ifaddr *ifa_list; struct ip_mc_list *mc_list; struct ip_mc_list **mc_hash; int mc_count; spinlock_t mc_tomb_lock; struct ip_mc_list *mc_tomb; unsigned long mr_v1_seen; unsigned long mr_v2_seen; unsigned long mr_maxdelay; unsigned char mr_qrv; unsigned char mr_gq_running; unsigned char mr_ifc_count; struct timer_list mr_gq_timer; struct timer_list mr_ifc_timer; struct neigh_parms *arp_parms; struct ipv4_devconf cnf; struct callback_head callback_head; } ;
71 struct in_ifaddr { struct hlist_node hash; struct in_ifaddr *ifa_next; struct in_device *ifa_dev; struct callback_head callback_head; __be32 ifa_local; __be32 ifa_address; __be32 ifa_mask; __be32 ifa_broadcast; unsigned char ifa_scope; unsigned char ifa_prefixlen; __u32 ifa_flags; char ifa_label[16U]; __u32 ifa_valid_lft; __u32 ifa_preferred_lft; unsigned long ifa_cstamp; unsigned long ifa_tstamp; } ;
/* --- Soft-RoCE (rxe) driver core types: packet metadata, userspace-visible
 * work-request layout, object pools, and tasklet wrapper. --- */
48 struct rxe_dev ;
128 struct rxe_qp ;
128 struct rxe_send_wqe ;
/* Per-packet metadata carried alongside an skb through the rxe pipeline. */
128 struct rxe_pkt_info { struct rxe_dev *rxe; struct rxe_qp *qp; struct rxe_send_wqe *wqe; u8 *hdr; u32 mask; u32 psn; u16 pkey_index; u16 paylen; u8 port_num; u8 opcode; u8 offset; } ;
149 struct __anonstruct_global_641 { __be64 subnet_prefix; __be64 interface_id; } ;
149 union rxe_gid { __u8 raw[16U]; struct __anonstruct_global_641 global; } ;
45 struct rxe_global_route { union rxe_gid dgid; __u32 flow_label; __u8 sgid_index; __u8 hop_limit; __u8 traffic_class; } ;
53 union __anonunion_sgid_addr_642 { struct sockaddr _sockaddr; struct sockaddr_in _sockaddr_in; struct sockaddr_in6 _sockaddr_in6; } ;
53 union __anonunion_dgid_addr_643 { struct sockaddr _sockaddr; struct sockaddr_in _sockaddr_in; struct sockaddr_in6 _sockaddr_in6; } ;
/* Address vector resolved to concrete IPv4/IPv6 socket addresses. */
53 struct rxe_av { __u8 port_num; __u8 network_type; struct rxe_global_route grh; union __anonunion_sgid_addr_642 sgid_addr; union __anonunion_dgid_addr_643 dgid_addr; } ;
64 union __anonunion_ex_644 { __be32 imm_data; __u32 invalidate_rkey; } ;
64 struct __anonstruct_rdma_646 { __u64 remote_addr; __u32 rkey; } ;
64 struct __anonstruct_atomic_647 { __u64 remote_addr; __u64 compare_add; __u64 swap; __u32 rkey; } ;
64 struct __anonstruct_ud_648 { __u32 remote_qpn; __u32 remote_qkey; __u16 pkey_index; } ;
64 struct __anonstruct_reg_649 { struct ib_mr *mr; __u32 key; int access; } ;
64 union __anonunion_wr_645 { struct __anonstruct_rdma_646 rdma; struct __anonstruct_atomic_647 atomic; struct __anonstruct_ud_648 ud; struct __anonstruct_reg_649 reg; } ;
/* __u* types here: this layout is shared with userspace via the rxe ABI. */
64 struct rxe_send_wr { __u64 wr_id; __u32 num_sge; __u32 opcode; __u32 send_flags; union __anonunion_ex_644 ex; union __anonunion_wr_645 wr; } ;
97 struct rxe_sge { __u64 addr; __u32 length; __u32 lkey; } ;
109 union __anonunion____missing_field_name_650 { __u8 inline_data[0U]; struct rxe_sge sge[0U]; } ;
109 struct rxe_dma_info { __u32 length; __u32 resid; __u32 cur_sge; __u32 num_sge; __u32 sge_offset; union __anonunion____missing_field_name_650 __annonCompField154; } ;
121 struct rxe_send_wqe { struct rxe_send_wr wr; struct rxe_av av; __u32 status; __u32 state; __u64 iova; __u32 mask; __u32 first_psn; __u32 last_psn; __u32 ack_length; __u32 ssn; __u32 has_rd_atomic; struct rxe_dma_info dma; } ;
136 struct rxe_recv_wqe { __u64 wr_id; __u32 num_sge; __u32 padding; struct rxe_dma_info dma; } ;
143 enum rxe_pool_flags { RXE_POOL_ATOMIC = 1, RXE_POOL_INDEX = 2, RXE_POOL_KEY = 4 } ;
149 enum rxe_elem_type { RXE_TYPE_UC = 0, RXE_TYPE_PD = 1, RXE_TYPE_AH = 2, RXE_TYPE_SRQ = 3, RXE_TYPE_QP = 4, RXE_TYPE_CQ = 5, RXE_TYPE_MR = 6, RXE_TYPE_MW = 7, RXE_TYPE_MC_GRP = 8, RXE_TYPE_MC_ELEM = 9, RXE_NUM_TYPES = 10 } ;
73 enum rxe_pool_state { rxe_pool_invalid = 0, rxe_pool_valid = 1 } ;
78 struct rxe_pool ;
/* Common header embedded at the front of every pooled rxe object. */
78 struct rxe_pool_entry { struct rxe_pool *pool; struct kref ref_cnt; struct list_head list; struct rb_node node; u32 index; } ;
88 struct rxe_pool { struct rxe_dev *rxe; spinlock_t pool_lock; size_t elem_size; struct kref ref_cnt; void (*cleanup)(void *); enum rxe_pool_state state; enum rxe_pool_flags flags; enum rxe_elem_type type; unsigned int max_elem; atomic_t num_elem; struct rb_root tree; unsigned long *table; size_t table_size; u32 max_index; u32 min_index; u32 last; size_t key_offset; size_t key_size; } ;
/* Tasklet-driven deferred-work wrapper used by the requester/completer/responder. */
162 struct rxe_task { void *obj; struct tasklet_struct tasklet; int state; spinlock_t state_lock; void *arg; int (*func)(void *); int ret; char name[16U]; } ;
/* --- rxe resource objects: each embeds a rxe_pool_entry plus the matching
 * ib_* verbs object, so container_of works in both directions. --- */
65 struct rxe_pd { struct rxe_pool_entry pelem; struct ib_pd ibpd; } ;
84 struct rxe_queue ;
84 struct rxe_cq { struct rxe_pool_entry pelem; struct ib_cq ibcq; struct rxe_queue *queue; spinlock_t cq_lock; u8 notify; int is_user; struct tasklet_struct comp_task; } ;
102 struct rxe_sq { int max_wr; int max_sge; int max_inline; spinlock_t sq_lock; struct rxe_queue *queue; } ;
110 struct rxe_rq { int max_wr; int max_sge; spinlock_t producer_lock; spinlock_t consumer_lock; struct rxe_queue *queue; } ;
118 struct rxe_srq { struct rxe_pool_entry pelem; struct ib_srq ibsrq; struct rxe_pd *pd; struct rxe_rq rq; u32 srq_num; int limit; int error; } ;
129 enum rxe_qp_state { QP_STATE_RESET = 0, QP_STATE_INIT = 1, QP_STATE_READY = 2, QP_STATE_DRAIN = 3, QP_STATE_DRAINED = 4, QP_STATE_ERROR = 5 } ;
/* Requester-side state machine for one QP. */
140 struct rxe_req_info { enum rxe_qp_state state; int wqe_index; u32 psn; int opcode; atomic_t rd_atomic; int wait_fence; int need_rd_atomic; int wait_psn; int need_retry; int noack_pkts; struct rxe_task task; } ;
154 struct rxe_comp_info { u32 psn; int opcode; int timeout; int timeout_retry; u32 retry_cnt; u32 rnr_retry; struct rxe_task task; } ;
164 enum rdatm_res_state { rdatm_res_state_next = 0, rdatm_res_state_new = 1, rdatm_res_state_replay = 2 } ;
170 struct __anonstruct_atomic_653 { struct sk_buff *skb; } ;
170 struct rxe_mem ;
170 struct __anonstruct_read_654 { struct rxe_mem *mr; u64 va_org; u32 rkey; u32 length; u64 va; u32 resid; } ;
170 union __anonunion____missing_field_name_652 { struct __anonstruct_atomic_653 atomic; struct __anonstruct_read_654 read; } ;
/* Saved responder resource for replaying RDMA read / atomic responses. */
170 struct resp_res { int type; u32 first_psn; u32 last_psn; u32 cur_psn; enum rdatm_res_state state; union __anonunion____missing_field_name_652 __annonCompField156; } ;
192 struct __anonstruct_srq_wqe_655 { struct rxe_recv_wqe wqe; struct ib_sge sge[32U]; } ;
/* Responder-side state machine for one QP. */
192 struct rxe_resp_info { enum rxe_qp_state state; u32 msn; u32 psn; int opcode; int drop_msg; int goto_error; int sent_psn_nak; enum ib_wc_status status; u8 aeth_syndrome; struct rxe_recv_wqe *wqe; u64 va; struct rxe_mem *mr; u32 resid; u32 rkey; u64 atomic_orig; struct __anonstruct_srq_wqe_655 srq_wqe; struct resp_res *resources; unsigned int res_head; unsigned int res_tail; struct resp_res *res; struct rxe_task task; } ;
229 struct rxe_qp { struct rxe_pool_entry pelem; struct ib_qp ibqp; struct ib_qp_attr attr; unsigned int valid; unsigned int mtu; int is_user; struct rxe_pd *pd; struct rxe_srq *srq; struct rxe_cq *scq; struct rxe_cq *rcq; enum ib_sig_type sq_sig_type; struct rxe_sq sq; struct rxe_rq rq; struct socket *sk; struct rxe_av pri_av; struct rxe_av alt_av; struct list_head grp_list; spinlock_t grp_lock; struct sk_buff_head req_pkts; struct sk_buff_head resp_pkts; struct sk_buff_head send_pkts; struct rxe_req_info req; struct rxe_comp_info comp; struct rxe_resp_info resp; atomic_t ssn; atomic_t skb_out; int need_req_skb; struct timer_list retrans_timer; u64 qp_timeout_jiffies; struct timer_list rnr_nak_timer; spinlock_t state_lock; } ;
282 enum rxe_mem_state { RXE_MEM_STATE_ZOMBIE = 0, RXE_MEM_STATE_INVALID = 1, RXE_MEM_STATE_FREE = 2, RXE_MEM_STATE_VALID = 3 } ;
289 enum rxe_mem_type { RXE_MEM_TYPE_NONE = 0, RXE_MEM_TYPE_DMA = 1, RXE_MEM_TYPE_MR = 2, RXE_MEM_TYPE_FMR = 3, RXE_MEM_TYPE_MW = 4 } ;
297 struct rxe_phys_buf { u64 addr; u64 size; } ;
304 struct rxe_map { struct rxe_phys_buf buf[256U]; } ;
308 union __anonunion____missing_field_name_656 { struct ib_mr ibmr; struct ib_mw ibmw; } ;
/* Software memory region/window: two-level map of physical buffers. */
308 struct rxe_mem { struct rxe_pool_entry pelem; union __anonunion____missing_field_name_656 __annonCompField157; struct rxe_pd *pd; struct ib_umem *umem; u32 lkey; u32 rkey; enum rxe_mem_state state; enum rxe_mem_type type; u64 va; u64 iova; size_t length; u32 offset; int access; int page_shift; int page_mask; int map_shift; int map_mask; u32 num_buf; u32 nbuf; u32 max_buf; u32 num_map; struct rxe_map **map; } ;
362 struct rxe_port { struct ib_port_attr attr; u16 *pkey_tbl; __be64 port_guid; __be64 subnet_prefix; spinlock_t port_lock; unsigned int mtu_cap; u32 qp_smi_index; u32 qp_gsi_index; } ;
/* Interface-abstraction callbacks: how rxe sends/receives packets over the
 * underlying net_device. */
374 struct rxe_ifc_ops { void (*release)(struct rxe_dev *); __be64 (*node_guid)(struct rxe_dev *); __be64 (*port_guid)(struct rxe_dev *); struct device * (*dma_device)(struct rxe_dev *); int (*mcast_add)(struct rxe_dev *, union ib_gid *); int (*mcast_delete)(struct rxe_dev *, union ib_gid *); int (*prepare)(struct rxe_dev *, struct rxe_pkt_info *, struct sk_buff *, u32 *); int (*send)(struct rxe_dev *, struct rxe_pkt_info *, struct sk_buff *); int (*loopback)(struct sk_buff *); struct sk_buff * (*init_packet)(struct rxe_dev *, struct rxe_av *, int, struct rxe_pkt_info *); char * (*parent_name)(struct rxe_dev *, unsigned int); enum rdma_link_layer (*link_layer)(struct rxe_dev *, unsigned int); } ;
393 struct rxe_dev { struct ib_device ib_dev; struct ib_device_attr attr; int max_ucontext; int max_inline_data; struct kref ref_cnt; struct mutex usdev_lock; struct rxe_ifc_ops *ifc_ops; struct net_device *ndev; int xmit_errors; struct rxe_pool uc_pool; struct rxe_pool pd_pool; struct rxe_pool ah_pool; struct rxe_pool srq_pool; struct rxe_pool qp_pool; struct rxe_pool cq_pool; struct rxe_pool mr_pool; struct rxe_pool mw_pool; struct rxe_pool mc_grp_pool; struct rxe_pool mc_elem_pool; spinlock_t pending_lock; struct list_head pending_mmaps; spinlock_t mmap_offset_lock; int mmap_offset; struct rxe_port port; struct list_head list; } ;
279 struct _ddebug { const char *modname; const char *function; const char *filename; const char *format; unsigned int lineno; unsigned char flags; } ;
405 union __anonunion_ex_612 { __u32 imm_data; __u32 invalidate_rkey; } ;
405 struct ib_uverbs_wc { __u64 wr_id; __u32 status; __u32 opcode; __u32 vendor_err; __u32 byte_len; union __anonunion_ex_612 ex; __u32 qp_num; __u32 src_qp; __u32 wc_flags; __u16 pkey_index; __u16 slid; __u8 sl; __u8 dlid_path_bits; __u8 port_num; __u8 reserved; } ;
82 enum rxe_hdr_mask { RXE_LRH_MASK = 1, RXE_GRH_MASK = 2, RXE_BTH_MASK = 4, RXE_IMMDT_MASK = 1024, RXE_RETH_MASK = 8, RXE_AETH_MASK = 16, RXE_ATMETH_MASK = 32, RXE_ATMACK_MASK = 64, RXE_IETH_MASK = 128, RXE_RDETH_MASK = 256, RXE_DETH_MASK = 512, RXE_PAYLOAD_MASK = 2048, RXE_REQ_MASK = 4096, RXE_ACK_MASK = 8192, RXE_SEND_MASK = 16384, RXE_WRITE_MASK = 32768, RXE_READ_MASK = 65536, RXE_ATOMIC_MASK = 131072, RXE_RWR_MASK = 262144, RXE_COMP_MASK = 524288, RXE_START_MASK = 1048576, RXE_MIDDLE_MASK = 2097152, RXE_END_MASK = 4194304, RXE_LOOPBACK_MASK = 16777216, RXE_READ_OR_ATOMIC = 196608, RXE_WRITE_OR_SEND = 49152 } ;
111 struct rxe_opcode_info { char *name; enum rxe_hdr_mask mask; int length; int offset[12U]; } ;
54 struct rxe_bth { u8 opcode; u8 flags; __be16 pkey; __be32 qpn; __be32 apsn; } ;
743 struct rxe_aeth { __be32 smsn; } ;
823 struct rxe_atmack { __be64 orig; } ;
103 struct mminfo { __u64 offset; __u32 size; __u32 pad; } ;
77 union __anonunion____missing_field_name_651 { struct ib_wc ibwc; struct ib_uverbs_wc uibwc; } ;
77 struct rxe_cqe { union __anonunion____missing_field_name_651 __annonCompField155; } ;
82 struct rxe_mmap_info { struct list_head pending_mmaps; struct ib_ucontext *context; struct kref ref; void *obj; struct mminfo info; } ;
101 enum copy_direction { to_mem_obj = 0, from_mem_obj = 1 } ;
285 struct rxe_queue_buf { __u32 log2_elem_size; __u32 index_mask; __u32 pad_1[30U]; __u32 producer_index; __u32 pad_2[31U]; __u32 consumer_index; __u32 pad_3[31U]; __u8 data[0U]; } ;
68 struct rxe_queue { struct rxe_dev *rxe; struct rxe_queue_buf *buf; struct rxe_mmap_info *ip; size_t buf_size; size_t elem_size; unsigned int log2_elem_size; unsigned int index_mask; } ;
177 enum comp_state { COMPST_GET_ACK = 0, COMPST_GET_WQE = 1, COMPST_COMP_WQE = 2, COMPST_COMP_ACK = 3, COMPST_CHECK_PSN = 4, COMPST_CHECK_ACK = 5, COMPST_READ = 6, COMPST_ATOMIC = 7, COMPST_WRITE_SEND = 8, COMPST_UPDATE_COMP = 9, COMPST_ERROR_RETRY = 10, COMPST_RNR_RETRY = 11, COMPST_ERROR = 12, COMPST_EXIT = 13, COMPST_DONE = 14 } ;
26 union __anonunion___u_42 { int __val; char __c[1U]; } ;
52 enum rxe_wr_mask { WR_INLINE_MASK = 1, WR_ATOMIC_MASK = 2, WR_SEND_MASK = 4, WR_READ_MASK = 8, WR_WRITE_MASK = 16, WR_LOCAL_MASK = 32, WR_REG_MASK = 64, WR_READ_OR_WRITE_MASK = 24, WR_READ_WRITE_OR_SEND_MASK = 28, WR_WRITE_OR_SEND_MASK = 20, WR_ATOMIC_OR_READ_MASK = 10 } ;
66 struct rxe_wr_opcode_info { char *name; enum rxe_wr_mask mask[8U]; } ;
479 struct rxe_deth { __be32 qkey; __be32 sqp; } ;
542 struct rxe_reth { __be64 va; __be32 rkey; __be32 len; } ;
629 struct rxe_atmeth { __be64 va; __be32 rkey; __be64 swap_add; __be64 comp; } ;
856 struct rxe_immdt { __be32 imm; } ;
889 struct rxe_ieth { __be32 rkey; } ;
94 enum wqe_state { wqe_state_posted = 0, wqe_state_processing = 1, wqe_state_pending = 2, wqe_state_done = 3, wqe_state_error = 4 } ;
39 typedef __u16 __sum16;
43 struct ratelimit_state { raw_spinlock_t lock; int interval; int burst; int printed; int missed; unsigned long begin; unsigned long flags; } ;
106 struct ipv6hdr { unsigned char priority; unsigned char version; __u8 flow_lbl[3U]; __be16 payload_len; __u8 nexthdr; __u8 hop_limit; struct in6_addr saddr; struct in6_addr daddr; } ;
1047 struct iphdr { unsigned char ihl; unsigned char version; __u8 tos; __be16 tot_len; __be16 id; __be16 frag_off; __u8 ttl; __u8 protocol; __sum16 check; __be32 saddr; __be32 daddr; } ;
611 struct __anonstruct____missing_field_name_597 { u8 reserved[20U]; struct iphdr roce4grh; } ;
611 union rdma_network_hdr { struct ib_grh ibgrh; struct __anonstruct____missing_field_name_597 __annonCompField138; } ;
126 enum lookup_type { lookup_local = 0, lookup_remote = 1 } ;
177 enum resp_states { RESPST_NONE = 0, RESPST_GET_REQ = 1, RESPST_CHK_PSN = 2, RESPST_CHK_OP_SEQ = 3, RESPST_CHK_OP_VALID = 4, RESPST_CHK_RESOURCE = 5, RESPST_CHK_LENGTH = 6, RESPST_CHK_RKEY = 7, RESPST_EXECUTE = 8, RESPST_READ_REPLY = 9, RESPST_COMPLETE = 10, RESPST_ACKNOWLEDGE = 11, RESPST_CLEANUP = 12, RESPST_DUPLICATE_REQUEST = 13, RESPST_ERR_MALFORMED_WQE = 14, RESPST_ERR_UNSUPPORTED_OPCODE = 15, RESPST_ERR_MISALIGNED_ATOMIC = 16, RESPST_ERR_PSN_OUT_OF_SEQ = 17, RESPST_ERR_MISSING_OPCODE_FIRST = 18, RESPST_ERR_MISSING_OPCODE_LAST_C = 19, RESPST_ERR_MISSING_OPCODE_LAST_D1E = 20, RESPST_ERR_TOO_MANY_RDMA_ATM_REQ = 21, RESPST_ERR_RNR = 22, RESPST_ERR_RKEY_VIOLATION = 23, RESPST_ERR_LENGTH = 24, RESPST_ERR_CQ_OVERFLOW = 25, RESPST_ERROR = 26, RESPST_RESET = 27, RESPST_DONE = 28, RESPST_EXIT = 29 } ;
343 struct rxe_mc_grp { struct rxe_pool_entry pelem; spinlock_t mcg_lock; struct rxe_dev *rxe; struct list_head qp_list; union ib_gid mgid; int num_qp; u32 qkey; u16 pkey; } ;
354 struct rxe_mc_elem { struct rxe_pool_entry pelem; struct list_head qp_list; struct list_head grp_list; struct rxe_qp *qp; struct rxe_mc_grp *grp; } ;
26 union __anonunion___u_42___0 { int __val; char __c[1U]; } ;
38 union __anonunion___u_44___0 { int __val; char __c[1U]; } ;
163 struct rxe_type_info { char *name; size_t size; void (*cleanup)(void *); enum rxe_pool_flags flags; u32 max_index; u32 min_index; size_t key_offset; size_t key_size; struct kmem_cache *cache; } ;
685 struct cpuinfo_x86 ;
80 struct cpuinfo_x86 { __u8 x86; __u8 x86_vendor; __u8 x86_model; __u8 x86_mask; int x86_tlbsize; __u8 x86_virt_bits; __u8 x86_phys_bits; __u8 x86_coreid_bits; __u32 extended_cpuid_level; int cpuid_level; __u32 x86_capability[19U]; char x86_vendor_id[16U]; char x86_model_id[64U]; int x86_cache_size; int x86_cache_alignment; int x86_cache_max_rmid; int x86_cache_occ_scale; int x86_power; unsigned long loops_per_jiffy; u16 x86_max_cores; u16 apicid; u16 initial_apicid; u16 x86_clflush_size; u16 booted_cores; u16 phys_proc_id; u16 logical_proc_id; u16 cpu_core_id; u16 cpu_index; u32 microcode; } ;
3319 typedef int pao_T_____33;
3319 typedef int pao_T_____34;
3319 typedef int pao_T_____35;
3319 typedef int pao_T_____36;
3330 typedef int pao_T_____37;
3330 typedef int pao_T_____38;
3330 typedef int pao_T_____39;
3330 typedef int pao_T_____40;
1210 struct ib_rdma_wr { struct ib_send_wr wr; u64 remote_addr; u32 rkey; } ;
1222 struct ib_atomic_wr { struct ib_send_wr wr; u64 remote_addr; u64 compare_add; u64 swap; u64 compare_add_mask; u64 swap_mask; u32 rkey; } ;
1237 struct ib_ud_wr { struct ib_send_wr wr; struct ib_ah *ah; void *header; int hlen; int mss; u32 remote_qpn; u32 remote_qkey; u16 pkey_index; u8 port_num; } ;
1254 struct ib_reg_wr { struct ib_send_wr wr; struct ib_mr *mr; u32 key; int access; } ;
60 struct rxe_ucontext { struct rxe_pool_entry pelem; struct ib_ucontext ibuc; } ;
70 struct rxe_ah { struct rxe_pool_entry pelem; struct ib_ah ibah; struct rxe_pd *pd; struct rxe_av av; } ;
122 enum rdma_network_type { RDMA_NETWORK_IB = 0, RDMA_NETWORK_ROCE_V1 = 0, RDMA_NETWORK_IPV4 = 1, RDMA_NETWORK_IPV6 = 2 } ;
27 union __anonunion___u_9___0 { struct list_head *__val; char __c[1U]; } ;
38 union __anonunion___u_44___1 { int __val; char __c[1U]; } ;
66 enum sock_shutdown_cmd { SHUT_RD = 0, SHUT_WR = 1, SHUT_RDWR = 2 } ;
1064 enum ib_qp_attr_mask { IB_QP_STATE = 1, IB_QP_CUR_STATE = 2, IB_QP_EN_SQD_ASYNC_NOTIFY = 4, IB_QP_ACCESS_FLAGS = 8, IB_QP_PKEY_INDEX = 16, IB_QP_PORT = 32, IB_QP_QKEY = 64, IB_QP_AV = 128, IB_QP_PATH_MTU = 256, IB_QP_TIMEOUT = 512, IB_QP_RETRY_CNT = 1024, IB_QP_RNR_RETRY = 2048, IB_QP_RQ_PSN = 4096, IB_QP_MAX_QP_RD_ATOMIC = 8192, IB_QP_ALT_PATH = 16384, IB_QP_MIN_RNR_TIMER = 32768, IB_QP_SQ_PSN = 65536, IB_QP_MAX_DEST_RD_ATOMIC = 131072, IB_QP_PATH_MIG_STATE = 262144, IB_QP_CAP = 524288, IB_QP_DEST_QPN = 1048576, IB_QP_RESERVED1 = 2097152, IB_QP_RESERVED2 = 4194304, IB_QP_RESERVED3 = 8388608, IB_QP_RESERVED4 = 16777216 } ;
27 union __anonunion___u_9___1 { struct list_head *__val; char __c[1U]; } ;
189 union __anonunion___u_13 { struct list_head *__val; char __c[1U]; } ;
38 union __anonunion___u_44___2 { int __val; char __c[1U]; } ;
424 struct udphdr { __be16 source; __be16 dest; __be16 len; __sum16 check; } ;
27 union __anonunion___u_9___2 { struct list_head *__val; char __c[1U]; } ;
189 union __anonunion___u_13___0 { struct list_head *__val; char __c[1U]; } ;
189 union __anonunion___u_13___1 { struct list_head *__val; char __c[1U]; } ;
310 struct skb_frag_struct ;
310 typedef struct skb_frag_struct skb_frag_t;
311 struct __anonstruct_page_395 { struct page *p; } ;
311 struct skb_frag_struct { struct __anonstruct_page_395 page; __u32 page_offset; __u32 size; } ;
344 struct skb_shared_hwtstamps { ktime_t hwtstamp; } ;
410 struct skb_shared_info { unsigned char nr_frags; __u8 tx_flags; unsigned short gso_size; unsigned short gso_segs; unsigned short gso_type; struct sk_buff *frag_list; struct skb_shared_hwtstamps hwtstamps; u32 tskey; __be32 ip6_frag_id; atomic_t dataref; void *destructor_arg; skb_frag_t frags[17U]; } ;
2281 struct netdev_notifier_info { struct net_device *dev; } ;
201 struct ip_options { __be32 faddr; __be32 nexthop; unsigned char optlen; unsigned char srr; unsigned char rr; unsigned char ts; unsigned char is_strictroute; unsigned char srr_is_hit; unsigned char is_changed; unsigned char rr_needaddr; unsigned char ts_needtime; unsigned char ts_needaddr; unsigned char router_alert; unsigned char cipso; unsigned char __pad2; unsigned char __data[0U]; } ;
239 struct net_generic { unsigned int len; struct callback_head rcu; void *ptr[0U]; } ;
345 struct lwtunnel_state { __u16 type; __u16 flags; atomic_t refcnt; int (*orig_output)(struct net *, struct sock *, struct sk_buff *); int (*orig_input)(struct sk_buff *); int len; __u8 data[0U]; } ;
57 struct fib6_node { struct fib6_node *parent; struct fib6_node *left; struct fib6_node *right; struct fib6_node *subtree; struct rt6_info *leaf; __u16 fn_bit; __u16 fn_flags; int fn_sernum; struct rt6_info *rr_ptr; } ;
83 struct rt6key { struct in6_addr addr; int plen; } ;
93 struct rt6_info { struct dst_entry dst; struct fib6_table *rt6i_table; struct fib6_node *rt6i_node; struct in6_addr rt6i_gateway; struct list_head rt6i_siblings; unsigned int rt6i_nsiblings; atomic_t rt6i_ref; struct rt6key rt6i_dst; u32 rt6i_flags; struct rt6key rt6i_src; struct rt6key rt6i_prefsrc; struct list_head rt6i_uncached; struct uncached_list *rt6i_uncached_list; struct inet6_dev *rt6i_idev; struct rt6_info **rt6i_pcpu; u32 rt6i_metric; u32 rt6i_pmtu; unsigned short rt6i_nfheader_len; u8 rt6i_protocol; } ;
207 struct rt6_statistics { __u32 fib_nodes; __u32 fib_route_nodes; __u32 fib_rt_alloc; __u32 fib_rt_entries; __u32 fib_rt_cache; __u32 fib_discarded_routes; } ;
216 struct fib6_table { struct hlist_node tb6_hlist; u32 tb6_id; rwlock_t tb6_lock; struct fib6_node tb6_root; struct inet_peer_base tb6_peers; } ;
20 struct prefix_info { __u8 type; __u8 length; __u8 prefix_len; unsigned char reserved; unsigned char autoconf; unsigned char onlink; __be32 valid; __be32 prefered; __be32 reserved2; struct in6_addr prefix; } ;
196 struct ipv6_stub { int (*ipv6_sock_mc_join)(struct sock *, int, const struct in6_addr *); int (*ipv6_sock_mc_drop)(struct sock *, int, const struct in6_addr *); int (*ipv6_dst_lookup)(struct net *, struct sock *, struct dst_entry **, struct flowi6 *); void (*udpv6_encap_enable)(); void (*ndisc_send_na)(struct net_device *, const struct in6_addr *, const struct in6_addr *, bool , bool , bool , bool ); struct neigh_table *nd_tbl; } ;
430 struct inet_skb_parm { int iif; struct ip_options opt; unsigned char flags; u16 frag_max_size; } ;
52 struct udp_hslot { struct hlist_head head; int count; spinlock_t lock; } ;
66 struct udp_table { struct udp_hslot *hash; struct udp_hslot *hash2; unsigned int mask; unsigned int log; } ;
353 union __anonunion____missing_field_name_613 { struct in_addr local_ip; struct in6_addr local_ip6; } ;
353 union __anonunion____missing_field_name_614 { struct in_addr peer_ip; struct in6_addr peer_ip6; } ;
353 struct udp_port_cfg { u8 family; union __anonunion____missing_field_name_613 __annonCompField138; union __anonunion____missing_field_name_614 __annonCompField139; __be16 local_udp_port; __be16 peer_udp_port; unsigned char use_udp_checksums; unsigned char use_udp6_tx_checksums; unsigned char use_udp6_rx_checksums; unsigned char ipv6_v6only; } ;
71 struct udp_tunnel_sock_cfg { void *sk_user_data; __u8 encap_type; int (*encap_rcv)(struct sock *, struct sk_buff *); void (*encap_destroy)(struct sock *); struct sk_buff ** (*gro_receive)(struct sock *, struct sk_buff **, struct sk_buff *); int (*gro_complete)(struct sock *, struct sk_buff *, int); } ;
91 struct udp_tunnel_info { unsigned short type; sa_family_t sa_family; __be16 port; } ;
165 struct rxe_recv_sockets { struct socket *sk4; struct socket *sk6; } ;
1 void * __builtin_memcpy(void *, const void *, unsigned long);
1 long int __builtin_expect(long exp, long c);
243 void __write_once_size(volatile void *p, void *res, int size);
275 void __pr_err(const char *, ...);
278 void __pr_info(const char *, ...);
26 void * ldv_undef_ptr();
25 void INIT_LIST_HEAD(struct list_head *list);
71 void warn_slowpath_null(const char *, const int);
36 void atomic_set(atomic_t *v, int i);
78 bool atomic_sub_and_test(int i, atomic_t *v);
93 void __raw_spin_lock_init(raw_spinlock_t *, const char *, struct lock_class_key *);
289 raw_spinlock_t * spinlock_check(spinlock_t *lock);
119 void __mutex_init(struct mutex *, const char *, struct lock_class_key *);
31 void kref_init(struct kref *kref);
67 int kref_sub(struct kref *kref, unsigned int count, void (*release)(struct kref *));
96 int kref_put(struct kref *kref, void (*release)(struct kref *));
154 void kfree(const void *);
322 void * ldv_kmem_cache_alloc_20(struct kmem_cache *ldv_func_arg1, gfp_t flags);
326 void * ldv_kmem_cache_alloc_40(struct kmem_cache *ldv_func_arg1, gfp_t flags);
579 void * kcalloc(size_t n, size_t size, gfp_t flags);
18 void ldv_check_alloc_flags(gfp_t flags);
971 struct sk_buff * ldv_skb_clone_30(struct sk_buff *ldv_func_arg1, gfp_t flags);
975 struct sk_buff * ldv_skb_clone_39(struct sk_buff *ldv_func_arg1, gfp_t flags);
980 struct sk_buff * ldv_skb_copy_32(const struct sk_buff *ldv_func_arg1, gfp_t flags);
993 int ldv_pskb_expand_head_27(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
997 int ldv_pskb_expand_head_28(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
1001 int ldv_pskb_expand_head_36(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
1005 int ldv_pskb_expand_head_37(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
1009 int ldv_pskb_expand_head_38(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
2395 struct sk_buff * ldv___netdev_alloc_skb_33(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);
2399 struct sk_buff * ldv___netdev_alloc_skb_34(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);
2403 struct sk_buff * ldv___netdev_alloc_skb_35(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);
331 int ib_mtu_enum_to_int(enum ib_mtu mtu);
2087 void ib_dealloc_device(struct ib_device *);
50 int rxe_net_init();
51 void rxe_net_exit();
37 enum ib_mtu rxe_mtu_int_to_enum(int mtu);
54 enum ib_mtu eth_mtu_int_to_enum(int mtu);
114 int rxe_cache_init();
117 void rxe_cache_exit();
123 int rxe_pool_init(struct rxe_dev *rxe, struct rxe_pool *pool, enum rxe_elem_type type, unsigned int max_elem);
127 int rxe_pool_cleanup(struct rxe_pool *pool);
475 int rxe_register_device(struct rxe_dev *rxe);
476 int rxe_unregister_device(struct rxe_dev *rxe);
62 int rxe_set_mtu(struct rxe_dev *rxe, unsigned int ndev_mtu);
64 int rxe_add(struct rxe_dev *rxe, unsigned int mtu);
65 void rxe_remove(struct rxe_dev *rxe);
66 void rxe_remove_all();
70 void rxe_dev_put(struct rxe_dev *rxe);
226 void rxe_release(struct kref *kref);
44 void rxe_cleanup_ports(struct rxe_dev *rxe);
54 void rxe_cleanup(struct rxe_dev *rxe);
83 const char __kstrtab_rxe_dev_put[12U] = { 'r', 'x', 'e', '_', 'd', 'e', 'v', '_', 'p', 'u', 't', '\x0' };
83 const struct kernel_symbol __ksymtab_rxe_dev_put;
83 const struct kernel_symbol __ksymtab_rxe_dev_put = { (unsigned long)(&rxe_dev_put), (const char *)(&__kstrtab_rxe_dev_put) };
86 int rxe_init_device_param(struct rxe_dev *rxe);
135 int rxe_init_port_param(struct rxe_port *port);
166 int rxe_init_ports(struct rxe_dev *rxe);
190 int rxe_init_pools(struct rxe_dev *rxe);
269 int rxe_init(struct rxe_dev *rxe);
315 const char __kstrtab_rxe_set_mtu[12U] = { 'r', 'x', 'e', '_', 's', 'e', 't', '_', 'm', 't', 'u', '\x0' };
315 const struct kernel_symbol __ksymtab_rxe_set_mtu;
315 const struct kernel_symbol __ksymtab_rxe_set_mtu = { (unsigned long)(&rxe_set_mtu), (const char *)(&__kstrtab_rxe_set_mtu) };
344 const char __kstrtab_rxe_add[8U] = { 'r', 'x', 'e', '_', 'a', 'd', 'd', '\x0' };
344 const struct kernel_symbol __ksymtab_rxe_add;
344 const struct kernel_symbol __ksymtab_rxe_add = { (unsigned long)(&rxe_add), (const char *)(&__kstrtab_rxe_add) };
353 const char __kstrtab_rxe_remove[11U] = { 'r', 'x', 'e', '_', 'r', 'e', 'm', 'o', 'v', 'e', '\x0' };
353 const struct kernel_symbol __ksymtab_rxe_remove;
353 const struct kernel_symbol __ksymtab_rxe_remove = { (unsigned long)(&rxe_remove), (const char *)(&__kstrtab_rxe_remove) };
355 int rxe_module_init();
377 void rxe_module_exit();
405 void ldv_check_final_state();
414 void ldv_initialize();
417 void ldv_handler_precall();
420 int nondet_int();
423 int LDV_IN_INTERRUPT = 0;
426 void ldv_main0_sequence_infinite_withcheck_stateful();
7 __u32 __arch_swab32(__u32 val);
14 __u64 __arch_swab64(__u64 val);
55 __u32 __fswab32(__u32 val);
64 __u64 __fswab64(__u64 val);
276 void __pr_warn(const char *, ...);
55 void __dynamic_pr_debug(struct _ddebug *, const char *, ...);
8 void ldv_spin_lock();
9 void ldv_spin_unlock();
56 void * __memset(void *, int, size_t );
89 void atomic_inc(atomic_t *v);
45 void _raw_spin_unlock_irqrestore(raw_spinlock_t *, unsigned long);
376 void ldv_spin_unlock_irqrestore_52(spinlock_t *lock, unsigned long flags);
376 void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags);
78 extern volatile unsigned long jiffies;
365 unsigned long int __usecs_to_jiffies(const unsigned int);
401 unsigned long int usecs_to_jiffies(const unsigned int u);
191 int mod_timer(struct timer_list *, unsigned long);
322 void * ldv_kmem_cache_alloc_60(struct kmem_cache *ldv_func_arg1, gfp_t flags);
326 void * ldv_kmem_cache_alloc_80(struct kmem_cache *ldv_func_arg1, gfp_t flags);
894 void kfree_skb(struct sk_buff *);
971 struct sk_buff * ldv_skb_clone_70(struct sk_buff *ldv_func_arg1, gfp_t flags);
975 struct sk_buff * ldv_skb_clone_79(struct sk_buff *ldv_func_arg1, gfp_t flags);
980 struct sk_buff * ldv_skb_copy_72(const struct sk_buff *ldv_func_arg1, gfp_t flags);
993 int ldv_pskb_expand_head_67(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
997 int ldv_pskb_expand_head_68(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
1001 int ldv_pskb_expand_head_76(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
1005 int ldv_pskb_expand_head_77(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
1009 int ldv_pskb_expand_head_78(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
1533 __u32 skb_queue_len(const struct sk_buff_head *list_);
1725 void skb_queue_tail(struct sk_buff_head *, struct sk_buff *);
1757 struct sk_buff * skb_dequeue(struct sk_buff_head *);
2395 struct sk_buff * ldv___netdev_alloc_skb_73(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);
2399 struct sk_buff * ldv___netdev_alloc_skb_74(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);
2403 struct sk_buff * ldv___netdev_alloc_skb_75(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);
127 struct rxe_opcode_info rxe_opcode[256U];
152 u8 __bth_pad(void *arg);
330 u8 bth_pad(struct rxe_pkt_info *pkt);
768 u8 __aeth_syn(void *arg);
800 u8 aeth_syn(struct rxe_pkt_info *pkt);
831 u64 __atmack_orig(void *arg);
845 u64 atmack_orig(struct rxe_pkt_info *pkt);
940 void * payload_addr(struct rxe_pkt_info *pkt);
946 size_t payload_size(struct rxe_pkt_info *pkt);
155 void rxe_elem_release(struct kref *kref);
87 void rxe_run_task(struct rxe_task *task, int sched);
53 int psn_compare(u32 psn_a, u32 psn_b);
430 struct rxe_dev * to_rdev(struct ib_device *dev);
65 int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited);
121 int copy_data(struct rxe_dev *rxe, struct rxe_pd *pd, int access, struct rxe_dma_info *dma, void *addr, int length, enum copy_direction dir, u32 *crcp);
161 void rxe_qp_error(struct rxe_qp *qp);
172 enum ib_qp_type qp_type(struct rxe_qp *qp);
205 void retransmit_timer(unsigned long data);
228 int rxe_completer(void *arg);
237 void rxe_comp_queue_pkt(struct rxe_dev *rxe, struct rxe_qp *qp, struct sk_buff *skb);
108 int queue_empty(struct rxe_queue *q);
126 void advance_consumer(struct rxe_queue *q);
138 void * consumer_addr(struct rxe_queue *q);
173 void * queue_head(struct rxe_queue *q);
59 char *comp_state_name[15U] = { (char *)"GET ACK", (char *)"GET WQE", (char *)"COMP WQE", (char *)"COMP ACK", (char *)"CHECK PSN", (char *)"CHECK ACK", (char *)"READ", (char *)"ATOMIC", (char *)"WRITE/SEND", (char *)"UPDATE COMP", (char *)"ERROR RETRY", (char *)"RNR RETRY", (char *)"ERROR", (char *)"EXIT", (char *)"DONE" };
77 unsigned long rnrnak_usec[32U] = { 655360UL, 10UL, 20UL, 30UL, 40UL, 60UL, 80UL, 120UL, 160UL, 240UL, 320UL, 480UL, 640UL, 960UL, 1280UL, 1920UL, 2560UL, 3840UL, 5120UL, 7680UL, 10240UL, 15360UL, 20480UL, 30720UL, 40960UL, 61410UL, 81920UL, 122880UL, 163840UL, 245760UL, 327680UL, 491520UL };
112 unsigned long int rnrnak_jiffies(u8 timeout);
118 enum ib_wc_opcode wr_to_wc_opcode(enum ib_wr_opcode opcode);
160 enum comp_state get_wqe(struct rxe_qp *qp, struct rxe_pkt_info *pkt, struct rxe_send_wqe **wqe_p);
188 void reset_retry_counters(struct rxe_qp *qp);
194 enum comp_state check_psn(struct rxe_qp *qp, struct rxe_pkt_info *pkt, struct rxe_send_wqe *wqe);
233 enum comp_state check_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt, struct rxe_send_wqe *wqe);
348 enum comp_state do_read(struct rxe_qp *qp, struct rxe_pkt_info *pkt, struct rxe_send_wqe *wqe);
367 enum comp_state do_atomic(struct rxe_qp *qp, struct rxe_pkt_info *pkt, struct rxe_send_wqe *wqe);
385 void make_send_cqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe, struct rxe_cqe *cqe);
415 void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe);
438 enum comp_state complete_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt, struct rxe_send_wqe *wqe);
484 enum comp_state complete_wqe(struct rxe_qp *qp, struct rxe_pkt_info *pkt, struct rxe_send_wqe *wqe);
218 void __read_once_size(const volatile void *p, void *res, int size);
46 __u16 __fswab16(__u16 val);
31 void * __memcpy(void *, const void *, size_t );
16 void __xadd_wrong_size();
24 int atomic_read(const atomic_t *v);
154 int atomic_add_return(int i, atomic_t *v);
166 int atomic_sub_return(int i, atomic_t *v);
184 int timer_pending(const struct timer_list *timer);
322 void * ldv_kmem_cache_alloc_100(struct kmem_cache *ldv_func_arg1, gfp_t flags);
326 void * ldv_kmem_cache_alloc_120(struct kmem_cache *ldv_func_arg1, gfp_t flags);
971 struct sk_buff * ldv_skb_clone_110(struct sk_buff *ldv_func_arg1, gfp_t flags);
975 struct sk_buff * ldv_skb_clone_119(struct sk_buff *ldv_func_arg1, gfp_t flags);
980 struct sk_buff * ldv_skb_copy_112(const struct sk_buff *ldv_func_arg1, gfp_t flags);
993 int ldv_pskb_expand_head_107(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
997 int ldv_pskb_expand_head_108(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
1001 int ldv_pskb_expand_head_116(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
1005 int ldv_pskb_expand_head_117(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
1009 int ldv_pskb_expand_head_118(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
2395 struct sk_buff * ldv___netdev_alloc_skb_113(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);
2399 struct sk_buff * ldv___netdev_alloc_skb_114(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);
2403 struct sk_buff * ldv___netdev_alloc_skb_115(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);
11 u32 crc32_le(u32 , const unsigned char *, size_t );
65 struct rxe_wr_opcode_info rxe_wr_opcode_info[12U];
425 void bth_init(struct rxe_pkt_info *pkt, u8 opcode, int se, int mig, int pad, u16 pkey, u32 qpn, int ack_req, u32 psn);
498 void __deth_set_qkey(void *arg, u32 qkey);
512 void __deth_set_sqp(void *arg, u32 sqp);
525 void deth_set_qkey(struct rxe_pkt_info *pkt, u32 qkey);
537 void deth_set_sqp(struct rxe_pkt_info *pkt, u32 sqp);
559 void __reth_set_va(void *arg, u64 va);
573 void __reth_set_rkey(void *arg, u32 rkey);
587 void __reth_set_len(void *arg, u32 len);
600 void reth_set_va(struct rxe_pkt_info *pkt, u64 va);
612 void reth_set_rkey(struct rxe_pkt_info *pkt, u32 rkey);
624 void reth_set_len(struct rxe_pkt_info *pkt, u32 len);
647 void __atmeth_set_va(void *arg, u64 va);
661 void __atmeth_set_rkey(void *arg, u32 rkey);
675 void __atmeth_set_swap_add(void *arg, u64 swap_add);
689 void __atmeth_set_comp(void *arg, u64 comp);
702 void atmeth_set_va(struct rxe_pkt_info *pkt, u64 va);
714 void atmeth_set_rkey(struct rxe_pkt_info *pkt, u32 rkey);
726 void atmeth_set_swap_add(struct rxe_pkt_info *pkt, u64 swap_add);
738 void atmeth_set_comp(struct rxe_pkt_info *pkt, u64 comp);
871 void __immdt_set_imm(void *arg, __be32 imm);
884 void immdt_set_imm(struct rxe_pkt_info *pkt, __be32 imm);
904 void __ieth_set_rkey(void *arg, u32 rkey);
917 void ieth_set_rkey(struct rxe_pkt_info *pkt, u32 rkey);
149 void * rxe_pool_get_index(struct rxe_pool *pool, u32 index);
465 struct rxe_mem * to_rmr(struct ib_mr *mr);
53 struct rxe_av * rxe_get_av(struct rxe_pkt_info *pkt);
142 int advance_dma_data(struct rxe_dma_info *dma, unsigned int length);
206 void rnr_nak_timer(unsigned long data);
229 int rxe_requester(void *arg);
240 unsigned int wr_opcode_mask(int opcode, struct rxe_qp *qp);
245 int rxe_xmit_packet(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pkt_info *pkt, struct sk_buff *skb);
103 int next_index(struct rxe_queue *q, int index);
144 unsigned int producer_index(struct rxe_queue *q);
149 unsigned int consumer_index(struct rxe_queue *q);
154 void * addr_from_index(struct rxe_queue *q, unsigned int index);
40 int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe, unsigned int opcode);
43 void retry_first_write_send(struct rxe_qp *qp, struct rxe_send_wqe *wqe, unsigned int mask, int npsn);
67 void req_retry(struct rxe_qp *qp);
128 struct rxe_send_wqe * req_next_wqe(struct rxe_qp *qp);
190 int next_opcode_rc(struct rxe_qp *qp, unsigned int opcode, int fits);
262 int next_opcode_uc(struct rxe_qp *qp, unsigned int opcode, int fits);
344 int check_init_depth(struct rxe_qp *qp, struct rxe_send_wqe *wqe);
364 int get_mtu(struct rxe_qp *qp, struct rxe_send_wqe *wqe);
379 struct sk_buff * init_req_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe, int opcode, int payload, struct rxe_pkt_info *pkt);
474 int fill_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe, struct rxe_pkt_info *pkt, struct sk_buff *skb, int paylen);
513 void update_wqe_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe, struct rxe_pkt_info *pkt, enum wqe_state *prev_state);
530 void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe, struct rxe_pkt_info *pkt, int payload);
162 int printk(const char *, ...);
30 void _raw_spin_lock_bh(raw_spinlock_t *);
42 void _raw_spin_unlock_bh(raw_spinlock_t *);
309 void ldv_spin_lock_bh_126(spinlock_t *lock);
309 void spin_lock_bh(spinlock_t *lock);
358 void ldv_spin_unlock_bh_130(spinlock_t *lock);
358 void spin_unlock_bh(spinlock_t *lock);
40 void kref_get(struct kref *kref);
322 void * ldv_kmem_cache_alloc_140(struct kmem_cache *ldv_func_arg1, gfp_t flags);
326 void * ldv_kmem_cache_alloc_160(struct kmem_cache *ldv_func_arg1, gfp_t flags);
75 int ___ratelimit(struct ratelimit_state *, const char *);
971 struct sk_buff * ldv_skb_clone_150(struct sk_buff *ldv_func_arg1, gfp_t flags);
975 struct sk_buff * ldv_skb_clone_159(struct sk_buff *ldv_func_arg1, gfp_t flags);
979 struct sk_buff * ldv_skb_clone_161(struct sk_buff *ldv_func_arg1, gfp_t flags);
983 struct sk_buff * ldv_skb_clone_162(struct sk_buff *ldv_func_arg1, gfp_t flags);
988 struct sk_buff * ldv_skb_copy_152(const struct sk_buff *ldv_func_arg1, gfp_t flags);
1001 int ldv_pskb_expand_head_147(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
1005 int ldv_pskb_expand_head_148(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
1009 int ldv_pskb_expand_head_156(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
1013 int ldv_pskb_expand_head_157(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
1017 int ldv_pskb_expand_head_158(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
1480 struct sk_buff * skb_peek(const struct sk_buff_head *list_);
2143 unsigned char * skb_network_header(const struct sk_buff *skb);
2395 struct sk_buff * ldv___netdev_alloc_skb_153(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);
2399 struct sk_buff * ldv___netdev_alloc_skb_154(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);
2403 struct sk_buff * ldv___netdev_alloc_skb_155(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);
81 struct ipv6hdr * ipv6_hdr(const struct sk_buff *skb);
23 struct iphdr * ip_hdr(const struct sk_buff *skb);
111 void __bth_set_opcode(void *arg, u8 opcode);
118 u8 __bth_se(void *arg);
125 void __bth_set_se(void *arg, int se);
159 void __bth_set_pad(void *arg, u8 pad);
203 void __bth_set_qpn(void *arg, u32 qpn);
260 int __bth_ack(void *arg);
267 void __bth_set_ack(void *arg, int ack);
291 void __bth_set_psn(void *arg, u32 psn);
305 void bth_set_opcode(struct rxe_pkt_info *pkt, u8 opcode);
310 u8 bth_se(struct rxe_pkt_info *pkt);
315 void bth_set_se(struct rxe_pkt_info *pkt, int se);
335 void bth_set_pad(struct rxe_pkt_info *pkt, u8 pad);
365 void bth_set_qpn(struct rxe_pkt_info *pkt, u32 qpn);
400 int bth_ack(struct rxe_pkt_info *pkt);
405 void bth_set_ack(struct rxe_pkt_info *pkt, int ack);
420 void bth_set_psn(struct rxe_pkt_info *pkt, u32 psn);
505 u32 __deth_sqp(void *arg);
531 u32 deth_sqp(struct rxe_pkt_info *pkt);
552 u64 __reth_va(void *arg);
566 u32 __reth_rkey(void *arg);
580 u32 __reth_len(void *arg);
594 u64 reth_va(struct rxe_pkt_info *pkt);
606 u32 reth_rkey(struct rxe_pkt_info *pkt);
618 u32 reth_len(struct rxe_pkt_info *pkt);
640 u64 __atmeth_va(void *arg);
654 u32 __atmeth_rkey(void *arg);
668 u64 __atmeth_swap_add(void *arg);
682 u64 __atmeth_comp(void *arg);
696 u64 atmeth_va(struct rxe_pkt_info *pkt);
708 u32 atmeth_rkey(struct rxe_pkt_info *pkt);
720 u64 atmeth_swap_add(struct rxe_pkt_info *pkt);
732 u64 atmeth_comp(struct rxe_pkt_info *pkt);
775 void __aeth_set_syn(void *arg, u8 syn);
791 void __aeth_set_msn(void *arg, u32 msn);
806 void aeth_set_syn(struct rxe_pkt_info *pkt, u8 syn);
818 void aeth_set_msn(struct rxe_pkt_info *pkt, u32 msn);
838 void __atmack_set_orig(void *arg, u64 orig);
851 void atmack_set_orig(struct rxe_pkt_info *pkt, u64 orig);
864 __be32 __immdt_imm(void *arg);
878 __be32 immdt_imm(struct rxe_pkt_info *pkt);
897 u32 __ieth_rkey(void *arg);
911 u32 ieth_rkey(struct rxe_pkt_info *pkt);
118 int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr, int length, enum copy_direction dir, u32 *crcp);
125 void * iova_to_vaddr(struct rxe_mem *mem, u64 iova, int length);
132 struct rxe_mem * lookup_mem(struct rxe_pd *pd, int access, u32 key, enum lookup_type type);
135 int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length);
167 int qp_num(struct rxe_qp *qp);
196 void free_rd_atomic_resource(struct rxe_qp *qp, struct resp_res *res);
198 void rxe_advance_resp_resource(struct rxe_qp *qp);
230 int rxe_responder(void *arg);
234 void rxe_resp_queue_pkt(struct rxe_dev *rxe, struct rxe_qp *qp, struct sk_buff *skb);
167 unsigned int queue_count(const struct rxe_queue *q);
73 char *resp_state_name[30U] = { (char *)"NONE", (char *)"GET_REQ", (char *)"CHK_PSN", (char *)"CHK_OP_SEQ", (char *)"CHK_OP_VALID", (char *)"CHK_RESOURCE", (char *)"CHK_LENGTH", (char *)"CHK_RKEY", (char *)"EXECUTE", (char *)"READ_REPLY", (char *)"COMPLETE", (char *)"ACKNOWLEDGE", (char *)"CLEANUP", (char *)"DUPLICATE_REQUEST", (char *)"ERR_MALFORMED_WQE", (char *)"ERR_UNSUPPORTED_OPCODE", (char *)"ERR_MISALIGNED_ATOMIC", (char *)"ERR_PSN_OUT_OF_SEQ", (char *)"ERR_MISSING_OPCODE_FIRST", (char *)"ERR_MISSING_OPCODE_LAST_C", (char *)"ERR_MISSING_OPCODE_LAST_D1E", (char *)"ERR_TOO_MANY_RDMA_ATM_REQ", (char *)"ERR_RNR", (char *)"ERR_RKEY_VIOLATION", (char *)"ERR_LENGTH", (char *)"ERR_CQ_OVERFLOW", (char *)"ERROR", (char *)"RESET", (char *)"DONE", (char *)"EXIT" };
121 enum resp_states get_req(struct rxe_qp *qp, struct rxe_pkt_info **pkt_p);
148 enum resp_states check_psn___0(struct rxe_qp *qp, struct rxe_pkt_info *pkt);
189 enum resp_states check_op_seq(struct rxe_qp *qp, struct rxe_pkt_info *pkt);
279 enum resp_states check_op_valid(struct rxe_qp *qp, struct rxe_pkt_info *pkt);
317 enum resp_states get_srq_wqe(struct rxe_qp *qp);
359 enum resp_states check_resource(struct rxe_qp *qp, struct rxe_pkt_info *pkt);
403 enum resp_states check_length(struct rxe_qp *qp, struct rxe_pkt_info *pkt);
418 enum resp_states check_rkey(struct rxe_qp *qp, struct rxe_pkt_info *pkt);
502 enum resp_states send_data_in(struct rxe_qp *qp, void *data_addr, int data_len);
517 enum resp_states write_data_in(struct rxe_qp *qp, struct rxe_pkt_info *pkt);
539 struct spinlock atomic_ops_lock = { { { { { 0 } }, 3735899821U, 4294967295U, (void *)-1, { 0, { 0, 0 }, "atomic_ops_lock", 0, 0UL } } } };
541 enum resp_states process_atomic(struct rxe_qp *qp, struct rxe_pkt_info *pkt);
581 struct sk_buff * prepare_ack_packet(struct rxe_qp *qp, struct rxe_pkt_info *pkt, struct rxe_pkt_info *ack, int opcode, int payload, u32 psn, u8 syndrome, u32 *crcp);
653 enum resp_states read_reply(struct rxe_qp *qp, struct rxe_pkt_info *req_pkt);
755 enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt);
812 enum resp_states do_complete___0(struct rxe_qp *qp, struct rxe_pkt_info *pkt);
919 int send_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt, u8 syndrome, u32 psn);
944 int send_atomic_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt, u8 syndrome);
992 enum resp_states acknowledge(struct rxe_qp *qp, struct rxe_pkt_info *pkt);
1008 enum resp_states cleanup(struct rxe_qp *qp, struct rxe_pkt_info *pkt);
1027 struct resp_res * find_resource(struct rxe_qp *qp, u32 psn);
1046 enum resp_states duplicate_request(struct rxe_qp *qp, struct rxe_pkt_info *pkt);
1141 void do_class_ac_error(struct rxe_qp *qp, u8 syndrome, enum ib_wc_status status);
1151 enum resp_states do_class_d1e_error(struct rxe_qp *qp);
411 int sprintf(char *, const char *, ...);
62 int memcmp(const void *, const void *, size_t );
322 void * ldv_kmem_cache_alloc_182(struct kmem_cache *ldv_func_arg1, gfp_t flags);
326 void * ldv_kmem_cache_alloc_202(struct kmem_cache *ldv_func_arg1, gfp_t flags);
971 struct sk_buff * ldv_skb_clone_192(struct sk_buff *ldv_func_arg1, gfp_t flags);
975 struct sk_buff * ldv_skb_clone_201(struct sk_buff *ldv_func_arg1, gfp_t flags);
979 struct sk_buff * ldv_skb_clone_203(struct sk_buff *ldv_func_arg1, gfp_t flags);
984 struct sk_buff * ldv_skb_copy_194(const struct sk_buff *ldv_func_arg1, gfp_t flags);
997 int ldv_pskb_expand_head_189(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
1001 int ldv_pskb_expand_head_190(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
1005 int ldv_pskb_expand_head_198(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
1009 int ldv_pskb_expand_head_199(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
1013 int ldv_pskb_expand_head_200(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
2395 struct sk_buff * ldv___netdev_alloc_skb_195(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);
2399 struct sk_buff * ldv___netdev_alloc_skb_196(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);
2403 struct sk_buff * ldv___netdev_alloc_skb_197(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);
436 void __ipv6_addr_set_half(__be32 *addr, __be32 wh, __be32 wl);
456 void ipv6_addr_set(struct in6_addr *addr, __be32 w1, __be32 w2, __be32 w3, __be32 w4);
649 void ipv6_addr_set_v4mapped(const __be32 addr, struct in6_addr *v4mapped);
94 int ib_find_cached_gid_by_port(struct ib_device *, const union ib_gid *, enum ib_gid_type , u8 , struct net_device *, u16 *);
104 u8 __bth_opcode(void *arg);
167 u8 __bth_tver(void *arg);
182 u16 __bth_pkey(void *arg);
196 u32 __bth_qpn(void *arg);
284 u32 __bth_psn(void *arg);
300 u8 bth_opcode(struct rxe_pkt_info *pkt);
340 u8 bth_tver(struct rxe_pkt_info *pkt);
350 u16 bth_pkey(struct rxe_pkt_info *pkt);
360 u32 bth_qpn(struct rxe_pkt_info *pkt);
415 u32 bth_psn(struct rxe_pkt_info *pkt);
491 u32 __deth_qkey(void *arg);
519 u32 deth_qkey(struct rxe_pkt_info *pkt);
935 size_t header_size(struct rxe_pkt_info *pkt);
152 void * rxe_pool_get_key(struct rxe_pool *pool, void *key);
42 int pkey_match(u16 key1, u16 key2);
68 int rxe_rcv(struct sk_buff *skb);
232 u32 rxe_icrc_hdr(struct rxe_pkt_info *pkt, struct sk_buff *skb);
39 int check_type_state(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, struct rxe_qp *qp);
85 void set_bad_pkey_cntr(struct rxe_port *port);
93 void set_qkey_viol_cntr(struct rxe_port *port);
101 int check_keys(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, u32 qpn, struct rxe_qp *qp);
154 int check_addr(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, struct rxe_qp *qp);
214 int hdr_check(struct rxe_pkt_info *pkt);
265 void rxe_rcv_pkt(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, struct sk_buff *skb);
275 void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb);
335 int rxe_match_dgid(struct rxe_dev *rxe, struct sk_buff *skb);
420 const char __kstrtab_rxe_rcv[8U] = { 'r', 'x', 'e', '_', 'r', 'c', 'v', '\x0' };
420 const struct kernel_symbol __ksymtab_rxe_rcv;
420 const struct kernel_symbol __ksymtab_rxe_rcv = { (unsigned long)(&rxe_rcv), (const char *)(&__kstrtab_rxe_rcv) };
72 void set_bit(long nr, volatile unsigned long *addr);
110 void clear_bit(long nr, volatile unsigned long *addr);
28 unsigned long int find_next_zero_bit(const unsigned long *, unsigned long, unsigned long);
53 unsigned long int find_first_zero_bit(const unsigned long *, unsigned long);
184 void __might_sleep(const char *, int, int);
191 void bitmap_zero(unsigned long *dst, unsigned int nbits);
24 int atomic_read___0(const atomic_t *v);
36 void atomic_set___0(atomic_t *v, int i);
101 void atomic_dec(atomic_t *v);
62 void rb_insert_color(struct rb_node *, struct rb_root *);
63 void rb_erase(struct rb_node *, struct rb_root *);
82 void rb_link_node(struct rb_node *node, struct rb_node *parent, struct rb_node **rb_link);
31 void kref_init___0(struct kref *kref);
127 struct kmem_cache * kmem_cache_create(const char *, size_t , size_t , unsigned long, void (*)(void *));
130 void kmem_cache_destroy(struct kmem_cache *);
322 void * ldv_kmem_cache_alloc_223(struct kmem_cache *ldv_func_arg1, gfp_t flags);
326 void * ldv_kmem_cache_alloc_243(struct kmem_cache *ldv_func_arg1, gfp_t flags);
328 void kmem_cache_free(struct kmem_cache *, void *);
466 void * kmalloc(size_t size, gfp_t flags);
612 void * kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags);
971 struct sk_buff * ldv_skb_clone_233(struct sk_buff *ldv_func_arg1, gfp_t flags);
975 struct sk_buff * ldv_skb_clone_242(struct sk_buff *ldv_func_arg1, gfp_t flags);
980 struct sk_buff * ldv_skb_copy_235(const struct sk_buff *ldv_func_arg1, gfp_t flags);
993 int ldv_pskb_expand_head_230(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
997 int ldv_pskb_expand_head_231(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
1001 int ldv_pskb_expand_head_239(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
1005 int ldv_pskb_expand_head_240(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
1009 int ldv_pskb_expand_head_241(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
2395 struct sk_buff * ldv___netdev_alloc_skb_236(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);
2399 struct sk_buff * ldv___netdev_alloc_skb_237(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);
2403 struct sk_buff * ldv___netdev_alloc_skb_238(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);
72 struct rxe_type_info rxe_type_info[10U];
130 void * rxe_alloc(struct rxe_pool *pool);
135 void rxe_add_index(void *arg);
138 void rxe_drop_index(void *arg);
143 void rxe_add_key(void *arg, void *key);
146 void rxe_drop_key(void *arg);
478 void rxe_mc_cleanup(void *arg);
67 void rxe_cq_cleanup(void *arg);
140 void rxe_mem_cleanup(void *arg);
165 void rxe_qp_cleanup(void *arg);
41 struct rxe_type_info rxe_type_info[10U] = { { (char *)"rxe-uc", 496UL, 0, 0, 0U, 0U, 0UL, 0UL, 0 }, { (char *)"rxe-pd", 104UL, 0, 0, 0U, 0U, 0UL, 0UL, 0 }, { (char *)"rxe-ah", 184UL, 0, 1, 0U, 0U, 0UL, 0UL, 0 }, { (char *)"rxe-srq", 320UL, 0, 2, 262144U, 131073U, 0UL, 0UL, 0 }, { (char *)"rxe-qp", 2912UL, &rxe_qp_cleanup, 2, 131072U, 16U, 0UL, 0UL, 0 }, { (char *)"rxe-cq", 336UL, &rxe_cq_cleanup, 0, 0U, 0U, 0UL, 0UL, 0 }, { (char *)"rxe-mr", 232UL, &rxe_mem_cleanup, 2, 262144U, 1U, 0UL, 0UL, 0 }, { (char *)"rxe-mw", 232UL, 0, 2, 393216U, 262145U, 0UL, 0UL, 0 }, { (char *)"rxe-mc_grp", 192UL, &rxe_mc_cleanup, 4, 0U, 0U, 160UL, 16UL, 0 }, { (char *)"rxe-mc_elem", 112UL, 0, 1, 0U, 0U, 0UL, 0UL, 0 } };
105 char * pool_name(struct rxe_pool *pool);
110 struct kmem_cache * pool_cache(struct rxe_pool *pool);
166 int rxe_pool_init_index(struct rxe_pool *pool, u32 max, u32 min);
239 void rxe_pool_release(struct kref *kref);
247 void rxe_pool_put(struct rxe_pool *pool);
268 u32 alloc_index(struct rxe_pool *pool);
282 void insert_index(struct rxe_pool *pool, struct rxe_pool_entry *new);
309 void insert_key(struct rxe_pool *pool, struct rxe_pool_entry *new);
1 unsigned long int __builtin_object_size(void *, int);
479 int fls64(__u64 x);
187 unsigned int fls_long(unsigned long l);
40 int __ilog2_u64(u64 n);
61 unsigned long int __roundup_pow_of_two(unsigned long n);
254 void __might_fault(const char *, int);
48 void __list_add(struct list_head *, struct list_head *, struct list_head *);
61 void list_add(struct list_head *new, struct list_head *head);
154 extern struct cpuinfo_x86 boot_cpu_data;
322 void * ldv_kmem_cache_alloc_263(struct kmem_cache *ldv_func_arg1, gfp_t flags);
326 void * ldv_kmem_cache_alloc_283(struct kmem_cache *ldv_func_arg1, gfp_t flags);
19 void ldv_check_alloc_nonatomic();
75 void * ldv_vmalloc_user_284(unsigned long ldv_func_arg1);
88 void vfree(const void *);
5 void kasan_check_read(const void *, unsigned int);
697 unsigned long int _copy_to_user(void *, const void *, unsigned int);
722 void __copy_to_user_overflow();
775 unsigned long int copy_to_user(void *to, const void *from, unsigned long n);
971 struct sk_buff * ldv_skb_clone_273(struct sk_buff *ldv_func_arg1, gfp_t flags);
975 struct sk_buff * ldv_skb_clone_282(struct sk_buff *ldv_func_arg1, gfp_t flags);
980 struct sk_buff * ldv_skb_copy_275(const struct sk_buff *ldv_func_arg1, gfp_t flags);
993 int ldv_pskb_expand_head_270(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
997 int ldv_pskb_expand_head_271(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
1001 int ldv_pskb_expand_head_279(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
1005 int ldv_pskb_expand_head_280(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
1009 int ldv_pskb_expand_head_281(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
2395 struct sk_buff * ldv___netdev_alloc_skb_276(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);
2399 struct sk_buff * ldv___netdev_alloc_skb_277(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);
2403 struct sk_buff * ldv___netdev_alloc_skb_278(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);
93 void rxe_mmap_release(struct kref *ref);
95 struct rxe_mmap_info * rxe_create_mmap_info(struct rxe_dev *rxe, u32 size, struct ib_ucontext *context, void *obj);
79 int do_mmap_info(struct rxe_dev *rxe, struct ib_udata *udata, bool is_req, struct ib_ucontext *context, struct rxe_queue_buf *buf, size_t buf_size, struct rxe_mmap_info **ip_p);
87 struct rxe_queue * rxe_queue_init(struct rxe_dev *rxe, int *num_elem, unsigned int elem_size);
91 int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p, unsigned int elem_size, struct ib_ucontext *context, struct ib_udata *udata, spinlock_t *producer_lock, spinlock_t *consumer_lock);
101 void rxe_queue_cleanup(struct rxe_queue *q);
120 void advance_producer(struct rxe_queue *q);
132 void * producer_addr(struct rxe_queue *q);
143 int resize_finish(struct rxe_queue *q, struct rxe_queue *new_q, unsigned int num_elem);
33 extern struct module __this_module;
437 int fls(int x);
32 int __ilog2_u32(u32 n);
414 int snprintf(char *, size_t , const char *, ...);
5 void * ldv_err_ptr(long error);
87 void __bad_percpu_size();
295 void __bad_size_call_parameter();
27 size_t strlcpy(char *, const char *, size_t );
23 void * ERR_PTR(long error);
138 void mutex_lock_nested(struct mutex *, unsigned int);
174 void mutex_unlock(struct mutex *);
322 void * ldv_kmem_cache_alloc_304(struct kmem_cache *ldv_func_arg1, gfp_t flags);
326 void * ldv_kmem_cache_alloc_324(struct kmem_cache *ldv_func_arg1, gfp_t flags);
6 void kasan_check_write(const void *, unsigned int);
695 unsigned long int _copy_from_user(void *, const void *, unsigned int);
717 void __copy_from_user_overflow();
738 unsigned long int copy_from_user(void *to, const void *from, unsigned long n);
595 int device_create_file(struct device *, const struct device_attribute *);
597 void device_remove_file(struct device *, const struct device_attribute *);
1137 void dev_warn(const struct device *, const char *, ...);
971 struct sk_buff * ldv_skb_clone_314(struct sk_buff *ldv_func_arg1, gfp_t flags);
975 struct sk_buff * ldv_skb_clone_323(struct sk_buff *ldv_func_arg1, gfp_t flags);
980 struct sk_buff * ldv_skb_copy_316(const struct sk_buff *ldv_func_arg1, gfp_t flags);
993 int ldv_pskb_expand_head_311(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
997 int ldv_pskb_expand_head_312(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
1001 int ldv_pskb_expand_head_320(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
1005 int ldv_pskb_expand_head_321(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
1009 int ldv_pskb_expand_head_322(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
2395 struct sk_buff * ldv___netdev_alloc_skb_317(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);
2399 struct sk_buff * ldv___netdev_alloc_skb_318(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);
2403 struct sk_buff * ldv___netdev_alloc_skb_319(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);
3317 void dev_put(struct net_device *dev);
3328 void dev_hold(struct net_device *dev);
75 extern union ib_gid zgid;
1217 struct ib_rdma_wr * rdma_wr(struct ib_send_wr *wr);
1232 struct ib_atomic_wr * atomic_wr(struct ib_send_wr *wr);
1249 struct ib_ud_wr * ud_wr(struct ib_send_wr *wr);
1261 struct ib_reg_wr * reg_wr(struct ib_send_wr *wr);
2091 int ib_register_device(struct ib_device *, int (*)(struct ib_device *, u8 , struct kobject *));
2094 void ib_unregister_device(struct ib_device *);
3293 int ib_sg_to_pages(struct ib_mr *, struct scatterlist *, int, unsigned int *, int (*)(struct ib_mr *, u64 ));
52 int ib_get_cached_gid(struct ib_device *, u8 , int, union ib_gid *, struct ib_gid_attr *);
435 struct rxe_ucontext * to_ruc(struct ib_ucontext *uc);
440 struct rxe_pd * to_rpd(struct ib_pd *pd);
445 struct rxe_ah * to_rah(struct ib_ah *ah);
450 struct rxe_srq * to_rsrq(struct ib_srq *srq);
455 struct rxe_qp * to_rqp(struct ib_qp *qp);
460 struct rxe_cq * to_rcq(struct ib_cq *cq);
39 int rxe_av_chk_attr(struct rxe_dev *rxe, struct ib_ah_attr *attr);
41 int rxe_av_from_attr(struct rxe_dev *rxe, u8 port_num, struct rxe_av *av, struct ib_ah_attr *attr);
44 int rxe_av_to_attr(struct rxe_dev *rxe, struct rxe_av *av, struct ib_ah_attr *attr);
47 int rxe_av_fill_ip_info(struct rxe_dev *rxe, struct rxe_av *av, struct ib_ah_attr *attr, struct ib_gid_attr *sgid_attr, union ib_gid *sgid);
56 int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe, int comp_vector, struct ib_udata *udata);
59 int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe, int comp_vector, struct ib_ucontext *context, struct ib_udata *udata);
63 int rxe_cq_resize_queue(struct rxe_cq *cq, int cqe, struct ib_udata *udata);
70 int rxe_mcast_get_grp(struct rxe_dev *rxe, union ib_gid *mgid, struct rxe_mc_grp **grp_p);
73 int rxe_mcast_add_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_mc_grp *grp);
76 int rxe_mcast_drop_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp, union ib_gid *mgid);
100 int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
108 int rxe_mem_init_dma(struct rxe_dev *rxe, struct rxe_pd *pd, int access, struct rxe_mem *mem);
111 int rxe_mem_init_user(struct rxe_dev *rxe, struct rxe_pd *pd, u64 start, u64 length, u64 iova, int access, struct ib_udata *udata, struct rxe_mem *mem);
115 int rxe_mem_init_fast(struct rxe_dev *rxe, struct rxe_pd *pd, int max_pages, struct rxe_mem *mem);
145 int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init);
147 int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd, struct ib_qp_init_attr *init, struct ib_udata *udata, struct ib_pd *ibpd);
151 int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init);
153 int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp, struct ib_qp_attr *attr, int mask);
156 int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask, struct ib_udata *udata);
159 int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask);
163 void rxe_qp_destroy(struct rxe_qp *qp);
177 enum ib_qp_state qp_state(struct rxe_qp *qp);
213 int rxe_srq_chk_attr(struct rxe_dev *rxe, struct rxe_srq *srq, struct ib_srq_attr *attr, enum ib_srq_attr_mask mask);
216 int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq, struct ib_srq_init_attr *init, struct ib_ucontext *context, struct ib_udata *udata);
220 int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq, struct ib_srq_attr *attr, enum ib_srq_attr_mask mask, struct ib_udata *udata);
224 struct ib_dma_mapping_ops rxe_dma_mapping_ops;
114 int queue_full(struct rxe_queue *q);
38 int rxe_query_device(struct ib_device *dev, struct ib_device_attr *attr, struct ib_udata *uhw);
51 void rxe_eth_speed_to_ib_speed(int speed, u8 *active_speed, u8 *active_width);
75 int rxe_query_port(struct ib_device *dev, u8 port_num, struct ib_port_attr *attr);
115 int rxe_query_gid(struct ib_device *device, u8 port_num, int index, union ib_gid *gid);
132 int rxe_add_gid(struct ib_device *device, u8 port_num, unsigned int index, const union ib_gid *gid, const struct ib_gid_attr *attr, void **context);
141 int rxe_del_gid(struct ib_device *device, u8 port_num, unsigned int index, void **context);
149 struct net_device * rxe_get_netdev(struct ib_device *device, u8 port_num);
162 int rxe_query_pkey(struct ib_device *device, u8 port_num, u16 index, u16 *pkey);
189 int rxe_modify_device(struct ib_device *dev, int mask, struct ib_device_modify *attr);
205 int rxe_modify_port(struct ib_device *dev, u8 port_num, int mask, struct ib_port_modify *attr);
230 enum rdma_link_layer rxe_get_link_layer(struct ib_device *dev, u8 port_num);
238 struct ib_ucontext * rxe_alloc_ucontext(struct ib_device *dev, struct ib_udata *udata);
248 int rxe_dealloc_ucontext(struct ib_ucontext *ibuc);
256 int rxe_port_immutable(struct ib_device *dev, u8 port_num, struct ib_port_immutable *immutable);
274 struct ib_pd * rxe_alloc_pd(struct ib_device *dev, struct ib_ucontext *context, struct ib_udata *udata);
285 int rxe_dealloc_pd(struct ib_pd *ibpd);
293 int rxe_init_av(struct rxe_dev *rxe, struct ib_ah_attr *attr, struct rxe_av *av);
317 struct ib_ah * rxe_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr);
350 int rxe_modify_ah(struct ib_ah *ibah, struct ib_ah_attr *attr);
367 int rxe_query_ah(struct ib_ah *ibah, struct ib_ah_attr *attr);
376 int rxe_destroy_ah(struct ib_ah *ibah);
385 int post_one_recv(struct rxe_rq *rq, struct ib_recv_wr *ibwr);
432 struct ib_srq * rxe_create_srq(struct ib_pd *ibpd, struct ib_srq_init_attr *init, struct ib_udata *udata);
470 int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, enum ib_srq_attr_mask mask, struct ib_udata *udata);
492 int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr);
505 int rxe_destroy_srq(struct ib_srq *ibsrq);
519 int rxe_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr, struct ib_recv_wr **bad_wr);
543 struct ib_qp * rxe_create_qp(struct ib_pd *ibpd, struct ib_qp_init_attr *init, struct ib_udata *udata);
585 int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int mask, struct ib_udata *udata);
606 int rxe_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int mask, struct ib_qp_init_attr *init);
617 int rxe_destroy_qp(struct ib_qp *ibqp);
627 int validate_send_wr(struct rxe_qp *qp, struct ib_send_wr *ibwr, unsigned int mask, unsigned int length);
654 void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr, struct ib_send_wr *ibwr);
709 int init_send_wqe(struct rxe_qp *qp, struct ib_send_wr *ibwr, unsigned int mask, unsigned int length, struct rxe_send_wqe *wqe);
763 int post_one_send(struct rxe_qp *qp, struct ib_send_wr *ibwr, unsigned int mask, u32 length);
804 int rxe_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, struct ib_send_wr **bad_wr);
864 int rxe_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, struct ib_recv_wr **bad_wr);
901 struct ib_cq * rxe_create_cq(struct ib_device *dev, const struct ib_cq_init_attr *attr, struct ib_ucontext *context, struct ib_udata *udata);
936 int rxe_destroy_cq(struct ib_cq *ibcq);
944 int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
964 int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
985 int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt);
993 int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
1003 struct ib_mr * rxe_get_dma_mr(struct ib_pd *ibpd, int access);
1034 struct ib_mr * rxe_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length, u64 iova, int access, struct ib_udata *udata);
1070 int rxe_dereg_mr(struct ib_mr *ibmr);
1081 struct ib_mr * rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type, u32 max_num_sg);
1117 int rxe_set_page(struct ib_mr *ibmr, u64 addr);
1136 int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents___0, unsigned int *sg_offset);
1156 int rxe_attach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid);
1174 int rxe_detach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid);
1182 ssize_t rxe_show_parent(struct device *device, struct device_attribute *attr, char *buf);
1193 struct device_attribute dev_attr_parent = { { "parent", 292U, (_Bool)0, 0, { { { 0 }, { 0 }, { 0 }, { 0 }, { 0 }, { 0 }, { 0 }, { 0 } } } }, &rxe_show_parent, (ssize_t (*)(struct device *, struct device_attribute *, const char *, size_t ))0 };
1195 struct device_attribute *rxe_dev_attributes[1U] = { &dev_attr_parent };
322 void * ldv_kmem_cache_alloc_344(struct kmem_cache *ldv_func_arg1, gfp_t flags);
326 void * ldv_kmem_cache_alloc_364(struct kmem_cache *ldv_func_arg1, gfp_t flags);
971 struct sk_buff * ldv_skb_clone_354(struct sk_buff *ldv_func_arg1, gfp_t flags);
975 struct sk_buff * ldv_skb_clone_363(struct sk_buff *ldv_func_arg1, gfp_t flags);
980 struct sk_buff * ldv_skb_copy_356(const struct sk_buff *ldv_func_arg1, gfp_t flags);
993 int ldv_pskb_expand_head_351(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
997 int ldv_pskb_expand_head_352(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
1001 int ldv_pskb_expand_head_360(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
1005 int ldv_pskb_expand_head_361(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
1009 int ldv_pskb_expand_head_362(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
2395 struct sk_buff * ldv___netdev_alloc_skb_357(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);
2399 struct sk_buff * ldv___netdev_alloc_skb_358(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);
2403 struct sk_buff * ldv___netdev_alloc_skb_359(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);
623 bool ipv6_addr_v4mapped(const struct in6_addr *a);
140 enum rdma_network_type ib_gid_to_network_type(enum ib_gid_type gid_type, union ib_gid *gid);
185 void rdma_gid2ip(struct sockaddr *out, const union ib_gid *gid);
10 extern struct task_struct *current_task;
12 struct task_struct * get_current();
322 void * ldv_kmem_cache_alloc_384(struct kmem_cache *ldv_func_arg1, gfp_t flags);
326 void * ldv_kmem_cache_alloc_404(struct kmem_cache *ldv_func_arg1, gfp_t flags);
45 bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit);
971 struct sk_buff * ldv_skb_clone_394(struct sk_buff *ldv_func_arg1, gfp_t flags);
975 struct sk_buff * ldv_skb_clone_403(struct sk_buff *ldv_func_arg1, gfp_t flags);
980 struct sk_buff * ldv_skb_copy_396(const struct sk_buff *ldv_func_arg1, gfp_t flags);
993 int ldv_pskb_expand_head_391(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
997 int ldv_pskb_expand_head_392(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
1001 int ldv_pskb_expand_head_400(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
1005 int ldv_pskb_expand_head_401(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
1009 int ldv_pskb_expand_head_402(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
2395 struct sk_buff * ldv___netdev_alloc_skb_397(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);
2399 struct sk_buff * ldv___netdev_alloc_skb_398(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);
2403 struct sk_buff * ldv___netdev_alloc_skb_399(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);
2103 int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len);
190 int rcv_wqe_size(int max_sge);
183 void ___might_sleep(const char *, int, int);
25 void INIT_LIST_HEAD___0(struct list_head *list);
36 void atomic_set___1(atomic_t *v, int i);
445 unsigned long int nsecs_to_jiffies(u64 );
95 void init_timer_key(struct timer_list *, unsigned int, const char *, struct lock_class_key *);
245 int del_timer_sync(struct timer_list *);
3209 int _cond_resched();
322 void * ldv_kmem_cache_alloc_424(struct kmem_cache *ldv_func_arg1, gfp_t flags);
326 void * ldv_kmem_cache_alloc_444(struct kmem_cache *ldv_func_arg1, gfp_t flags);
217 int sock_create_kern(struct net *, int, int, int, struct socket **);
293 int kernel_sock_shutdown(struct socket *, enum sock_shutdown_cmd );
500 void kvfree(const void *);
971 struct sk_buff * ldv_skb_clone_434(struct sk_buff *ldv_func_arg1, gfp_t flags);
975 struct sk_buff * ldv_skb_clone_443(struct sk_buff *ldv_func_arg1, gfp_t flags);
980 struct sk_buff * ldv_skb_copy_436(const struct sk_buff *ldv_func_arg1, gfp_t flags);
993 int ldv_pskb_expand_head_431(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
997 int ldv_pskb_expand_head_432(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
1001 int ldv_pskb_expand_head_440(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
1005 int ldv_pskb_expand_head_441(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
1009 int ldv_pskb_expand_head_442(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
1548 void __skb_queue_head_init(struct sk_buff_head *list);
1562 void skb_queue_head_init(struct sk_buff_head *list);
2395 struct sk_buff * ldv___netdev_alloc_skb_437(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);
2399 struct sk_buff * ldv___netdev_alloc_skb_438(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);
2403 struct sk_buff * ldv___netdev_alloc_skb_439(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);
7 extern struct net init_net;
2154 int ib_modify_qp_is_ok(enum ib_qp_state , enum ib_qp_state , enum ib_qp_type , enum ib_qp_attr_mask , enum rdma_link_layer );
64 int rxe_init_task(void *obj, struct rxe_task *task, void *arg, int (*func)(void *), char *name);
68 void rxe_cleanup_task(struct rxe_task *task);
74 int __rxe_do_task(struct rxe_task *task);
90 void rxe_disable_task(struct rxe_task *task);
93 void rxe_enable_task(struct rxe_task *task);
139 char *rxe_qp_state_name[6U];
79 void rxe_drop_all_mcast_groups(struct rxe_qp *qp);
43 char *rxe_qp_state_name[6U] = { (char *)"RESET", (char *)"INIT", (char *)"READY", (char *)"DRAIN", (char *)"DRAINED", (char *)"ERROR" };
52 int rxe_qp_chk_cap(struct rxe_dev *rxe, struct ib_qp_cap *cap, int has_srq);
132 int alloc_rd_atomic_resources(struct rxe_qp *qp, unsigned int n);
144 void free_rd_atomic_resources(struct rxe_qp *qp);
171 void cleanup_rd_atomic_resources(struct rxe_qp *qp);
184 void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp, struct ib_qp_init_attr *init);
226 int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp, struct ib_qp_init_attr *init, struct ib_ucontext *context, struct ib_udata *udata);
288 int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp, struct ib_qp_init_attr *init, struct ib_ucontext *context, struct ib_udata *udata);
501 void rxe_qp_reset(struct rxe_qp *qp);
557 void rxe_qp_drain(struct rxe_qp *qp);
204 bool test_and_set_bit(long nr, volatile unsigned long *addr);
322 void * ldv_kmem_cache_alloc_464(struct kmem_cache *ldv_func_arg1, gfp_t flags);
326 void * ldv_kmem_cache_alloc_484(struct kmem_cache *ldv_func_arg1, gfp_t flags);
971 struct sk_buff * ldv_skb_clone_474(struct sk_buff *ldv_func_arg1, gfp_t flags);
975 struct sk_buff * ldv_skb_clone_483(struct sk_buff *ldv_func_arg1, gfp_t flags);
980 struct sk_buff * ldv_skb_copy_476(const struct sk_buff *ldv_func_arg1, gfp_t flags);
993 int ldv_pskb_expand_head_471(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
997 int ldv_pskb_expand_head_472(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
1001 int ldv_pskb_expand_head_480(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
1005 int ldv_pskb_expand_head_481(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
1009 int ldv_pskb_expand_head_482(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
2395 struct sk_buff * ldv___netdev_alloc_skb_477(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);
2399 struct sk_buff * ldv___netdev_alloc_skb_478(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);
2403 struct sk_buff * ldv___netdev_alloc_skb_479(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);
550 void __tasklet_schedule(struct tasklet_struct *);
552 void tasklet_schedule(struct tasklet_struct *t);
602 void tasklet_init(struct tasklet_struct *, void (*)(unsigned long), unsigned long);
69 void rxe_send_complete(unsigned long data);
52 bool is_power_of_2(unsigned long n);
3 bool ldv_is_err(const void *ptr);
6 long int ldv_ptr_err(const void *ptr);
7 extern unsigned long page_offset_base;
32 long int PTR_ERR(const void *ptr);
41 bool IS_ERR(const void *ptr);
318 void * __kmalloc(size_t , gfp_t );
322 void * ldv_kmem_cache_alloc_504(struct kmem_cache *ldv_func_arg1, gfp_t flags);
326 void * ldv_kmem_cache_alloc_524(struct kmem_cache *ldv_func_arg1, gfp_t flags);
564 void * kmalloc_array(size_t n, size_t size, gfp_t flags);
1003 void * lowmem_page_address(const struct page *page);
120 struct page * sg_page(struct scatterlist *sg);
246 struct scatterlist * sg_next(struct scatterlist *);
971 struct sk_buff * ldv_skb_clone_514(struct sk_buff *ldv_func_arg1, gfp_t flags);
975 struct sk_buff * ldv_skb_clone_523(struct sk_buff *ldv_func_arg1, gfp_t flags);
980 struct sk_buff * ldv_skb_copy_516(const struct sk_buff *ldv_func_arg1, gfp_t flags);
993 int ldv_pskb_expand_head_511(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
997 int ldv_pskb_expand_head_512(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
1001 int ldv_pskb_expand_head_520(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
1005 int ldv_pskb_expand_head_521(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
1009 int ldv_pskb_expand_head_522(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
2395 struct sk_buff * ldv___netdev_alloc_skb_517(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);
2399 struct sk_buff * ldv___netdev_alloc_skb_518(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);
2403 struct sk_buff * ldv___netdev_alloc_skb_519(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);
61 int ib_umem_offset(struct ib_umem *umem);
85 struct ib_umem * ib_umem_get(struct ib_ucontext *, unsigned long, size_t , int, int);
87 void ib_umem_release(struct ib_umem *);
137 int rxe_mem_map_pages(struct rxe_dev *rxe, struct rxe_mem *mem, u64 *page, int num_pages, u64 iova);
40 u8 rxe_get_key();
75 void rxe_mem_init(int access, struct rxe_mem *mem);
108 int rxe_mem_alloc(struct rxe_dev *rxe, struct rxe_mem *mem, int num_buf);
264 void lookup_iova(struct rxe_mem *mem, u64 iova, int *m_out, int *n_out, size_t *offset_out);
13 int __get_order(unsigned long size);
467 struct page * alloc_pages(gfp_t flags, unsigned int order);
504 void free_pages(unsigned long, unsigned int);
322 void * ldv_kmem_cache_alloc_544(struct kmem_cache *ldv_func_arg1, gfp_t flags);
326 void * ldv_kmem_cache_alloc_564(struct kmem_cache *ldv_func_arg1, gfp_t flags);
125 int valid_dma_direction(int dma_direction);
971 struct sk_buff * ldv_skb_clone_554(struct sk_buff *ldv_func_arg1, gfp_t flags);
975 struct sk_buff * ldv_skb_clone_563(struct sk_buff *ldv_func_arg1, gfp_t flags);
980 struct sk_buff * ldv_skb_copy_556(const struct sk_buff *ldv_func_arg1, gfp_t flags);
993 int ldv_pskb_expand_head_551(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
997 int ldv_pskb_expand_head_552(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
1001 int ldv_pskb_expand_head_560(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
1005 int ldv_pskb_expand_head_561(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
1009 int ldv_pskb_expand_head_562(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
2395 struct sk_buff * ldv___netdev_alloc_skb_557(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);
2399 struct sk_buff * ldv___netdev_alloc_skb_558(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);
2403 struct sk_buff * ldv___netdev_alloc_skb_559(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);
40 int rxe_mapping_error(struct ib_device *dev, u64 dma_addr);
45 u64 rxe_dma_map_single(struct ib_device *dev, void *cpu_addr, size_t size, enum dma_data_direction direction);
53 void rxe_dma_unmap_single(struct ib_device *dev, u64 addr, size_t size, enum dma_data_direction direction);
60 u64 rxe_dma_map_page(struct ib_device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction direction);
82 void rxe_dma_unmap_page(struct ib_device *dev, u64 addr, size_t size, enum dma_data_direction direction);
89 int rxe_map_sg(struct ib_device *dev, struct scatterlist *sgl, int nents, enum dma_data_direction direction);
114 void rxe_unmap_sg(struct ib_device *dev, struct scatterlist *sg, int nents, enum dma_data_direction direction);
121 void rxe_sync_single_for_cpu(struct ib_device *dev, u64 addr, size_t size, enum dma_data_direction dir);
127 void rxe_sync_single_for_device(struct ib_device *dev, u64 addr, size_t size, enum dma_data_direction dir);
133 void * rxe_dma_alloc_coherent(struct ib_device *dev, size_t size, u64 *dma_handle, gfp_t flag);
149 void rxe_dma_free_coherent(struct ib_device *dev, size_t size, void *cpu_addr, u64 dma_handle);
155 struct ib_dma_mapping_ops rxe_dma_mapping_ops = { &rxe_mapping_error, &rxe_dma_map_single, &rxe_dma_unmap_single, &rxe_dma_map_page, &rxe_dma_unmap_page, &rxe_map_sg, &rxe_unmap_sg, &rxe_sync_single_for_cpu, &rxe_sync_single_for_device, &rxe_dma_alloc_coherent, &rxe_dma_free_coherent };
206 void ldv_main13_sequence_infinite_withcheck_stateful();
322 void * ldv_kmem_cache_alloc_584(struct kmem_cache *ldv_func_arg1, gfp_t flags);
326 void * ldv_kmem_cache_alloc_604(struct kmem_cache *ldv_func_arg1, gfp_t flags);
971 struct sk_buff * ldv_skb_clone_594(struct sk_buff *ldv_func_arg1, gfp_t flags);
975 struct sk_buff * ldv_skb_clone_603(struct sk_buff *ldv_func_arg1, gfp_t flags);
980 struct sk_buff * ldv_skb_copy_596(const struct sk_buff *ldv_func_arg1, gfp_t flags);
993 int ldv_pskb_expand_head_591(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
997 int ldv_pskb_expand_head_592(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
1001 int ldv_pskb_expand_head_600(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
1005 int ldv_pskb_expand_head_601(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
1009 int ldv_pskb_expand_head_602(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
2395 struct sk_buff * ldv___netdev_alloc_skb_597(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);
2399 struct sk_buff * ldv___netdev_alloc_skb_598(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);
2403 struct sk_buff * ldv___netdev_alloc_skb_599(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);
41 struct rxe_wr_opcode_info rxe_wr_opcode_info[12U] = { { (char *)"IB_WR_RDMA_WRITE", { 0, 0, 17, 17 } }, { (char *)"IB_WR_RDMA_WRITE_WITH_IMM", { 0, 0, 17, 17 } }, { (char *)"IB_WR_SEND", { 5, 5, 5, 5, 5 } }, { (char *)"IB_WR_SEND_WITH_IMM", { 5, 5, 5, 5, 5 } }, { (char *)"IB_WR_RDMA_READ", { 0, 0, 8 } }, { (char *)"IB_WR_ATOMIC_CMP_AND_SWP", { 0, 0, 2 } }, { (char *)"IB_WR_ATOMIC_FETCH_AND_ADD", { 0, 0, 2 } }, { (char *)"IB_WR_LSO", { 0, 0, 0, 0, 0, 0, 0, 0 } }, { (char *)"IB_WR_SEND_WITH_INV", { 0, 0, 5, 5, 5 } }, { (char *)"IB_WR_RDMA_READ_WITH_INV", { 0, 0, 8 } }, { (char *)"IB_WR_LOCAL_INV", { 0, 0, 64 } }, { (char *)"IB_WR_REG_MR", { 0, 0, 64 } } };
128 struct rxe_opcode_info rxe_opcode[256U] = { { (char *)"IB_OPCODE_RC_SEND_FIRST", 1333248, 12, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12 } }, { (char *)"IB_OPCODE_RC_SEND_MIDDLE]", 2119680, 12, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12 } }, { (char *)"IB_OPCODE_RC_SEND_LAST", 4741120, 12, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12 } }, { (char *)"IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE", 4742144, 16, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 16 } }, { (char *)"IB_OPCODE_RC_SEND_ONLY", 6051840, 12, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12 } }, { (char *)"IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE", 6052864, 16, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 16 } }, { (char *)"IB_OPCODE_RC_RDMA_WRITE_FIRST", 1087496, 28, { 0, 0, 0, 12, 0, 0, 0, 0, 0, 0, 0, 28 } }, { (char *)"IB_OPCODE_RC_RDMA_WRITE_MIDDLE", 2136064, 12, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12 } }, { (char *)"IB_OPCODE_RC_RDMA_WRITE_LAST", 4233216, 12, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12 } }, { (char *)"IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE", 5020672, 16, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 16 } }, { (char *)"IB_OPCODE_RC_RDMA_WRITE_ONLY", 5281800, 28, { 0, 0, 0, 12, 0, 0, 0, 0, 0, 0, 0, 28 } }, { (char *)"IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE", 6069256, 32, { 0, 0, 0, 12, 0, 0, 0, 0, 0, 0, 28, 32 } }, { (char *)"IB_OPCODE_RC_RDMA_READ_REQUEST", 5312520, 28, { 0, 0, 0, 12, 0, 0, 0, 0, 0, 0, 0, 28 } }, { (char *)"IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST", 1058832, 16, { 0, 0, 0, 0, 12, 0, 0, 0, 0, 0, 0, 16 } }, { (char *)"IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE", 2107392, 12, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12 } }, { (char *)"IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST", 4204560, 16, { 0, 0, 0, 0, 12, 0, 0, 0, 0, 0, 0, 16 } }, { (char *)"IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY", 5253136, 16, { 0, 0, 0, 0, 12, 0, 0, 0, 0, 0, 0, 16 } }, { (char *)"IB_OPCODE_RC_ACKNOWLEDGE", 5251088, 16, { 0, 0, 0, 0, 12, 0, 0, 0, 0, 0, 0, 16 } }, { (char *)"IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE", 5251152, 24, { 0, 0, 0, 0, 12, 0, 16, 0, 0, 0, 0, 24 } }, { (char 
*)"IB_OPCODE_RC_COMPARE_SWAP", 5378080, 40, { 0, 0, 0, 0, 0, 12, 0, 0, 0, 0, 0, 40 } }, { (char *)"IB_OPCODE_RC_FETCH_ADD", 5378080, 40, { 0, 0, 0, 0, 0, 12, 0, 0, 0, 0, 0, 40 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { (char *)"IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE", 4741248, 16, { 0, 0, 0, 0, 0, 0, 0, 12, 0, 0, 0, 16 } }, { (char *)"IB_OPCODE_RC_SEND_ONLY_INV", 5003392, 16, { 0, 0, 0, 0, 0, 0, 0, 12, 0, 0, 0, 16 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { (char *)"IB_OPCODE_UC_SEND_FIRST", 1333248, 12, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12 } }, { (char *)"IB_OPCODE_UC_SEND_MIDDLE", 2119680, 12, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12 } }, { (char *)"IB_OPCODE_UC_SEND_LAST", 4741120, 12, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12 } }, { (char *)"IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE", 4742144, 16, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 16 } }, { (char *)"IB_OPCODE_UC_SEND_ONLY", 6051840, 12, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12 } }, { (char *)"IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE", 6052864, 16, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 16 } }, { (char *)"IB_OPCODE_UC_RDMA_WRITE_FIRST", 1087496, 28, { 0, 0, 0, 12, 0, 0, 0, 0, 0, 0, 0, 28 } }, { (char *)"IB_OPCODE_UC_RDMA_WRITE_MIDDLE", 2136064, 12, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12 } }, { (char *)"IB_OPCODE_UC_RDMA_WRITE_LAST", 4233216, 12, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12 } }, { (char *)"IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE", 5020672, 16, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 16 } }, { (char *)"IB_OPCODE_UC_RDMA_WRITE_ONLY", 5281800, 28, { 0, 0, 0, 12, 0, 0, 0, 0, 0, 0, 0, 28 } }, { (char 
*)"IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE", 6069256, 32, { 0, 0, 0, 12, 0, 0, 0, 0, 0, 0, 28, 32 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { (char *)"IB_OPCODE_RD_SEND_FIRST", 1334016, 24, { 0, 0, 0, 0, 0, 0, 0, 0, 12, 16, 0, 24 } }, { (char *)"IB_OPCODE_RD_SEND_MIDDLE", 2120448, 24, { 0, 0, 0, 0, 0, 0, 0, 0, 12, 16, 0, 24 } }, { (char *)"IB_OPCODE_RD_SEND_LAST", 4741888, 24, { 0, 0, 0, 0, 0, 0, 0, 0, 12, 16, 0, 24 } }, { (char *)"IB_OPCODE_RD_SEND_LAST_WITH_IMMEDIATE", 4742912, 28, { 0, 0, 0, 0, 0, 0, 0, 0, 12, 16, 24, 28 } }, { (char *)"IB_OPCODE_RD_SEND_ONLY", 6052608, 24, { 0, 0, 0, 0, 0, 0, 0, 0, 12, 16, 0, 24 } }, { (char *)"IB_OPCODE_RD_SEND_ONLY_WITH_IMMEDIATE", 6053632, 28, { 0, 0, 0, 0, 0, 0, 0, 0, 12, 16, 24, 28 } }, { (char *)"IB_OPCODE_RD_RDMA_WRITE_FIRST", 1088264, 40, { 0, 0, 0, 24, 0, 0, 0, 0, 12, 16, 0, 40 } }, { (char *)"IB_OPCODE_RD_RDMA_WRITE_MIDDLE", 2136832, 24, { 0, 0, 0, 0, 0, 0, 0, 0, 12, 16, 0, 24 } }, { (char 
*)"IB_OPCODE_RD_RDMA_WRITE_LAST", 4233984, 24, { 0, 0, 0, 0, 0, 0, 0, 0, 12, 16, 0, 24 } }, { (char *)"IB_OPCODE_RD_RDMA_WRITE_LAST_WITH_IMMEDIATE", 5021440, 28, { 0, 0, 0, 0, 0, 0, 0, 0, 12, 16, 24, 28 } }, { (char *)"IB_OPCODE_RD_RDMA_WRITE_ONLY", 5282568, 40, { 0, 0, 0, 24, 0, 0, 0, 0, 12, 16, 0, 40 } }, { (char *)"IB_OPCODE_RD_RDMA_WRITE_ONLY_WITH_IMMEDIATE", 6070024, 44, { 0, 0, 0, 24, 0, 0, 0, 0, 12, 16, 40, 44 } }, { (char *)"IB_OPCODE_RD_RDMA_READ_REQUEST", 5313288, 40, { 0, 0, 0, 24, 0, 0, 0, 0, 12, 16, 0, 40 } }, { (char *)"IB_OPCODE_RD_RDMA_READ_RESPONSE_FIRST", 1059088, 20, { 0, 0, 0, 0, 16, 0, 0, 0, 12, 0, 0, 20 } }, { (char *)"IB_OPCODE_RD_RDMA_READ_RESPONSE_MIDDLE", 2107648, 16, { 0, 0, 0, 0, 0, 0, 0, 0, 12, 0, 0, 16 } }, { (char *)"IB_OPCODE_RD_RDMA_READ_RESPONSE_LAST", 4204816, 20, { 0, 0, 0, 0, 16, 0, 0, 0, 12, 0, 0, 20 } }, { (char *)"IB_OPCODE_RD_RDMA_READ_RESPONSE_ONLY", 5253392, 20, { 0, 0, 0, 0, 16, 0, 0, 0, 12, 0, 0, 20 } }, { (char *)"IB_OPCODE_RD_ACKNOWLEDGE", 5251344, 20, { 0, 0, 0, 0, 16, 0, 0, 0, 12 } }, { (char *)"IB_OPCODE_RD_ATOMIC_ACKNOWLEDGE", 5251408, 28, { 0, 0, 0, 0, 16, 0, 20, 0, 12 } }, { (char *)"RD_COMPARE_SWAP", 5378848, 52, { 0, 0, 0, 0, 0, 24, 0, 0, 12, 16, 0, 52 } }, { (char *)"IB_OPCODE_RD_FETCH_ADD", 5378848, 52, { 0, 0, 0, 0, 0, 24, 0, 0, 12, 16, 0, 52 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { (char *)"IB_OPCODE_UD_SEND_ONLY", 6052352, 20, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 0, 20 } }, { (char *)"IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE", 6053376, 24, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 20, 24 } } };
25 void INIT_LIST_HEAD___1(struct list_head *list);
112 void __list_del_entry(struct list_head *);
113 void list_del(struct list_head *);
143 void list_del_init(struct list_head *entry);
187 int list_empty(const struct list_head *head);
36 void atomic_set___2(atomic_t *v, int i);
31 void kref_init___1(struct kref *kref);
322 void * ldv_kmem_cache_alloc_624(struct kmem_cache *ldv_func_arg1, gfp_t flags);
326 void * ldv_kmem_cache_alloc_644(struct kmem_cache *ldv_func_arg1, gfp_t flags);
94 int remap_vmalloc_range(struct vm_area_struct *, void *, unsigned long);
971 struct sk_buff * ldv_skb_clone_634(struct sk_buff *ldv_func_arg1, gfp_t flags);
975 struct sk_buff * ldv_skb_clone_643(struct sk_buff *ldv_func_arg1, gfp_t flags);
980 struct sk_buff * ldv_skb_copy_636(const struct sk_buff *ldv_func_arg1, gfp_t flags);
993 int ldv_pskb_expand_head_631(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
997 int ldv_pskb_expand_head_632(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
1001 int ldv_pskb_expand_head_640(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
1005 int ldv_pskb_expand_head_641(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
1009 int ldv_pskb_expand_head_642(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
2395 struct sk_buff * ldv___netdev_alloc_skb_637(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);
2399 struct sk_buff * ldv___netdev_alloc_skb_638(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);
2403 struct sk_buff * ldv___netdev_alloc_skb_639(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);
66 void rxe_vma_open(struct vm_area_struct *vma);
73 void rxe_vma_close(struct vm_area_struct *vma);
80 struct vm_operations_struct rxe_vm_ops = { &rxe_vma_open, &rxe_vma_close, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
213 void ldv_main15_sequence_infinite_withcheck_stateful();
322 void * ldv_kmem_cache_alloc_664(struct kmem_cache *ldv_func_arg1, gfp_t flags);
326 void * ldv_kmem_cache_alloc_684(struct kmem_cache *ldv_func_arg1, gfp_t flags);
971 struct sk_buff * ldv_skb_clone_674(struct sk_buff *ldv_func_arg1, gfp_t flags);
975 struct sk_buff * ldv_skb_clone_683(struct sk_buff *ldv_func_arg1, gfp_t flags);
980 struct sk_buff * ldv_skb_copy_676(const struct sk_buff *ldv_func_arg1, gfp_t flags);
993 int ldv_pskb_expand_head_671(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
997 int ldv_pskb_expand_head_672(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
1001 int ldv_pskb_expand_head_680(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
1005 int ldv_pskb_expand_head_681(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
1009 int ldv_pskb_expand_head_682(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
2395 struct sk_buff * ldv___netdev_alloc_skb_677(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);
2399 struct sk_buff * ldv___netdev_alloc_skb_678(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);
2403 struct sk_buff * ldv___netdev_alloc_skb_679(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);
25 void INIT_LIST_HEAD___2(struct list_head *list);
187 int list_empty___0(const struct list_head *head);
322 void * ldv_kmem_cache_alloc_704(struct kmem_cache *ldv_func_arg1, gfp_t flags);
326 void * ldv_kmem_cache_alloc_724(struct kmem_cache *ldv_func_arg1, gfp_t flags);
971 struct sk_buff * ldv_skb_clone_714(struct sk_buff *ldv_func_arg1, gfp_t flags);
975 struct sk_buff * ldv_skb_clone_723(struct sk_buff *ldv_func_arg1, gfp_t flags);
980 struct sk_buff * ldv_skb_copy_716(const struct sk_buff *ldv_func_arg1, gfp_t flags);
993 int ldv_pskb_expand_head_711(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
997 int ldv_pskb_expand_head_712(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
1001 int ldv_pskb_expand_head_720(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
1005 int ldv_pskb_expand_head_721(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
1009 int ldv_pskb_expand_head_722(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
2395 struct sk_buff * ldv___netdev_alloc_skb_717(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);
2399 struct sk_buff * ldv___netdev_alloc_skb_718(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);
2403 struct sk_buff * ldv___netdev_alloc_skb_719(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);
308 bool constant_test_bit(long nr, const volatile unsigned long *addr);
322 void * ldv_kmem_cache_alloc_744(struct kmem_cache *ldv_func_arg1, gfp_t flags);
540 void tasklet_unlock_wait(struct tasklet_struct *t);
581 void tasklet_disable_nosync(struct tasklet_struct *t);
587 void tasklet_disable(struct tasklet_struct *t);
594 void tasklet_enable(struct tasklet_struct *t);
600 void tasklet_kill(struct tasklet_struct *);
82 void rxe_do_task(unsigned long data);
75 void list_add_tail(struct list_head *new, struct list_head *head);
187 int list_empty___1(const struct list_head *head);
66 int strcmp(const char *, const char *);
502 int rcu_read_lock_held();
503 int rcu_read_lock_bh_held();
322 void * ldv_kmem_cache_alloc_766(struct kmem_cache *ldv_func_arg1, gfp_t flags);
326 void * ldv_kmem_cache_alloc_786(struct kmem_cache *ldv_func_arg1, gfp_t flags);
831 struct dst_entry * skb_dst(const struct sk_buff *skb);
850 void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst);
909 struct sk_buff * alloc_skb(unsigned int size, gfp_t flags);
971 struct sk_buff * ldv_skb_clone_776(struct sk_buff *ldv_func_arg1, gfp_t flags);
975 struct sk_buff * ldv_skb_clone_785(struct sk_buff *ldv_func_arg1, gfp_t flags);
979 struct sk_buff * ldv_skb_clone_787(struct sk_buff *ldv_func_arg1, gfp_t flags);
984 struct sk_buff * ldv_skb_copy_778(const struct sk_buff *ldv_func_arg1, gfp_t flags);
997 int ldv_pskb_expand_head_773(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
1001 int ldv_pskb_expand_head_774(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
1005 int ldv_pskb_expand_head_782(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
1009 int ldv_pskb_expand_head_783(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
1013 int ldv_pskb_expand_head_784(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
1047 void skb_clear_hash(struct sk_buff *skb);
1183 unsigned char * skb_end_pointer(const struct sk_buff *skb);
1784 bool skb_is_nonlinear(const struct sk_buff *skb);
1905 unsigned char * skb_put(struct sk_buff *, unsigned int);
1915 unsigned char * skb_push(struct sk_buff *, unsigned int);
1916 unsigned char * __skb_push(struct sk_buff *skb, unsigned int len);
1936 unsigned char * __pskb_pull_tail(struct sk_buff *, int);
2006 void skb_reserve(struct sk_buff *skb, int len);
2126 unsigned char * skb_transport_header(const struct sk_buff *skb);
2131 void skb_reset_transport_header(struct sk_buff *skb);
2148 void skb_reset_network_header(struct sk_buff *skb);
2395 struct sk_buff * ldv___netdev_alloc_skb_779(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);
2399 struct sk_buff * ldv___netdev_alloc_skb_780(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);
2403 struct sk_buff * ldv___netdev_alloc_skb_781(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);
2807 int __skb_linearize(struct sk_buff *skb);
2819 int skb_linearize(struct sk_buff *skb);
3025 void skb_scrub_packet(struct sk_buff *, bool );
256 struct net * read_pnet(const possible_net_t *pnet);
2015 struct net * dev_net(const struct net_device *dev);
2279 int register_netdevice_notifier(struct notifier_block *);
2280 int unregister_netdevice_notifier(struct notifier_block *);
2311 struct net_device * netdev_notifier_info_to_dev(const struct netdev_notifier_info *info);
3744 int dev_mc_add(struct net_device *, const unsigned char *);
3747 int dev_mc_del(struct net_device *, const unsigned char *);
111 struct net_device * vlan_dev_real_dev(const struct net_device *);
273 void dst_release(struct dst_entry *);
2183 struct net * sock_net(const struct sock *sk);
25 struct udphdr * udp_hdr(const struct sk_buff *skb);
211 void ipv6_eth_mc_map(const struct in6_addr *addr, char *buf);
831 void ip6_flow_hdr(struct ipv6hdr *hdr, unsigned int tclass, __be32 flowlabel);
928 int ip6_local_out(struct net *, struct sock *, struct sk_buff *);
126 struct rtable * ip_route_output_flow(struct net *, struct flowi4 *, const struct sock *);
131 struct rtable * ip_route_output_key(struct net *net, struct flowi4 *flp);
213 extern const struct ipv6_stub *ipv6_stub;
117 void ip_send_check(struct iphdr *);
119 int ip_local_out(struct net *, struct sock *, struct sk_buff *);
330 void __ip_select_ident(struct net *, struct iphdr *, int);
38 int udp_sock_create4(struct net *, struct udp_port_cfg *, struct socket **);
42 int udp_sock_create6(struct net *, struct udp_port_cfg *, struct socket **);
52 int udp_sock_create(struct net *net, struct udp_port_cfg *cfg, struct socket **sockp);
84 void setup_udp_tunnel_sock(struct net *, struct socket *, struct udp_tunnel_sock_cfg *);
142 void udp_tunnel_sock_release(struct socket *);
2086 struct ib_device * ib_alloc_device(size_t );
2160 void ib_dispatch_event(struct ib_event *);
46 struct rxe_recv_sockets recv_sockets = { };
48 struct rxe_dev * rxe_net_add(struct net_device *ndev);
71 struct rxe_dev * net_to_rxe(struct net_device *ndev);
72 struct rxe_dev * get_rxe_by_name(const char *name);
74 void rxe_port_up(struct rxe_dev *rxe);
75 void rxe_port_down(struct rxe_dev *rxe);
49 struct list_head rxe_dev_list = { &rxe_dev_list, &rxe_dev_list };
50 struct spinlock dev_list_lock = { };
88 __be64 rxe_mac_to_eui64(struct net_device *ndev);
106 __be64 node_guid(struct rxe_dev *rxe);
111 __be64 port_guid(struct rxe_dev *rxe);
116 struct device * dma_device(struct rxe_dev *rxe);
128 int mcast_add(struct rxe_dev *rxe, union ib_gid *mgid);
139 int mcast_delete(struct rxe_dev *rxe, union ib_gid *mgid);
150 struct dst_entry * rxe_find_route4(struct net_device *ndev, struct in_addr *saddr, struct in_addr *daddr);
173 struct dst_entry * rxe_find_route6(struct net_device *ndev, struct in6_addr *saddr, struct in6_addr *daddr);
214 int rxe_udp_encap_recv(struct sock *sk, struct sk_buff *skb);
242 struct socket * rxe_setup_udp_tunnel(struct net *net, __be16 port, bool ipv6);
279 void rxe_release_udp_tunnel(struct socket *sk);
284 void prepare_udp_hdr(struct sk_buff *skb, __be16 src_port, __be16 dst_port);
299 void prepare_ipv4_hdr(struct dst_entry *dst, struct sk_buff *skb, __be32 saddr, __be32 daddr, __u8 proto, __u8 tos, __u8 ttl, __be16 df, bool xnet);
330 void prepare_ipv6_hdr(struct dst_entry *dst, struct sk_buff *skb, struct in6_addr *saddr, struct in6_addr *daddr, __u8 proto, __u8 prio, __u8 ttl);
353 int prepare4(struct rxe_dev *rxe, struct sk_buff *skb, struct rxe_av *av);
379 int prepare6(struct rxe_dev *rxe, struct sk_buff *skb, struct rxe_av *av);
404 int prepare(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, struct sk_buff *skb, u32 *crc);
420 void rxe_skb_tx_dtor(struct sk_buff *skb);
431 int send(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, struct sk_buff *skb);
467 int loopback(struct sk_buff *skb);
477 struct sk_buff * init_packet(struct rxe_dev *rxe, struct rxe_av *av, int paylen, struct rxe_pkt_info *pkt);
517 char * parent_name(struct rxe_dev *rxe, unsigned int port_num);
522 enum rdma_link_layer link_layer(struct rxe_dev *rxe, unsigned int port_num);
528 struct rxe_ifc_ops ifc_ops = { 0, &node_guid, &port_guid, &dma_device, &mcast_add, &mcast_delete, &prepare, &send, &loopback, &init_packet, &parent_name, &link_layer };
580 const char __kstrtab_rxe_remove_all[15U] = { 'r', 'x', 'e', '_', 'r', 'e', 'm', 'o', 'v', 'e', '_', 'a', 'l', 'l', '\x0' };
580 const struct kernel_symbol __ksymtab_rxe_remove_all;
580 const struct kernel_symbol __ksymtab_rxe_remove_all = { (unsigned long)(&rxe_remove_all), (const char *)(&__kstrtab_rxe_remove_all) };
582 void rxe_port_event(struct rxe_dev *rxe, enum ib_event_type event);
622 int rxe_notify(struct notifier_block *not_blk, unsigned long event, void *arg);
662 struct notifier_block rxe_net_notifier = { &rxe_notify, 0, 0 };
748 void ldv_main19_sequence_infinite_withcheck_stateful();
45 int strncmp(const char *, const char *, __kernel_size_t );
322 void * ldv_kmem_cache_alloc_807(struct kmem_cache *ldv_func_arg1, gfp_t flags);
326 void * ldv_kmem_cache_alloc_827(struct kmem_cache *ldv_func_arg1, gfp_t flags);
971 struct sk_buff * ldv_skb_clone_817(struct sk_buff *ldv_func_arg1, gfp_t flags);
975 struct sk_buff * ldv_skb_clone_826(struct sk_buff *ldv_func_arg1, gfp_t flags);
980 struct sk_buff * ldv_skb_copy_819(const struct sk_buff *ldv_func_arg1, gfp_t flags);
993 int ldv_pskb_expand_head_814(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
997 int ldv_pskb_expand_head_815(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
1001 int ldv_pskb_expand_head_823(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
1005 int ldv_pskb_expand_head_824(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
1009 int ldv_pskb_expand_head_825(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
2395 struct sk_buff * ldv___netdev_alloc_skb_820(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);
2399 struct sk_buff * ldv___netdev_alloc_skb_821(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);
2403 struct sk_buff * ldv___netdev_alloc_skb_822(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);
2387 struct net_device * dev_get_by_name(struct net *, const char *);
3082 bool netif_running(const struct net_device *dev);
3352 bool netif_carrier_ok(const struct net_device *dev);
39 int sanitize_arg(const char *val, char *intf, int intf_len);
57 void rxe_set_port_state(struct net_device *ndev);
73 int rxe_param_set_add(const char *val, const struct kernel_param *kp);
116 int rxe_param_set_remove(const char *val, const struct kernel_param *kp);
197 void ldv_main20_sequence_infinite_withcheck_stateful();
10 void ldv_error();
25 int ldv_undef_int();
28 bool ldv_is_err_or_null(const void *ptr);
20 int ldv_spin = 0;
30 struct page * ldv_some_page();
33 struct page * ldv_check_alloc_flags_and_return_some_page(gfp_t flags);
63 int ldv_spin_trylock();
return ;
}
-entry_point
{
750 struct rxe_dev *var_group1;
751 union ib_gid *var_mcast_add_6_p1;
752 union ib_gid *var_mcast_delete_7_p1;
753 struct rxe_pkt_info *var_group2;
754 struct sk_buff *var_prepare_19_p2;
755 u32 *var_prepare_19_p3;
756 struct sk_buff *var_send_21_p2;
757 struct sk_buff *var_group3;
758 struct rxe_av *var_group4;
759 int var_init_packet_24_p2;
760 struct rxe_pkt_info *var_init_packet_24_p3;
761 unsigned int var_parent_name_25_p1;
762 unsigned int var_link_layer_26_p1;
763 struct notifier_block *var_group5;
764 unsigned long var_rxe_notify_32_p1;
765 void *var_rxe_notify_32_p2;
766 int tmp;
767 int tmp___0;
873 LDV_IN_INTERRUPT = 1;
882 ldv_initialize() { /* Function call is skipped due to function is undefined */}
888 goto ldv_66141;
888 tmp___0 = nondet_int() { /* Function call is skipped due to function is undefined */}
888 assume(tmp___0 != 0);
890 goto ldv_66140;
889 ldv_66140:;
891 tmp = nondet_int() { /* Function call is skipped due to function is undefined */}
891 switch (tmp);
892 assume(!(tmp == 0));
915 assume(!(tmp == 1));
937 assume(!(tmp == 2));
959 assume(!(tmp == 3));
981 assume(!(tmp == 4));
1003 assume(!(tmp == 5));
1024 assume(!(tmp == 6));
1045 assume(tmp == 7);
1057 ldv_handler_precall() { /* Function call is skipped due to function is undefined */}
1058 -loopback(var_group3)
{
469 int tmp;
469 -rxe_rcv(skb)
{
357 int err;
358 struct rxe_pkt_info *pkt;
359 struct rxe_dev *rxe;
360 __be32 *icrcp;
361 unsigned int calc_icrc;
362 unsigned int pack_icrc;
363 long tmp;
364 struct ratelimit_state _rs;
365 int tmp___0;
366 int tmp___1;
367 long tmp___2;
368 unsigned long tmp___3;
369 long tmp___4;
370 long tmp___5;
371 unsigned int tmp___6;
372 unsigned long tmp___7;
373 void *tmp___8;
374 unsigned int tmp___9;
375 char saddr[16U];
376 struct ipv6hdr *tmp___10;
377 struct iphdr *tmp___11;
378 struct ratelimit_state _rs___0;
379 int tmp___12;
380 long tmp___13;
381 unsigned int tmp___14;
382 long tmp___15;
358 pkt = (struct rxe_pkt_info *)(&(skb->cb));
359 rxe = pkt->rxe;
363 pkt->offset = 0U;
365 int __CPAchecker_TMP_0 = (int)(pkt->offset);
365 -__builtin_expect((skb->len) < ((unsigned int)(__CPAchecker_TMP_0 + 12)), 0L)
{
52 return exp;;
}
365 assume(!(tmp != 0L));
368 -rxe_match_dgid(rxe, skb)
{
337 union ib_gid dgid;
338 union ib_gid *pdgid;
339 unsigned short index;
340 struct iphdr *tmp;
341 struct ipv6hdr *tmp___0;
342 int tmp___1;
341 unsigned int __CPAchecker_TMP_0 = (unsigned int)(skb->protocol);
341 assume(!(__CPAchecker_TMP_0 == 8U));
346 -ipv6_hdr((const struct sk_buff *)skb)
{
83 unsigned char *tmp;
83 -skb_network_header(skb)
{
2145 unsigned char *__CPAchecker_TMP_0 = (unsigned char *)(skb->head);
2145 unsigned long __CPAchecker_TMP_1 = (unsigned long)(skb->network_header);
2145 return __CPAchecker_TMP_0 + __CPAchecker_TMP_1;;
}
83 return (struct ipv6hdr *)tmp;;
}
346 pdgid = (union ib_gid *)(&(tmp___0->daddr));
349 tmp___1 = ib_find_cached_gid_by_port(&(rxe->ib_dev), (const union ib_gid *)pdgid, 1, 1, rxe->ndev, &index) { /* Function call is skipped due to function is undefined */}
349 return tmp___1;;
}
368 -__builtin_expect(tmp___1 < 0, 0L)
{
52 return exp;;
}
368 assume(!(tmp___2 != 0L));
373 -bth_opcode(pkt)
{
302 unsigned char tmp;
302 void *__CPAchecker_TMP_0 = (void *)(pkt->hdr);
302 unsigned long __CPAchecker_TMP_1 = (unsigned long)(pkt->offset);
302 -__bth_opcode(__CPAchecker_TMP_0 + __CPAchecker_TMP_1)
{
106 struct rxe_bth *bth;
106 bth = (struct rxe_bth *)arg;
108 return bth->opcode;;
}
302 return tmp;;
}
374 -bth_psn(pkt)
{
417 unsigned int tmp;
417 void *__CPAchecker_TMP_0 = (void *)(pkt->hdr);
417 unsigned long __CPAchecker_TMP_1 = (unsigned long)(pkt->offset);
417 -__bth_psn(__CPAchecker_TMP_0 + __CPAchecker_TMP_1)
{
286 struct rxe_bth *bth;
287 unsigned int tmp;
286 bth = (struct rxe_bth *)arg;
288 -__fswab32(bth->apsn)
{
57 unsigned int tmp;
58 -__arch_swab32(val)
{
9 Ignored inline assembler code
10 return val;;
}
58 return tmp;;
}
288 return tmp & 16777215U;;
}
417 return tmp;;
}
375 pkt->qp = (struct rxe_qp *)0;
376 int __CPAchecker_TMP_1 = (int)(pkt->opcode);
376 pkt->mask = (pkt->mask) | ((u32 )((rxe_opcode[__CPAchecker_TMP_1]).mask));
378 -header_size(pkt)
{
937 int __CPAchecker_TMP_0 = (int)(pkt->offset);
937 int __CPAchecker_TMP_1 = (int)(pkt->opcode);
937 return (size_t )(__CPAchecker_TMP_0 + ((rxe_opcode[__CPAchecker_TMP_1]).length));;
}
378 size_t __CPAchecker_TMP_2 = (size_t )(skb->len);
378 -__builtin_expect(__CPAchecker_TMP_2 < tmp___3, 0L)
{
52 return exp;;
}
378 assume(!(tmp___4 != 0L));
381 -hdr_check(pkt)
{
216 struct rxe_dev *rxe;
217 struct rxe_port *port;
218 struct rxe_qp *qp;
219 unsigned int qpn;
220 unsigned int tmp;
221 int index;
222 int err;
223 struct ratelimit_state _rs;
224 int tmp___0;
225 unsigned char tmp___1;
226 long tmp___2;
227 void *tmp___3;
228 struct ratelimit_state _rs___0;
229 int tmp___4;
230 long tmp___5;
231 long tmp___6;
232 long tmp___7;
233 long tmp___8;
234 struct ratelimit_state _rs___1;
235 int tmp___9;
236 long tmp___10;
216 rxe = pkt->rxe;
217 port = &(rxe->port);
218 qp = (struct rxe_qp *)0;
219 -bth_qpn(pkt)
{
362 unsigned int tmp;
362 void *__CPAchecker_TMP_0 = (void *)(pkt->hdr);
362 unsigned long __CPAchecker_TMP_1 = (unsigned long)(pkt->offset);
362 -__bth_qpn(__CPAchecker_TMP_0 + __CPAchecker_TMP_1)
{
198 struct rxe_bth *bth;
199 unsigned int tmp;
198 bth = (struct rxe_bth *)arg;
200 -__fswab32(bth->qpn)
{
57 unsigned int tmp;
58 -__arch_swab32(val)
{
9 Ignored inline assembler code
10 return val;;
}
58 return tmp;;
}
200 return tmp & 16777215U;;
}
362 return tmp;;
}
219 qpn = tmp;
223 -bth_tver(pkt)
{
342 unsigned char tmp;
342 void *__CPAchecker_TMP_0 = (void *)(pkt->hdr);
342 unsigned long __CPAchecker_TMP_1 = (unsigned long)(pkt->offset);
342 -__bth_tver(__CPAchecker_TMP_0 + __CPAchecker_TMP_1)
{
169 struct rxe_bth *bth;
169 bth = (struct rxe_bth *)arg;
171 unsigned int __CPAchecker_TMP_0 = (unsigned int)(bth->flags);
171 return __CPAchecker_TMP_0 & 15U;;
}
342 return tmp;;
}
223 -__builtin_expect(((unsigned int)tmp___1) != 0U, 0L)
{
52 return exp;;
}
223 assume(!(tmp___2 != 0L));
228 assume(!(qpn != 16777215U));
249 -__builtin_expect(((pkt->mask) & 2U) == 0U, 0L)
{
52 return exp;;
}
249 assume(!(tmp___10 != 0L));
255 pkt->qp = qp;
256 return 0;;
}
382 -__builtin_expect(err != 0, 0L)
{
52 return exp;;
}
382 assume(!(tmp___5 != 0L));
386 unsigned long __CPAchecker_TMP_3 = (unsigned long)(pkt->paylen);
386 icrcp = (__be32 *)((pkt->hdr) + (__CPAchecker_TMP_3 + 18446744073709551612UL));
387 -__fswab32(*icrcp)
{
57 unsigned int tmp;
58 -__arch_swab32(val)
{
9 Ignored inline assembler code
10 return val;;
}
58 return tmp;;
}
387 pack_icrc = tmp___6;
389 -rxe_icrc_hdr(pkt, skb)
{
40 unsigned int bth_offset;
41 struct iphdr *ip4h;
42 struct ipv6hdr *ip6h;
43 struct udphdr *udph;
44 struct rxe_bth *bth;
45 int crc;
46 int length;
47 int hdr_size;
48 u8 pshdr[60U];
49 struct iphdr *tmp;
50 struct ipv6hdr *tmp___0;
51 unsigned int tmp___1;
52 unsigned int tmp___2;
40 bth_offset = 0U;
41 ip4h = (struct iphdr *)0;
42 ip6h = (struct ipv6hdr *)0;
47 int __CPAchecker_TMP_1;
47 unsigned int __CPAchecker_TMP_2 = (unsigned int)(skb->protocol);
47 assume(!(__CPAchecker_TMP_2 == 8U));
47 __CPAchecker_TMP_1 = 48;
47 hdr_size = __CPAchecker_TMP_1;
60 crc = -558161693;
62 unsigned int __CPAchecker_TMP_3 = (unsigned int)(skb->protocol);
62 assume(!(__CPAchecker_TMP_3 == 8U));
71 -ipv6_hdr((const struct sk_buff *)skb)
{
83 unsigned char *tmp;
83 -skb_network_header(skb)
{
2145 unsigned char *__CPAchecker_TMP_0 = (unsigned char *)(skb->head);
2145 unsigned long __CPAchecker_TMP_1 = (unsigned long)(skb->network_header);
2145 return __CPAchecker_TMP_0 + __CPAchecker_TMP_1;;
}
83 return (struct ipv6hdr *)tmp;;
}
71 __memcpy((void *)(&pshdr), (const void *)tmp___0, (size_t )hdr_size) { /* Function call is skipped due to function is undefined */}
72 ip6h = (struct ipv6hdr *)(&pshdr);
73 udph = ((struct udphdr *)ip6h) + 1U;
75 __memset((void *)(&(ip6h->flow_lbl)), 255, 3UL) { /* Function call is skipped due to function is undefined */}
76 ip6h->priority = 15U;
77 ip6h->hop_limit = 255U;
79 udph->check = 65535U;
81 bth_offset = bth_offset + ((unsigned int)hdr_size);
83 const void *__CPAchecker_TMP_4 = (const void *)(pkt->hdr);
83 __memcpy(((void *)(&pshdr)) + ((unsigned long)bth_offset), __CPAchecker_TMP_4, 12UL) { /* Function call is skipped due to function is undefined */}
84 bth = ((struct rxe_bth *)(&pshdr)) + ((unsigned long)bth_offset);
87 bth->qpn = (bth->qpn) | 255U;
89 length = hdr_size + 12;
90 tmp___1 = crc32_le((u32 )crc, (const unsigned char *)(&pshdr), (size_t )length) { /* Function call is skipped due to function is undefined */}
90 crc = (int)tmp___1;
93 const unsigned char *__CPAchecker_TMP_5 = (const unsigned char *)(pkt->hdr);
93 int __CPAchecker_TMP_6 = (int)(pkt->opcode);
93 tmp___2 = crc32_le((u32 )crc, __CPAchecker_TMP_5 + 12U, (size_t )(((rxe_opcode[__CPAchecker_TMP_6]).length) + -12)) { /* Function call is skipped due to function is undefined */}
93 crc = (int)tmp___2;
95 return (u32 )crc;;
}
390 -payload_size(pkt)
{
948 unsigned char tmp;
948 -bth_pad(pkt)
{
332 unsigned char tmp;
332 void *__CPAchecker_TMP_0 = (void *)(pkt->hdr);
332 unsigned long __CPAchecker_TMP_1 = (unsigned long)(pkt->offset);
332 -__bth_pad(__CPAchecker_TMP_0 + __CPAchecker_TMP_1)
{
154 struct rxe_bth *bth;
154 bth = (struct rxe_bth *)arg;
156 unsigned int __CPAchecker_TMP_0 = (unsigned int)(bth->flags);
156 return (u8 )((__CPAchecker_TMP_0 & 48U) >> 4);;
}
332 return tmp;;
}
948 int __CPAchecker_TMP_0 = (int)(pkt->paylen);
948 int __CPAchecker_TMP_1 = (int)(pkt->opcode);
948 return (size_t )(((__CPAchecker_TMP_0 - (((rxe_opcode[__CPAchecker_TMP_1]).offset)[11])) - ((int)tmp)) + -4);;
}
390 -payload_addr(pkt)
{
942 unsigned long __CPAchecker_TMP_0 = (unsigned long)(pkt->offset);
942 int __CPAchecker_TMP_1 = (int)(pkt->opcode);
942 return (void *)((pkt->hdr) + (__CPAchecker_TMP_0 + ((unsigned long)(((rxe_opcode[__CPAchecker_TMP_1]).offset)[11]))));;
}
390 calc_icrc = crc32_le(calc_icrc, (const unsigned char *)tmp___8, tmp___7) { /* Function call is skipped due to function is undefined */}
391 -__fswab32(~calc_icrc)
{
57 unsigned int tmp;
58 -__arch_swab32(val)
{
9 Ignored inline assembler code
10 return val;;
}
58 return tmp;;
}
391 calc_icrc = tmp___9;
392 -__builtin_expect(calc_icrc != pack_icrc, 0L)
{
52 return exp;;
}
392 assume(!(tmp___13 != 0L));
406 -bth_qpn(pkt)
{
362 unsigned int tmp;
362 void *__CPAchecker_TMP_0 = (void *)(pkt->hdr);
362 unsigned long __CPAchecker_TMP_1 = (unsigned long)(pkt->offset);
362 -__bth_qpn(__CPAchecker_TMP_0 + __CPAchecker_TMP_1)
{
198 struct rxe_bth *bth;
199 unsigned int tmp;
198 bth = (struct rxe_bth *)arg;
200 -__fswab32(bth->qpn)
{
57 unsigned int tmp;
58 -__arch_swab32(val)
{
9 Ignored inline assembler code
10 return val;;
}
58 return tmp;;
}
200 return tmp & 16777215U;;
}
362 return tmp;;
}
406 -__builtin_expect(tmp___14 == 16777215U, 0L)
{
52 return exp;;
}
406 assume(tmp___15 != 0L);
407 -rxe_rcv_mcast_pkt(rxe, skb)
{
277 struct rxe_pkt_info *pkt;
278 struct rxe_mc_grp *mcg;
279 struct sk_buff *skb_copy___0;
280 struct rxe_mc_elem *mce;
281 struct rxe_qp *qp;
282 union ib_gid dgid;
283 int err;
284 struct iphdr *tmp;
285 struct ipv6hdr *tmp___0;
286 void *tmp___1;
287 const struct list_head *__mptr;
288 unsigned int tmp___2;
289 struct sk_buff *tmp___3;
290 const struct list_head *__mptr___0;
277 pkt = (struct rxe_pkt_info *)(&(skb->cb));
285 unsigned int __CPAchecker_TMP_0 = (unsigned int)(skb->protocol);
285 assume(!(__CPAchecker_TMP_0 == 8U));
288 unsigned int __CPAchecker_TMP_1 = (unsigned int)(skb->protocol);
288 assume(!(__CPAchecker_TMP_1 == 56710U));
292 -rxe_pool_get_key(&(rxe->mc_grp_pool), (void *)(&dgid))
{
470 struct rb_node *node;
471 struct rxe_pool_entry *elem;
472 int cmp;
473 unsigned long flags;
474 const struct rb_node *__mptr;
470 node = (struct rb_node *)0;
471 elem = (struct rxe_pool_entry *)0;
475 -ldv_spin_lock()
{
52 ldv_spin = 1;
53 return ;;
}
477 unsigned int __CPAchecker_TMP_0 = (unsigned int)(pool->state);
477 assume(!(__CPAchecker_TMP_0 != 1U));
480 node = pool->tree.rb_node;
482 goto ldv_63933;
482 assume(((unsigned long)node) != ((unsigned long)((struct rb_node *)0)));
484 goto ldv_63932;
483 ldv_63932:;
483 __mptr = (const struct rb_node *)node;
483 elem = ((struct rxe_pool_entry *)__mptr) + 18446744073709551584UL;
485 cmp = memcmp(((const void *)elem) + (pool->key_offset), (const void *)key, pool->key_size) { /* Function call is skipped due to function is undefined */}
488 assume(!(cmp > 0));
490 assume(!(cmp < 0));
493 goto ldv_63931;
496 assume(((unsigned long)node) != ((unsigned long)((struct rb_node *)0)));
497 -kref_get(&(elem->ref_cnt))
{
42 _Bool __warned;
43 int __ret_warn_once;
44 int tmp;
45 int __ret_warn_on;
46 long tmp___0;
47 long tmp___1;
46 -atomic_add_return(1, &(kref->refcount))
{
156 int __ret;
156 __ret = i;
156 switch (4UL);
157 __case__[4UL == 4UL]
156 Ignored inline assembler code
156 goto ldv_3993;
156 return __ret + i;;
}
46 __ret_warn_once = tmp <= 1;
46 int __CPAchecker_TMP_0;
46 assume(!(__ret_warn_once != 0));
__CPAchecker_TMP_0 = 0;
46 -__builtin_expect((long)__CPAchecker_TMP_0, 0L)
{
52 return exp;;
}
46 assume(!(tmp___1 != 0L));
46 -__builtin_expect(__ret_warn_once != 0, 0L)
{
52 return exp;;
}
48 return ;;
}
501 out:;
500 -spin_unlock_irqrestore(&(pool->pool_lock), flags)
{
109 -ldv_spin_unlock()
{
59 ldv_spin = 0;
60 return ;;
}
111 -ldv_spin_unlock_irqrestore_52(lock, flags)
{
378 _raw_spin_unlock_irqrestore(&(lock->__annonCompField20.rlock), flags) { /* Function call is skipped due to function is undefined */}
379 return ;;
}
112 return ;;
}
501 void *__CPAchecker_TMP_1;
501 assume(((unsigned long)node) != ((unsigned long)((struct rb_node *)0)));
501 __CPAchecker_TMP_1 = (void *)elem;
501 return __CPAchecker_TMP_1;;
}
292 mcg = (struct rxe_mc_grp *)tmp___1;
293 assume(!(((unsigned long)mcg) == ((unsigned long)((struct rxe_mc_grp *)0))));
296 -spin_lock_bh(&(mcg->mcg_lock))
{
58 -ldv_spin_lock()
{
52 ldv_spin = 1;
53 return ;;
}
60 -ldv_spin_lock_bh_126(lock)
{
311 _raw_spin_lock_bh(&(lock->__annonCompField20.rlock)) { /* Function call is skipped due to function is undefined */}
312 return ;;
}
61 return ;;
}
298 __mptr = (const struct list_head *)(mcg->qp_list.next);
298 mce = ((struct rxe_mc_elem *)__mptr) + 18446744073709551552UL;
298 goto ldv_63902;
298 assume(((unsigned long)(&(mce->qp_list))) != ((unsigned long)(&(mcg->qp_list))));
300 goto ldv_63901;
299 ldv_63901:;
299 qp = mce->qp;
300 pkt = (struct rxe_pkt_info *)(&(skb->cb));
303 -check_type_state(rxe, pkt, qp)
{
41 long tmp;
42 enum ib_qp_type tmp___0;
43 struct ratelimit_state _rs;
44 int tmp___1;
45 long tmp___2;
46 struct ratelimit_state _rs___0;
47 int tmp___3;
48 long tmp___4;
49 struct ratelimit_state _rs___1;
50 int tmp___5;
51 long tmp___6;
52 struct ratelimit_state _rs___2;
53 int tmp___7;
54 long tmp___8;
55 long tmp___9;
42 -__builtin_expect((qp->valid) == 0U, 0L)
{
52 return exp;;
}
42 assume(!(tmp != 0L));
45 -qp_type(qp)
{
174 return qp->ibqp.qp_type;;
}
45 switch ((unsigned int)tmp___0);
46 assume(!(((unsigned int)tmp___0) == 2U));
52 assume(!(((unsigned int)tmp___0) == 3U));
58 assume(!(((unsigned int)tmp___0) == 4U));
59 assume(!(((unsigned int)tmp___0) == 0U));
60 assume(((unsigned int)tmp___0) == 1U);
61 int __CPAchecker_TMP_1 = (int)(pkt->opcode);
61 -__builtin_expect((__CPAchecker_TMP_1 & 96) == 0, 0L)
{
52 return exp;;
}
61 assume(!(tmp___6 != 0L));
65 goto ldv_63789;
70 ldv_63789:;
71 assume(!(((pkt->mask) & 4096U) != 0U));
74 int __CPAchecker_TMP_2;
74 assume(!(((unsigned int)(qp->req.state)) <= 1U));
74 assume(!(((unsigned int)(qp->req.state)) > 4U));
__CPAchecker_TMP_2 = 0;
74 -__builtin_expect((long)__CPAchecker_TMP_2, 0L)
{
52 return exp;;
}
74 assume(!(tmp___9 != 0L));
79 return 0;;
}
304 assume(!(err != 0));
307 -bth_qpn(pkt)
{
362 unsigned int tmp;
362 void *__CPAchecker_TMP_0 = (void *)(pkt->hdr);
362 unsigned long __CPAchecker_TMP_1 = (unsigned long)(pkt->offset);
362 -__bth_qpn(__CPAchecker_TMP_0 + __CPAchecker_TMP_1)
{
198 struct rxe_bth *bth;
199 unsigned int tmp;
198 bth = (struct rxe_bth *)arg;
200 -__fswab32(bth->qpn)
{
57 unsigned int tmp;
58 -__arch_swab32(val)
{
9 Ignored inline assembler code
10 return val;;
}
58 return tmp;;
}
200 return tmp & 16777215U;;
}
362 return tmp;;
}
307 -check_keys(rxe, pkt, tmp___2, qp)
{
103 int i;
104 int found_pkey;
105 struct rxe_port *port;
106 unsigned short pkey;
107 unsigned short tmp;
108 int tmp___0;
109 struct ratelimit_state _rs;
110 int tmp___1;
111 struct ratelimit_state _rs___0;
112 int tmp___2;
113 int tmp___3;
114 long tmp___4;
115 unsigned int qkey;
116 struct ratelimit_state _rs___1;
117 unsigned int tmp___5;
118 int tmp___6;
119 unsigned int tmp___7;
120 long tmp___8;
121 enum ib_qp_type tmp___9;
122 enum ib_qp_type tmp___10;
105 found_pkey = 0;
106 port = &(rxe->port);
107 -bth_pkey(pkt)
{
352 unsigned short tmp;
352 void *__CPAchecker_TMP_0 = (void *)(pkt->hdr);
352 unsigned long __CPAchecker_TMP_1 = (unsigned long)(pkt->offset);
352 -__bth_pkey(__CPAchecker_TMP_0 + __CPAchecker_TMP_1)
{
184 struct rxe_bth *bth;
185 unsigned short tmp;
184 bth = (struct rxe_bth *)arg;
186 int __CPAchecker_TMP_0 = (int)(bth->pkey);
186 -__fswab16(__CPAchecker_TMP_0)
{
51 return (__u16 )(((int)((short)(((int)val) << 8))) | ((int)((short)(((int)val) >> 8))));;
}
186 return tmp;;
}
352 return tmp;;
}
107 pkey = tmp;
109 pkt->pkey_index = 0U;
111 assume(!(qpn == 1U));
125 assume(!(qpn != 0U));
136 -qp_type(qp)
{
174 return qp->ibqp.qp_type;;
}
136 assume(!(((unsigned int)tmp___9) == 4U));
136 -qp_type(qp)
{
174 return qp->ibqp.qp_type;;
}
136 assume(((unsigned int)tmp___10) == 1U);
137 _L:;
136 assume(!(qpn != 0U));
148 return 0;;
}
308 assume(!(err != 0));
314 assume(((unsigned long)(mce->qp_list.next)) != ((unsigned long)(&(mcg->qp_list))));
314 -ldv_skb_clone_203(skb, 37748928U)
{
320 void *tmp;
321 -ldv_check_alloc_flags(flags)
{
27 assume(ldv_spin != 0);
27 assume(flags != 34078752U);
27 assume(flags != 33554432U);
27 -ldv_error()
{
15 LDV_ERROR:;
}
}
}
}
}
}
}
Source code
1 #ifndef _ASM_X86_ATOMIC_H 2 #define _ASM_X86_ATOMIC_H 3 4 #include <linux/compiler.h> 5 #include <linux/types.h> 6 #include <asm/alternative.h> 7 #include <asm/cmpxchg.h> 8 #include <asm/rmwcc.h> 9 #include <asm/barrier.h> 10 11 /* 12 * Atomic operations that C can't guarantee us. Useful for 13 * resource counting etc.. 14 */ 15 16 #define ATOMIC_INIT(i) { (i) } 17 18 /** 19 * atomic_read - read atomic variable 20 * @v: pointer of type atomic_t 21 * 22 * Atomically reads the value of @v. 23 */ 24 static __always_inline int atomic_read(const atomic_t *v) 25 { 26 return READ_ONCE((v)->counter); 27 } 28 29 /** 30 * atomic_set - set atomic variable 31 * @v: pointer of type atomic_t 32 * @i: required value 33 * 34 * Atomically sets the value of @v to @i. 35 */ 36 static __always_inline void atomic_set(atomic_t *v, int i) 37 { 38 WRITE_ONCE(v->counter, i); 39 } 40 41 /** 42 * atomic_add - add integer to atomic variable 43 * @i: integer value to add 44 * @v: pointer of type atomic_t 45 * 46 * Atomically adds @i to @v. 47 */ 48 static __always_inline void atomic_add(int i, atomic_t *v) 49 { 50 asm volatile(LOCK_PREFIX "addl %1,%0" 51 : "+m" (v->counter) 52 : "ir" (i)); 53 } 54 55 /** 56 * atomic_sub - subtract integer from atomic variable 57 * @i: integer value to subtract 58 * @v: pointer of type atomic_t 59 * 60 * Atomically subtracts @i from @v. 61 */ 62 static __always_inline void atomic_sub(int i, atomic_t *v) 63 { 64 asm volatile(LOCK_PREFIX "subl %1,%0" 65 : "+m" (v->counter) 66 : "ir" (i)); 67 } 68 69 /** 70 * atomic_sub_and_test - subtract value from variable and test result 71 * @i: integer value to subtract 72 * @v: pointer of type atomic_t 73 * 74 * Atomically subtracts @i from @v and returns 75 * true if the result is zero, or false for all 76 * other cases. 
77 */ 78 static __always_inline bool atomic_sub_and_test(int i, atomic_t *v) 79 { 80 GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", e); 81 } 82 83 /** 84 * atomic_inc - increment atomic variable 85 * @v: pointer of type atomic_t 86 * 87 * Atomically increments @v by 1. 88 */ 89 static __always_inline void atomic_inc(atomic_t *v) 90 { 91 asm volatile(LOCK_PREFIX "incl %0" 92 : "+m" (v->counter)); 93 } 94 95 /** 96 * atomic_dec - decrement atomic variable 97 * @v: pointer of type atomic_t 98 * 99 * Atomically decrements @v by 1. 100 */ 101 static __always_inline void atomic_dec(atomic_t *v) 102 { 103 asm volatile(LOCK_PREFIX "decl %0" 104 : "+m" (v->counter)); 105 } 106 107 /** 108 * atomic_dec_and_test - decrement and test 109 * @v: pointer of type atomic_t 110 * 111 * Atomically decrements @v by 1 and 112 * returns true if the result is 0, or false for all other 113 * cases. 114 */ 115 static __always_inline bool atomic_dec_and_test(atomic_t *v) 116 { 117 GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", e); 118 } 119 120 /** 121 * atomic_inc_and_test - increment and test 122 * @v: pointer of type atomic_t 123 * 124 * Atomically increments @v by 1 125 * and returns true if the result is zero, or false for all 126 * other cases. 127 */ 128 static __always_inline bool atomic_inc_and_test(atomic_t *v) 129 { 130 GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", e); 131 } 132 133 /** 134 * atomic_add_negative - add and test if negative 135 * @i: integer value to add 136 * @v: pointer of type atomic_t 137 * 138 * Atomically adds @i to @v and returns true 139 * if the result is negative, or false when 140 * result is greater than or equal to zero. 
141 */ 142 static __always_inline bool atomic_add_negative(int i, atomic_t *v) 143 { 144 GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", s); 145 } 146 147 /** 148 * atomic_add_return - add integer and return 149 * @i: integer value to add 150 * @v: pointer of type atomic_t 151 * 152 * Atomically adds @i to @v and returns @i + @v 153 */ 154 static __always_inline int atomic_add_return(int i, atomic_t *v) 155 { 156 return i + xadd(&v->counter, i); 157 } 158 159 /** 160 * atomic_sub_return - subtract integer and return 161 * @v: pointer of type atomic_t 162 * @i: integer value to subtract 163 * 164 * Atomically subtracts @i from @v and returns @v - @i 165 */ 166 static __always_inline int atomic_sub_return(int i, atomic_t *v) 167 { 168 return atomic_add_return(-i, v); 169 } 170 171 #define atomic_inc_return(v) (atomic_add_return(1, v)) 172 #define atomic_dec_return(v) (atomic_sub_return(1, v)) 173 174 static __always_inline int atomic_fetch_add(int i, atomic_t *v) 175 { 176 return xadd(&v->counter, i); 177 } 178 179 static __always_inline int atomic_fetch_sub(int i, atomic_t *v) 180 { 181 return xadd(&v->counter, -i); 182 } 183 184 static __always_inline int atomic_cmpxchg(atomic_t *v, int old, int new) 185 { 186 return cmpxchg(&v->counter, old, new); 187 } 188 189 static inline int atomic_xchg(atomic_t *v, int new) 190 { 191 return xchg(&v->counter, new); 192 } 193 194 #define ATOMIC_OP(op) \ 195 static inline void atomic_##op(int i, atomic_t *v) \ 196 { \ 197 asm volatile(LOCK_PREFIX #op"l %1,%0" \ 198 : "+m" (v->counter) \ 199 : "ir" (i) \ 200 : "memory"); \ 201 } 202 203 #define ATOMIC_FETCH_OP(op, c_op) \ 204 static inline int atomic_fetch_##op(int i, atomic_t *v) \ 205 { \ 206 int old, val = atomic_read(v); \ 207 for (;;) { \ 208 old = atomic_cmpxchg(v, val, val c_op i); \ 209 if (old == val) \ 210 break; \ 211 val = old; \ 212 } \ 213 return old; \ 214 } 215 216 #define ATOMIC_OPS(op, c_op) \ 217 ATOMIC_OP(op) \ 218 ATOMIC_FETCH_OP(op, c_op) 219 
220 ATOMIC_OPS(and, &) 221 ATOMIC_OPS(or , |) 222 ATOMIC_OPS(xor, ^) 223 224 #undef ATOMIC_OPS 225 #undef ATOMIC_FETCH_OP 226 #undef ATOMIC_OP 227 228 /** 229 * __atomic_add_unless - add unless the number is already a given value 230 * @v: pointer of type atomic_t 231 * @a: the amount to add to v... 232 * @u: ...unless v is equal to u. 233 * 234 * Atomically adds @a to @v, so long as @v was not already @u. 235 * Returns the old value of @v. 236 */ 237 static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u) 238 { 239 int c, old; 240 c = atomic_read(v); 241 for (;;) { 242 if (unlikely(c == (u))) 243 break; 244 old = atomic_cmpxchg((v), c, c + (a)); 245 if (likely(old == c)) 246 break; 247 c = old; 248 } 249 return c; 250 } 251 252 /** 253 * atomic_inc_short - increment of a short integer 254 * @v: pointer to type int 255 * 256 * Atomically adds 1 to @v 257 * Returns the new value of @u 258 */ 259 static __always_inline short int atomic_inc_short(short int *v) 260 { 261 asm(LOCK_PREFIX "addw $1, %0" : "+m" (*v)); 262 return *v; 263 } 264 265 #ifdef CONFIG_X86_32 266 # include <asm/atomic64_32.h> 267 #else 268 # include <asm/atomic64_64.h> 269 #endif 270 271 #endif /* _ASM_X86_ATOMIC_H */
1 #ifndef _ASM_X86_SWAB_H 2 #define _ASM_X86_SWAB_H 3 4 #include <linux/types.h> 5 #include <linux/compiler.h> 6 7 static inline __attribute_const__ __u32 __arch_swab32(__u32 val) 8 { 9 asm("bswapl %0" : "=r" (val) : "0" (val)); 10 return val; 11 } 12 #define __arch_swab32 __arch_swab32 13 14 static inline __attribute_const__ __u64 __arch_swab64(__u64 val) 15 { 16 #ifdef __i386__ 17 union { 18 struct { 19 __u32 a; 20 __u32 b; 21 } s; 22 __u64 u; 23 } v; 24 v.u = val; 25 asm("bswapl %0 ; bswapl %1 ; xchgl %0,%1" 26 : "=r" (v.s.a), "=r" (v.s.b) 27 : "0" (v.s.a), "1" (v.s.b)); 28 return v.u; 29 #else /* __i386__ */ 30 asm("bswapq %0" : "=r" (val) : "0" (val)); 31 return val; 32 #endif 33 } 34 #define __arch_swab64 __arch_swab64 35 36 #endif /* _ASM_X86_SWAB_H */
1 2 #include <linux/kernel.h> 3 bool ldv_is_err(const void *ptr); 4 bool ldv_is_err_or_null(const void *ptr); 5 void* ldv_err_ptr(long error); 6 long ldv_ptr_err(const void *ptr); 7 8 extern void ldv_spin_lock(void); 9 extern void ldv_spin_unlock(void); 10 extern int ldv_spin_trylock(void); 11 12 #include <linux/kernel.h> 13 #include <verifier/rcv.h> 14 #include <linux/module.h> 15 #include <linux/slab.h> 16 17 extern void *ldv_undefined_pointer(void); 18 extern void ldv_check_alloc_flags(gfp_t flags); 19 extern void ldv_check_alloc_nonatomic(void); 20 /* Returns an arbitrary page in addition to checking flags */ 21 extern struct page *ldv_check_alloc_flags_and_return_some_page(gfp_t flags); 22 #line 1 "/home/ubuntu/launches/work/current--X--drivers--X--defaultlinux-4.8-rc1.tar.xz--X--43_1a--X--cpachecker/linux-4.8-rc1.tar.xz/csd_deg_dscv/9940/dscv_tempdir/dscv/ri/43_1a/drivers/infiniband/sw/rxe/rxe_comp.c" 23 /* 24 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved. 25 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved. 26 * 27 * This software is available to you under a choice of one of two 28 * licenses. You may choose to be licensed under the terms of the GNU 29 * General Public License (GPL) Version 2, available from the file 30 * COPYING in the main directory of this source tree, or the 31 * OpenIB.org BSD license below: 32 * 33 * Redistribution and use in source and binary forms, with or 34 * without modification, are permitted provided that the following 35 * conditions are met: 36 * 37 * - Redistributions of source code must retain the above 38 * copyright notice, this list of conditions and the following 39 * disclaimer. 40 * 41 * - Redistributions in binary form must reproduce the above 42 * copyright notice, this list of conditions and the following 43 * disclaimer in the documentation and/or other materials 44 * provided with the distribution. 
45 * 46 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 47 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 48 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 49 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 50 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 51 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 52 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 53 * SOFTWARE. 54 */ 55 56 #include <linux/skbuff.h> 57 58 #include "rxe.h" 59 #include "rxe_loc.h" 60 #include "rxe_queue.h" 61 #include "rxe_task.h" 62 63 enum comp_state { 64 COMPST_GET_ACK, 65 COMPST_GET_WQE, 66 COMPST_COMP_WQE, 67 COMPST_COMP_ACK, 68 COMPST_CHECK_PSN, 69 COMPST_CHECK_ACK, 70 COMPST_READ, 71 COMPST_ATOMIC, 72 COMPST_WRITE_SEND, 73 COMPST_UPDATE_COMP, 74 COMPST_ERROR_RETRY, 75 COMPST_RNR_RETRY, 76 COMPST_ERROR, 77 COMPST_EXIT, /* We have an issue, and we want to rerun the completer */ 78 COMPST_DONE, /* The completer finished successflly */ 79 }; 80 81 static char *comp_state_name[] = { 82 [COMPST_GET_ACK] = "GET ACK", 83 [COMPST_GET_WQE] = "GET WQE", 84 [COMPST_COMP_WQE] = "COMP WQE", 85 [COMPST_COMP_ACK] = "COMP ACK", 86 [COMPST_CHECK_PSN] = "CHECK PSN", 87 [COMPST_CHECK_ACK] = "CHECK ACK", 88 [COMPST_READ] = "READ", 89 [COMPST_ATOMIC] = "ATOMIC", 90 [COMPST_WRITE_SEND] = "WRITE/SEND", 91 [COMPST_UPDATE_COMP] = "UPDATE COMP", 92 [COMPST_ERROR_RETRY] = "ERROR RETRY", 93 [COMPST_RNR_RETRY] = "RNR RETRY", 94 [COMPST_ERROR] = "ERROR", 95 [COMPST_EXIT] = "EXIT", 96 [COMPST_DONE] = "DONE", 97 }; 98 99 static unsigned long rnrnak_usec[32] = { 100 [IB_RNR_TIMER_655_36] = 655360, 101 [IB_RNR_TIMER_000_01] = 10, 102 [IB_RNR_TIMER_000_02] = 20, 103 [IB_RNR_TIMER_000_03] = 30, 104 [IB_RNR_TIMER_000_04] = 40, 105 [IB_RNR_TIMER_000_06] = 60, 106 [IB_RNR_TIMER_000_08] = 80, 107 [IB_RNR_TIMER_000_12] = 120, 108 [IB_RNR_TIMER_000_16] = 160, 109 [IB_RNR_TIMER_000_24] = 240, 110 
[IB_RNR_TIMER_000_32] = 320, 111 [IB_RNR_TIMER_000_48] = 480, 112 [IB_RNR_TIMER_000_64] = 640, 113 [IB_RNR_TIMER_000_96] = 960, 114 [IB_RNR_TIMER_001_28] = 1280, 115 [IB_RNR_TIMER_001_92] = 1920, 116 [IB_RNR_TIMER_002_56] = 2560, 117 [IB_RNR_TIMER_003_84] = 3840, 118 [IB_RNR_TIMER_005_12] = 5120, 119 [IB_RNR_TIMER_007_68] = 7680, 120 [IB_RNR_TIMER_010_24] = 10240, 121 [IB_RNR_TIMER_015_36] = 15360, 122 [IB_RNR_TIMER_020_48] = 20480, 123 [IB_RNR_TIMER_030_72] = 30720, 124 [IB_RNR_TIMER_040_96] = 40960, 125 [IB_RNR_TIMER_061_44] = 61410, 126 [IB_RNR_TIMER_081_92] = 81920, 127 [IB_RNR_TIMER_122_88] = 122880, 128 [IB_RNR_TIMER_163_84] = 163840, 129 [IB_RNR_TIMER_245_76] = 245760, 130 [IB_RNR_TIMER_327_68] = 327680, 131 [IB_RNR_TIMER_491_52] = 491520, 132 }; 133 134 static inline unsigned long rnrnak_jiffies(u8 timeout) 135 { 136 return max_t(unsigned long, 137 usecs_to_jiffies(rnrnak_usec[timeout]), 1); 138 } 139 140 static enum ib_wc_opcode wr_to_wc_opcode(enum ib_wr_opcode opcode) 141 { 142 switch (opcode) { 143 case IB_WR_RDMA_WRITE: return IB_WC_RDMA_WRITE; 144 case IB_WR_RDMA_WRITE_WITH_IMM: return IB_WC_RDMA_WRITE; 145 case IB_WR_SEND: return IB_WC_SEND; 146 case IB_WR_SEND_WITH_IMM: return IB_WC_SEND; 147 case IB_WR_RDMA_READ: return IB_WC_RDMA_READ; 148 case IB_WR_ATOMIC_CMP_AND_SWP: return IB_WC_COMP_SWAP; 149 case IB_WR_ATOMIC_FETCH_AND_ADD: return IB_WC_FETCH_ADD; 150 case IB_WR_LSO: return IB_WC_LSO; 151 case IB_WR_SEND_WITH_INV: return IB_WC_SEND; 152 case IB_WR_RDMA_READ_WITH_INV: return IB_WC_RDMA_READ; 153 case IB_WR_LOCAL_INV: return IB_WC_LOCAL_INV; 154 case IB_WR_REG_MR: return IB_WC_REG_MR; 155 156 default: 157 return 0xff; 158 } 159 } 160 161 void retransmit_timer(unsigned long data) 162 { 163 struct rxe_qp *qp = (struct rxe_qp *)data; 164 165 if (qp->valid) { 166 qp->comp.timeout = 1; 167 rxe_run_task(&qp->comp.task, 1); 168 } 169 } 170 171 void rxe_comp_queue_pkt(struct rxe_dev *rxe, struct rxe_qp *qp, 172 struct sk_buff *skb) 173 { 174 int 
must_sched; 175 176 skb_queue_tail(&qp->resp_pkts, skb); 177 178 must_sched = skb_queue_len(&qp->resp_pkts) > 1; 179 rxe_run_task(&qp->comp.task, must_sched); 180 } 181 182 static inline enum comp_state get_wqe(struct rxe_qp *qp, 183 struct rxe_pkt_info *pkt, 184 struct rxe_send_wqe **wqe_p) 185 { 186 struct rxe_send_wqe *wqe; 187 188 /* we come here whether or not we found a response packet to see if 189 * there are any posted WQEs 190 */ 191 wqe = queue_head(qp->sq.queue); 192 *wqe_p = wqe; 193 194 /* no WQE or requester has not started it yet */ 195 if (!wqe || wqe->state == wqe_state_posted) 196 return pkt ? COMPST_DONE : COMPST_EXIT; 197 198 /* WQE does not require an ack */ 199 if (wqe->state == wqe_state_done) 200 return COMPST_COMP_WQE; 201 202 /* WQE caused an error */ 203 if (wqe->state == wqe_state_error) 204 return COMPST_ERROR; 205 206 /* we have a WQE, if we also have an ack check its PSN */ 207 return pkt ? COMPST_CHECK_PSN : COMPST_EXIT; 208 } 209 210 static inline void reset_retry_counters(struct rxe_qp *qp) 211 { 212 qp->comp.retry_cnt = qp->attr.retry_cnt; 213 qp->comp.rnr_retry = qp->attr.rnr_retry; 214 } 215 216 static inline enum comp_state check_psn(struct rxe_qp *qp, 217 struct rxe_pkt_info *pkt, 218 struct rxe_send_wqe *wqe) 219 { 220 s32 diff; 221 222 /* check to see if response is past the oldest WQE. 
if it is, complete 223 * send/write or error read/atomic 224 */ 225 diff = psn_compare(pkt->psn, wqe->last_psn); 226 if (diff > 0) { 227 if (wqe->state == wqe_state_pending) { 228 if (wqe->mask & WR_ATOMIC_OR_READ_MASK) 229 return COMPST_ERROR_RETRY; 230 231 reset_retry_counters(qp); 232 return COMPST_COMP_WQE; 233 } else { 234 return COMPST_DONE; 235 } 236 } 237 238 /* compare response packet to expected response */ 239 diff = psn_compare(pkt->psn, qp->comp.psn); 240 if (diff < 0) { 241 /* response is most likely a retried packet if it matches an 242 * uncompleted WQE go complete it else ignore it 243 */ 244 if (pkt->psn == wqe->last_psn) 245 return COMPST_COMP_ACK; 246 else 247 return COMPST_DONE; 248 } else if ((diff > 0) && (wqe->mask & WR_ATOMIC_OR_READ_MASK)) { 249 return COMPST_ERROR_RETRY; 250 } else { 251 return COMPST_CHECK_ACK; 252 } 253 } 254 255 static inline enum comp_state check_ack(struct rxe_qp *qp, 256 struct rxe_pkt_info *pkt, 257 struct rxe_send_wqe *wqe) 258 { 259 unsigned int mask = pkt->mask; 260 u8 syn; 261 262 /* Check the sequence only */ 263 switch (qp->comp.opcode) { 264 case -1: 265 /* Will catch all *_ONLY cases. */ 266 if (!(mask & RXE_START_MASK)) 267 return COMPST_ERROR; 268 269 break; 270 271 case IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST: 272 case IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE: 273 if (pkt->opcode != IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE && 274 pkt->opcode != IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST) { 275 return COMPST_ERROR; 276 } 277 break; 278 default: 279 WARN_ON(1); 280 } 281 282 /* Check operation validity. 
*/ 283 switch (pkt->opcode) { 284 case IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST: 285 case IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST: 286 case IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY: 287 syn = aeth_syn(pkt); 288 289 if ((syn & AETH_TYPE_MASK) != AETH_ACK) 290 return COMPST_ERROR; 291 292 /* Fall through (IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE 293 * doesn't have an AETH) 294 */ 295 case IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE: 296 if (wqe->wr.opcode != IB_WR_RDMA_READ && 297 wqe->wr.opcode != IB_WR_RDMA_READ_WITH_INV) { 298 return COMPST_ERROR; 299 } 300 reset_retry_counters(qp); 301 return COMPST_READ; 302 303 case IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE: 304 syn = aeth_syn(pkt); 305 306 if ((syn & AETH_TYPE_MASK) != AETH_ACK) 307 return COMPST_ERROR; 308 309 if (wqe->wr.opcode != IB_WR_ATOMIC_CMP_AND_SWP && 310 wqe->wr.opcode != IB_WR_ATOMIC_FETCH_AND_ADD) 311 return COMPST_ERROR; 312 reset_retry_counters(qp); 313 return COMPST_ATOMIC; 314 315 case IB_OPCODE_RC_ACKNOWLEDGE: 316 syn = aeth_syn(pkt); 317 switch (syn & AETH_TYPE_MASK) { 318 case AETH_ACK: 319 reset_retry_counters(qp); 320 return COMPST_WRITE_SEND; 321 322 case AETH_RNR_NAK: 323 return COMPST_RNR_RETRY; 324 325 case AETH_NAK: 326 switch (syn) { 327 case AETH_NAK_PSN_SEQ_ERROR: 328 /* a nak implicitly acks all packets with psns 329 * before 330 */ 331 if (psn_compare(pkt->psn, qp->comp.psn) > 0) { 332 qp->comp.psn = pkt->psn; 333 if (qp->req.wait_psn) { 334 qp->req.wait_psn = 0; 335 rxe_run_task(&qp->req.task, 1); 336 } 337 } 338 return COMPST_ERROR_RETRY; 339 340 case AETH_NAK_INVALID_REQ: 341 wqe->status = IB_WC_REM_INV_REQ_ERR; 342 return COMPST_ERROR; 343 344 case AETH_NAK_REM_ACC_ERR: 345 wqe->status = IB_WC_REM_ACCESS_ERR; 346 return COMPST_ERROR; 347 348 case AETH_NAK_REM_OP_ERR: 349 wqe->status = IB_WC_REM_OP_ERR; 350 return COMPST_ERROR; 351 352 default: 353 pr_warn("unexpected nak %x\n", syn); 354 wqe->status = IB_WC_REM_OP_ERR; 355 return COMPST_ERROR; 356 } 357 358 default: 359 return COMPST_ERROR; 360 } 361 
break; 362 363 default: 364 pr_warn("unexpected opcode\n"); 365 } 366 367 return COMPST_ERROR; 368 } 369 370 static inline enum comp_state do_read(struct rxe_qp *qp, 371 struct rxe_pkt_info *pkt, 372 struct rxe_send_wqe *wqe) 373 { 374 struct rxe_dev *rxe = to_rdev(qp->ibqp.device); 375 int ret; 376 377 ret = copy_data(rxe, qp->pd, IB_ACCESS_LOCAL_WRITE, 378 &wqe->dma, payload_addr(pkt), 379 payload_size(pkt), to_mem_obj, NULL); 380 if (ret) 381 return COMPST_ERROR; 382 383 if (wqe->dma.resid == 0 && (pkt->mask & RXE_END_MASK)) 384 return COMPST_COMP_ACK; 385 else 386 return COMPST_UPDATE_COMP; 387 } 388 389 static inline enum comp_state do_atomic(struct rxe_qp *qp, 390 struct rxe_pkt_info *pkt, 391 struct rxe_send_wqe *wqe) 392 { 393 struct rxe_dev *rxe = to_rdev(qp->ibqp.device); 394 int ret; 395 396 u64 atomic_orig = atmack_orig(pkt); 397 398 ret = copy_data(rxe, qp->pd, IB_ACCESS_LOCAL_WRITE, 399 &wqe->dma, &atomic_orig, 400 sizeof(u64), to_mem_obj, NULL); 401 if (ret) 402 return COMPST_ERROR; 403 else 404 return COMPST_COMP_ACK; 405 } 406 407 static void make_send_cqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe, 408 struct rxe_cqe *cqe) 409 { 410 memset(cqe, 0, sizeof(*cqe)); 411 412 if (!qp->is_user) { 413 struct ib_wc *wc = &cqe->ibwc; 414 415 wc->wr_id = wqe->wr.wr_id; 416 wc->status = wqe->status; 417 wc->opcode = wr_to_wc_opcode(wqe->wr.opcode); 418 if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM || 419 wqe->wr.opcode == IB_WR_SEND_WITH_IMM) 420 wc->wc_flags = IB_WC_WITH_IMM; 421 wc->byte_len = wqe->dma.length; 422 wc->qp = &qp->ibqp; 423 } else { 424 struct ib_uverbs_wc *uwc = &cqe->uibwc; 425 426 uwc->wr_id = wqe->wr.wr_id; 427 uwc->status = wqe->status; 428 uwc->opcode = wr_to_wc_opcode(wqe->wr.opcode); 429 if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM || 430 wqe->wr.opcode == IB_WR_SEND_WITH_IMM) 431 uwc->wc_flags = IB_WC_WITH_IMM; 432 uwc->byte_len = wqe->dma.length; 433 uwc->qp_num = qp->ibqp.qp_num; 434 } 435 } 436 437 static void 
do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe) 438 { 439 struct rxe_cqe cqe; 440 441 if ((qp->sq_sig_type == IB_SIGNAL_ALL_WR) || 442 (wqe->wr.send_flags & IB_SEND_SIGNALED) || 443 (qp->req.state == QP_STATE_ERROR)) { 444 make_send_cqe(qp, wqe, &cqe); 445 rxe_cq_post(qp->scq, &cqe, 0); 446 } 447 448 advance_consumer(qp->sq.queue); 449 450 /* 451 * we completed something so let req run again 452 * if it is trying to fence 453 */ 454 if (qp->req.wait_fence) { 455 qp->req.wait_fence = 0; 456 rxe_run_task(&qp->req.task, 1); 457 } 458 } 459 460 static inline enum comp_state complete_ack(struct rxe_qp *qp, 461 struct rxe_pkt_info *pkt, 462 struct rxe_send_wqe *wqe) 463 { 464 unsigned long flags; 465 466 if (wqe->has_rd_atomic) { 467 wqe->has_rd_atomic = 0; 468 atomic_inc(&qp->req.rd_atomic); 469 if (qp->req.need_rd_atomic) { 470 qp->comp.timeout_retry = 0; 471 qp->req.need_rd_atomic = 0; 472 rxe_run_task(&qp->req.task, 1); 473 } 474 } 475 476 if (unlikely(qp->req.state == QP_STATE_DRAIN)) { 477 /* state_lock used by requester & completer */ 478 spin_lock_irqsave(&qp->state_lock, flags); 479 if ((qp->req.state == QP_STATE_DRAIN) && 480 (qp->comp.psn == qp->req.psn)) { 481 qp->req.state = QP_STATE_DRAINED; 482 spin_unlock_irqrestore(&qp->state_lock, flags); 483 484 if (qp->ibqp.event_handler) { 485 struct ib_event ev; 486 487 ev.device = qp->ibqp.device; 488 ev.element.qp = &qp->ibqp; 489 ev.event = IB_EVENT_SQ_DRAINED; 490 qp->ibqp.event_handler(&ev, 491 qp->ibqp.qp_context); 492 } 493 } else { 494 spin_unlock_irqrestore(&qp->state_lock, flags); 495 } 496 } 497 498 do_complete(qp, wqe); 499 500 if (psn_compare(pkt->psn, qp->comp.psn) >= 0) 501 return COMPST_UPDATE_COMP; 502 else 503 return COMPST_DONE; 504 } 505 506 static inline enum comp_state complete_wqe(struct rxe_qp *qp, 507 struct rxe_pkt_info *pkt, 508 struct rxe_send_wqe *wqe) 509 { 510 qp->comp.opcode = -1; 511 512 if (pkt) { 513 if (psn_compare(pkt->psn, qp->comp.psn) >= 0) 514 qp->comp.psn = 
(pkt->psn + 1) & BTH_PSN_MASK; 515 516 if (qp->req.wait_psn) { 517 qp->req.wait_psn = 0; 518 rxe_run_task(&qp->req.task, 1); 519 } 520 } 521 522 do_complete(qp, wqe); 523 524 return COMPST_GET_WQE; 525 } 526 527 int rxe_completer(void *arg) 528 { 529 struct rxe_qp *qp = (struct rxe_qp *)arg; 530 struct rxe_send_wqe *wqe = wqe; 531 struct sk_buff *skb = NULL; 532 struct rxe_pkt_info *pkt = NULL; 533 enum comp_state state; 534 535 if (!qp->valid) { 536 while ((skb = skb_dequeue(&qp->resp_pkts))) { 537 rxe_drop_ref(qp); 538 kfree_skb(skb); 539 } 540 skb = NULL; 541 pkt = NULL; 542 543 while (queue_head(qp->sq.queue)) 544 advance_consumer(qp->sq.queue); 545 546 goto exit; 547 } 548 549 if (qp->req.state == QP_STATE_ERROR) { 550 while ((skb = skb_dequeue(&qp->resp_pkts))) { 551 rxe_drop_ref(qp); 552 kfree_skb(skb); 553 } 554 skb = NULL; 555 pkt = NULL; 556 557 while ((wqe = queue_head(qp->sq.queue))) { 558 wqe->status = IB_WC_WR_FLUSH_ERR; 559 do_complete(qp, wqe); 560 } 561 562 goto exit; 563 } 564 565 if (qp->req.state == QP_STATE_RESET) { 566 while ((skb = skb_dequeue(&qp->resp_pkts))) { 567 rxe_drop_ref(qp); 568 kfree_skb(skb); 569 } 570 skb = NULL; 571 pkt = NULL; 572 573 while (queue_head(qp->sq.queue)) 574 advance_consumer(qp->sq.queue); 575 576 goto exit; 577 } 578 579 if (qp->comp.timeout) { 580 qp->comp.timeout_retry = 1; 581 qp->comp.timeout = 0; 582 } else { 583 qp->comp.timeout_retry = 0; 584 } 585 586 if (qp->req.need_retry) 587 goto exit; 588 589 state = COMPST_GET_ACK; 590 591 while (1) { 592 pr_debug("state = %s\n", comp_state_name[state]); 593 switch (state) { 594 case COMPST_GET_ACK: 595 skb = skb_dequeue(&qp->resp_pkts); 596 if (skb) { 597 pkt = SKB_TO_PKT(skb); 598 qp->comp.timeout_retry = 0; 599 } 600 state = COMPST_GET_WQE; 601 break; 602 603 case COMPST_GET_WQE: 604 state = get_wqe(qp, pkt, &wqe); 605 break; 606 607 case COMPST_CHECK_PSN: 608 state = check_psn(qp, pkt, wqe); 609 break; 610 611 case COMPST_CHECK_ACK: 612 state = check_ack(qp, pkt, 
wqe); 613 break; 614 615 case COMPST_READ: 616 state = do_read(qp, pkt, wqe); 617 break; 618 619 case COMPST_ATOMIC: 620 state = do_atomic(qp, pkt, wqe); 621 break; 622 623 case COMPST_WRITE_SEND: 624 if (wqe->state == wqe_state_pending && 625 wqe->last_psn == pkt->psn) 626 state = COMPST_COMP_ACK; 627 else 628 state = COMPST_UPDATE_COMP; 629 break; 630 631 case COMPST_COMP_ACK: 632 state = complete_ack(qp, pkt, wqe); 633 break; 634 635 case COMPST_COMP_WQE: 636 state = complete_wqe(qp, pkt, wqe); 637 break; 638 639 case COMPST_UPDATE_COMP: 640 if (pkt->mask & RXE_END_MASK) 641 qp->comp.opcode = -1; 642 else 643 qp->comp.opcode = pkt->opcode; 644 645 if (psn_compare(pkt->psn, qp->comp.psn) >= 0) 646 qp->comp.psn = (pkt->psn + 1) & BTH_PSN_MASK; 647 648 if (qp->req.wait_psn) { 649 qp->req.wait_psn = 0; 650 rxe_run_task(&qp->req.task, 1); 651 } 652 653 state = COMPST_DONE; 654 break; 655 656 case COMPST_DONE: 657 if (pkt) { 658 rxe_drop_ref(pkt->qp); 659 kfree_skb(skb); 660 } 661 goto done; 662 663 case COMPST_EXIT: 664 if (qp->comp.timeout_retry && wqe) { 665 state = COMPST_ERROR_RETRY; 666 break; 667 } 668 669 /* re reset the timeout counter if 670 * (1) QP is type RC 671 * (2) the QP is alive 672 * (3) there is a packet sent by the requester that 673 * might be acked (we still might get spurious 674 * timeouts but try to keep them as few as possible) 675 * (4) the timeout parameter is set 676 */ 677 if ((qp_type(qp) == IB_QPT_RC) && 678 (qp->req.state == QP_STATE_READY) && 679 (psn_compare(qp->req.psn, qp->comp.psn) > 0) && 680 qp->qp_timeout_jiffies) 681 mod_timer(&qp->retrans_timer, 682 jiffies + qp->qp_timeout_jiffies); 683 goto exit; 684 685 case COMPST_ERROR_RETRY: 686 /* we come here if the retry timer fired and we did 687 * not receive a response packet. try to retry the send 688 * queue if that makes sense and the limits have not 689 * been exceeded. 
remember that some timeouts are 690 * spurious since we do not reset the timer but kick 691 * it down the road or let it expire 692 */ 693 694 /* there is nothing to retry in this case */ 695 if (!wqe || (wqe->state == wqe_state_posted)) 696 goto exit; 697 698 if (qp->comp.retry_cnt > 0) { 699 if (qp->comp.retry_cnt != 7) 700 qp->comp.retry_cnt--; 701 702 /* no point in retrying if we have already 703 * seen the last ack that the requester could 704 * have caused 705 */ 706 if (psn_compare(qp->req.psn, 707 qp->comp.psn) > 0) { 708 /* tell the requester to retry the 709 * send send queue next time around 710 */ 711 qp->req.need_retry = 1; 712 rxe_run_task(&qp->req.task, 1); 713 } 714 goto exit; 715 } else { 716 wqe->status = IB_WC_RETRY_EXC_ERR; 717 state = COMPST_ERROR; 718 } 719 break; 720 721 case COMPST_RNR_RETRY: 722 if (qp->comp.rnr_retry > 0) { 723 if (qp->comp.rnr_retry != 7) 724 qp->comp.rnr_retry--; 725 726 qp->req.need_retry = 1; 727 pr_debug("set rnr nak timer\n"); 728 mod_timer(&qp->rnr_nak_timer, 729 jiffies + rnrnak_jiffies(aeth_syn(pkt) 730 & ~AETH_TYPE_MASK)); 731 goto exit; 732 } else { 733 wqe->status = IB_WC_RNR_RETRY_EXC_ERR; 734 state = COMPST_ERROR; 735 } 736 break; 737 738 case COMPST_ERROR: 739 do_complete(qp, wqe); 740 rxe_qp_error(qp); 741 goto exit; 742 } 743 } 744 745 exit: 746 /* we come here if we are done with processing and want the task to 747 * exit from the loop calling us 748 */ 749 return -EAGAIN; 750 751 done: 752 /* we come here if we have processed a packet we want the task to call 753 * us again to see if there is anything else to do 754 */ 755 return 0; 756 } 757 758 #line 22 "/home/ubuntu/launches/work/current--X--drivers--X--defaultlinux-4.8-rc1.tar.xz--X--43_1a--X--cpachecker/linux-4.8-rc1.tar.xz/csd_deg_dscv/9940/dscv_tempdir/dscv/ri/43_1a/drivers/infiniband/sw/rxe/rxe_comp.o.c.prepared"
1 /* 2 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved. 3 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved. 4 * 5 * This software is available to you under a choice of one of two 6 * licenses. You may choose to be licensed under the terms of the GNU 7 * General Public License (GPL) Version 2, available from the file 8 * COPYING in the main directory of this source tree, or the 9 * OpenIB.org BSD license below: 10 * 11 * Redistribution and use in source and binary forms, with or 12 * without modification, are permitted provided that the following 13 * conditions are met: 14 * 15 * - Redistributions of source code must retain the above 16 * copyright notice, this list of conditions and the following 17 * disclaimer. 18 * 19 * - Redistributions in binary form must reproduce the above 20 * copyright notice, this list of conditions and the following 21 * disclaimer in the documentation and/or other materials 22 * provided with the distribution. 23 * 24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 31 * SOFTWARE. 32 */ 33 34 #include "rxe.h" 35 #include "rxe_loc.h" 36 37 /* Compute a partial ICRC for all the IB transport headers. */ 38 u32 rxe_icrc_hdr(struct rxe_pkt_info *pkt, struct sk_buff *skb) 39 { 40 unsigned int bth_offset = 0; 41 struct iphdr *ip4h = NULL; 42 struct ipv6hdr *ip6h = NULL; 43 struct udphdr *udph; 44 struct rxe_bth *bth; 45 int crc; 46 int length; 47 int hdr_size = sizeof(struct udphdr) + 48 (skb->protocol == htons(ETH_P_IP) ? 
49 sizeof(struct iphdr) : sizeof(struct ipv6hdr)); 50 /* pseudo header buffer size is calculate using ipv6 header size since 51 * it is bigger than ipv4 52 */ 53 u8 pshdr[sizeof(struct udphdr) + 54 sizeof(struct ipv6hdr) + 55 RXE_BTH_BYTES]; 56 57 /* This seed is the result of computing a CRC with a seed of 58 * 0xfffffff and 8 bytes of 0xff representing a masked LRH. 59 */ 60 crc = 0xdebb20e3; 61 62 if (skb->protocol == htons(ETH_P_IP)) { /* IPv4 */ 63 memcpy(pshdr, ip_hdr(skb), hdr_size); 64 ip4h = (struct iphdr *)pshdr; 65 udph = (struct udphdr *)(ip4h + 1); 66 67 ip4h->ttl = 0xff; 68 ip4h->check = CSUM_MANGLED_0; 69 ip4h->tos = 0xff; 70 } else { /* IPv6 */ 71 memcpy(pshdr, ipv6_hdr(skb), hdr_size); 72 ip6h = (struct ipv6hdr *)pshdr; 73 udph = (struct udphdr *)(ip6h + 1); 74 75 memset(ip6h->flow_lbl, 0xff, sizeof(ip6h->flow_lbl)); 76 ip6h->priority = 0xf; 77 ip6h->hop_limit = 0xff; 78 } 79 udph->check = CSUM_MANGLED_0; 80 81 bth_offset += hdr_size; 82 83 memcpy(&pshdr[bth_offset], pkt->hdr, RXE_BTH_BYTES); 84 bth = (struct rxe_bth *)&pshdr[bth_offset]; 85 86 /* exclude bth.resv8a */ 87 bth->qpn |= cpu_to_be32(~BTH_QPN_MASK); 88 89 length = hdr_size + RXE_BTH_BYTES; 90 crc = crc32_le(crc, pshdr, length); 91 92 /* And finish to compute the CRC on the remainder of the headers. */ 93 crc = crc32_le(crc, pkt->hdr + RXE_BTH_BYTES, 94 rxe_opcode[pkt->opcode].length - RXE_BTH_BYTES); 95 return crc; 96 }
1 2 /* 3 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved. 4 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved. 5 * 6 * This software is available to you under a choice of one of two 7 * licenses. You may choose to be licensed under the terms of the GNU 8 * General Public License (GPL) Version 2, available from the file 9 * COPYING in the main directory of this source tree, or the 10 * OpenIB.org BSD license below: 11 * 12 * Redistribution and use in source and binary forms, with or 13 * without modification, are permitted provided that the following 14 * conditions are met: 15 * 16 * - Redistributions of source code must retain the above 17 * copyright notice, this list of conditions and the following 18 * disclaimer. 19 * 20 * - Redistributions in binary form must reproduce the above 21 * copyright notice, this list of conditions and the following 22 * disclaimer in the documentation and/or other materials 23 * provided with the distribution. 24 * 25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 32 * SOFTWARE. 
33 */ 34 35 #include <linux/skbuff.h> 36 #include <linux/if_arp.h> 37 #include <linux/netdevice.h> 38 #include <linux/if.h> 39 #include <linux/if_vlan.h> 40 #include <net/udp_tunnel.h> 41 #include <net/sch_generic.h> 42 #include <linux/netfilter.h> 43 #include <rdma/ib_addr.h> 44 45 #include "rxe.h" 46 #include "rxe_net.h" 47 #include "rxe_loc.h" 48 49 static LIST_HEAD(rxe_dev_list); 50 static spinlock_t dev_list_lock; /* spinlock for device list */ 51 52 struct rxe_dev *net_to_rxe(struct net_device *ndev) 53 { 54 struct rxe_dev *rxe; 55 struct rxe_dev *found = NULL; 56 57 spin_lock_bh(&dev_list_lock); 58 list_for_each_entry(rxe, &rxe_dev_list, list) { 59 if (rxe->ndev == ndev) { 60 found = rxe; 61 break; 62 } 63 } 64 spin_unlock_bh(&dev_list_lock); 65 66 return found; 67 } 68 69 struct rxe_dev *get_rxe_by_name(const char* name) 70 { 71 struct rxe_dev *rxe; 72 struct rxe_dev *found = NULL; 73 74 spin_lock_bh(&dev_list_lock); 75 list_for_each_entry(rxe, &rxe_dev_list, list) { 76 if (!strcmp(name, rxe->ib_dev.name)) { 77 found = rxe; 78 break; 79 } 80 } 81 spin_unlock_bh(&dev_list_lock); 82 return found; 83 } 84 85 86 struct rxe_recv_sockets recv_sockets; 87 88 static __be64 rxe_mac_to_eui64(struct net_device *ndev) 89 { 90 unsigned char *mac_addr = ndev->dev_addr; 91 __be64 eui64; 92 unsigned char *dst = (unsigned char *)&eui64; 93 94 dst[0] = mac_addr[0] ^ 2; 95 dst[1] = mac_addr[1]; 96 dst[2] = mac_addr[2]; 97 dst[3] = 0xff; 98 dst[4] = 0xfe; 99 dst[5] = mac_addr[3]; 100 dst[6] = mac_addr[4]; 101 dst[7] = mac_addr[5]; 102 103 return eui64; 104 } 105 106 static __be64 node_guid(struct rxe_dev *rxe) 107 { 108 return rxe_mac_to_eui64(rxe->ndev); 109 } 110 111 static __be64 port_guid(struct rxe_dev *rxe) 112 { 113 return rxe_mac_to_eui64(rxe->ndev); 114 } 115 116 static struct device *dma_device(struct rxe_dev *rxe) 117 { 118 struct net_device *ndev; 119 120 ndev = rxe->ndev; 121 122 if (ndev->priv_flags & IFF_802_1Q_VLAN) 123 ndev = vlan_dev_real_dev(ndev); 124 125 
return ndev->dev.parent; 126 } 127 128 static int mcast_add(struct rxe_dev *rxe, union ib_gid *mgid) 129 { 130 int err; 131 unsigned char ll_addr[ETH_ALEN]; 132 133 ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr); 134 err = dev_mc_add(rxe->ndev, ll_addr); 135 136 return err; 137 } 138 139 static int mcast_delete(struct rxe_dev *rxe, union ib_gid *mgid) 140 { 141 int err; 142 unsigned char ll_addr[ETH_ALEN]; 143 144 ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr); 145 err = dev_mc_del(rxe->ndev, ll_addr); 146 147 return err; 148 } 149 150 static struct dst_entry *rxe_find_route4(struct net_device *ndev, 151 struct in_addr *saddr, 152 struct in_addr *daddr) 153 { 154 struct rtable *rt; 155 struct flowi4 fl = { { 0 } }; 156 157 memset(&fl, 0, sizeof(fl)); 158 fl.flowi4_oif = ndev->ifindex; 159 memcpy(&fl.saddr, saddr, sizeof(*saddr)); 160 memcpy(&fl.daddr, daddr, sizeof(*daddr)); 161 fl.flowi4_proto = IPPROTO_UDP; 162 163 rt = ip_route_output_key(&init_net, &fl); 164 if (IS_ERR(rt)) { 165 pr_err_ratelimited("no route to %pI4\n", &daddr->s_addr); 166 return NULL; 167 } 168 169 return &rt->dst; 170 } 171 172 #if IS_ENABLED(CONFIG_IPV6) 173 static struct dst_entry *rxe_find_route6(struct net_device *ndev, 174 struct in6_addr *saddr, 175 struct in6_addr *daddr) 176 { 177 struct dst_entry *ndst; 178 struct flowi6 fl6 = { { 0 } }; 179 180 memset(&fl6, 0, sizeof(fl6)); 181 fl6.flowi6_oif = ndev->ifindex; 182 memcpy(&fl6.saddr, saddr, sizeof(*saddr)); 183 memcpy(&fl6.daddr, daddr, sizeof(*daddr)); 184 fl6.flowi6_proto = IPPROTO_UDP; 185 186 if (unlikely(ipv6_stub->ipv6_dst_lookup(sock_net(recv_sockets.sk6->sk), 187 recv_sockets.sk6->sk, &ndst, &fl6))) { 188 pr_err_ratelimited("no route to %pI6\n", daddr); 189 goto put; 190 } 191 192 if (unlikely(ndst->error)) { 193 pr_err("no route to %pI6\n", daddr); 194 goto put; 195 } 196 197 return ndst; 198 put: 199 dst_release(ndst); 200 return NULL; 201 } 202 203 #else 204 205 static struct dst_entry 
*rxe_find_route6(struct net_device *ndev, 206 struct in6_addr *saddr, 207 struct in6_addr *daddr) 208 { 209 return NULL; 210 } 211 212 #endif 213 214 static int rxe_udp_encap_recv(struct sock *sk, struct sk_buff *skb) 215 { 216 struct udphdr *udph; 217 struct net_device *ndev = skb->dev; 218 struct rxe_dev *rxe = net_to_rxe(ndev); 219 struct rxe_pkt_info *pkt = SKB_TO_PKT(skb); 220 221 if (!rxe) 222 goto drop; 223 224 if (skb_linearize(skb)) { 225 pr_err("skb_linearize failed\n"); 226 goto drop; 227 } 228 229 udph = udp_hdr(skb); 230 pkt->rxe = rxe; 231 pkt->port_num = 1; 232 pkt->hdr = (u8 *)(udph + 1); 233 pkt->mask = RXE_GRH_MASK; 234 pkt->paylen = be16_to_cpu(udph->len) - sizeof(*udph); 235 236 return rxe_rcv(skb); 237 drop: 238 kfree_skb(skb); 239 return 0; 240 } 241 242 static struct socket *rxe_setup_udp_tunnel(struct net *net, __be16 port, 243 bool ipv6) 244 { 245 int err; 246 struct socket *sock; 247 struct udp_port_cfg udp_cfg; 248 struct udp_tunnel_sock_cfg tnl_cfg; 249 250 memset(&udp_cfg, 0, sizeof(udp_cfg)); 251 252 if (ipv6) { 253 udp_cfg.family = AF_INET6; 254 udp_cfg.ipv6_v6only = 1; 255 } else { 256 udp_cfg.family = AF_INET; 257 } 258 259 udp_cfg.local_udp_port = port; 260 261 /* Create UDP socket */ 262 err = udp_sock_create(net, &udp_cfg, &sock); 263 if (err < 0) { 264 pr_err("failed to create udp socket. 
err = %d\n", err); 265 return ERR_PTR(err); 266 } 267 268 tnl_cfg.sk_user_data = NULL; 269 tnl_cfg.encap_type = 1; 270 tnl_cfg.encap_rcv = rxe_udp_encap_recv; 271 tnl_cfg.encap_destroy = NULL; 272 273 /* Setup UDP tunnel */ 274 setup_udp_tunnel_sock(net, sock, &tnl_cfg); 275 276 return sock; 277 } 278 279 static void rxe_release_udp_tunnel(struct socket *sk) 280 { 281 udp_tunnel_sock_release(sk); 282 } 283 284 static void prepare_udp_hdr(struct sk_buff *skb, __be16 src_port, 285 __be16 dst_port) 286 { 287 struct udphdr *udph; 288 289 __skb_push(skb, sizeof(*udph)); 290 skb_reset_transport_header(skb); 291 udph = udp_hdr(skb); 292 293 udph->dest = dst_port; 294 udph->source = src_port; 295 udph->len = htons(skb->len); 296 udph->check = 0; 297 } 298 299 static void prepare_ipv4_hdr(struct dst_entry *dst, struct sk_buff *skb, 300 __be32 saddr, __be32 daddr, __u8 proto, 301 __u8 tos, __u8 ttl, __be16 df, bool xnet) 302 { 303 struct iphdr *iph; 304 305 skb_scrub_packet(skb, xnet); 306 307 skb_clear_hash(skb); 308 skb_dst_set(skb, dst); 309 memset(IPCB(skb), 0, sizeof(*IPCB(skb))); 310 311 skb_push(skb, sizeof(struct iphdr)); 312 skb_reset_network_header(skb); 313 314 iph = ip_hdr(skb); 315 316 iph->version = IPVERSION; 317 iph->ihl = sizeof(struct iphdr) >> 2; 318 iph->frag_off = df; 319 iph->protocol = proto; 320 iph->tos = tos; 321 iph->daddr = daddr; 322 iph->saddr = saddr; 323 iph->ttl = ttl; 324 __ip_select_ident(dev_net(dst->dev), iph, 325 skb_shinfo(skb)->gso_segs ?: 1); 326 iph->tot_len = htons(skb->len); 327 ip_send_check(iph); 328 } 329 330 static void prepare_ipv6_hdr(struct dst_entry *dst, struct sk_buff *skb, 331 struct in6_addr *saddr, struct in6_addr *daddr, 332 __u8 proto, __u8 prio, __u8 ttl) 333 { 334 struct ipv6hdr *ip6h; 335 336 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); 337 IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED 338 | IPSKB_REROUTED); 339 skb_dst_set(skb, dst); 340 341 __skb_push(skb, sizeof(*ip6h)); 342 
skb_reset_network_header(skb); 343 ip6h = ipv6_hdr(skb); 344 ip6_flow_hdr(ip6h, prio, htonl(0)); 345 ip6h->payload_len = htons(skb->len); 346 ip6h->nexthdr = proto; 347 ip6h->hop_limit = ttl; 348 ip6h->daddr = *daddr; 349 ip6h->saddr = *saddr; 350 ip6h->payload_len = htons(skb->len - sizeof(*ip6h)); 351 } 352 353 static int prepare4(struct rxe_dev *rxe, struct sk_buff *skb, struct rxe_av *av) 354 { 355 struct dst_entry *dst; 356 bool xnet = false; 357 __be16 df = htons(IP_DF); 358 struct in_addr *saddr = &av->sgid_addr._sockaddr_in.sin_addr; 359 struct in_addr *daddr = &av->dgid_addr._sockaddr_in.sin_addr; 360 struct rxe_pkt_info *pkt = SKB_TO_PKT(skb); 361 362 dst = rxe_find_route4(rxe->ndev, saddr, daddr); 363 if (!dst) { 364 pr_err("Host not reachable\n"); 365 return -EHOSTUNREACH; 366 } 367 368 if (!memcmp(saddr, daddr, sizeof(*daddr))) 369 pkt->mask |= RXE_LOOPBACK_MASK; 370 371 prepare_udp_hdr(skb, htons(RXE_ROCE_V2_SPORT), 372 htons(ROCE_V2_UDP_DPORT)); 373 374 prepare_ipv4_hdr(dst, skb, saddr->s_addr, daddr->s_addr, IPPROTO_UDP, 375 av->grh.traffic_class, av->grh.hop_limit, df, xnet); 376 return 0; 377 } 378 379 static int prepare6(struct rxe_dev *rxe, struct sk_buff *skb, struct rxe_av *av) 380 { 381 struct dst_entry *dst; 382 struct in6_addr *saddr = &av->sgid_addr._sockaddr_in6.sin6_addr; 383 struct in6_addr *daddr = &av->dgid_addr._sockaddr_in6.sin6_addr; 384 struct rxe_pkt_info *pkt = SKB_TO_PKT(skb); 385 386 dst = rxe_find_route6(rxe->ndev, saddr, daddr); 387 if (!dst) { 388 pr_err("Host not reachable\n"); 389 return -EHOSTUNREACH; 390 } 391 392 if (!memcmp(saddr, daddr, sizeof(*daddr))) 393 pkt->mask |= RXE_LOOPBACK_MASK; 394 395 prepare_udp_hdr(skb, htons(RXE_ROCE_V2_SPORT), 396 htons(ROCE_V2_UDP_DPORT)); 397 398 prepare_ipv6_hdr(dst, skb, saddr, daddr, IPPROTO_UDP, 399 av->grh.traffic_class, 400 av->grh.hop_limit); 401 return 0; 402 } 403 404 static int prepare(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, 405 struct sk_buff *skb, u32 *crc) 406 { 
407 int err = 0; 408 struct rxe_av *av = rxe_get_av(pkt); 409 410 if (av->network_type == RDMA_NETWORK_IPV4) 411 err = prepare4(rxe, skb, av); 412 else if (av->network_type == RDMA_NETWORK_IPV6) 413 err = prepare6(rxe, skb, av); 414 415 *crc = rxe_icrc_hdr(pkt, skb); 416 417 return err; 418 } 419 420 static void rxe_skb_tx_dtor(struct sk_buff *skb) 421 { 422 struct sock *sk = skb->sk; 423 struct rxe_qp *qp = sk->sk_user_data; 424 int skb_out = atomic_dec_return(&qp->skb_out); 425 426 if (unlikely(qp->need_req_skb && 427 skb_out < RXE_INFLIGHT_SKBS_PER_QP_LOW)) 428 rxe_run_task(&qp->req.task, 1); 429 } 430 431 static int send(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, 432 struct sk_buff *skb) 433 { 434 struct sk_buff *nskb; 435 struct rxe_av *av; 436 int err; 437 438 av = rxe_get_av(pkt); 439 440 nskb = skb_clone(skb, GFP_ATOMIC); 441 if (!nskb) 442 return -ENOMEM; 443 444 nskb->destructor = rxe_skb_tx_dtor; 445 nskb->sk = pkt->qp->sk->sk; 446 447 if (av->network_type == RDMA_NETWORK_IPV4) { 448 err = ip_local_out(dev_net(skb_dst(skb)->dev), nskb->sk, nskb); 449 } else if (av->network_type == RDMA_NETWORK_IPV6) { 450 err = ip6_local_out(dev_net(skb_dst(skb)->dev), nskb->sk, nskb); 451 } else { 452 pr_err("Unknown layer 3 protocol: %d\n", av->network_type); 453 kfree_skb(nskb); 454 return -EINVAL; 455 } 456 457 if (unlikely(net_xmit_eval(err))) { 458 pr_debug("error sending packet: %d\n", err); 459 return -EAGAIN; 460 } 461 462 kfree_skb(skb); 463 464 return 0; 465 } 466 467 static int loopback(struct sk_buff *skb) 468 { 469 return rxe_rcv(skb); 470 } 471 472 static inline int addr_same(struct rxe_dev *rxe, struct rxe_av *av) 473 { 474 return rxe->port.port_guid == av->grh.dgid.global.interface_id; 475 } 476 477 static struct sk_buff *init_packet(struct rxe_dev *rxe, struct rxe_av *av, 478 int paylen, struct rxe_pkt_info *pkt) 479 { 480 unsigned int hdr_len; 481 struct sk_buff *skb; 482 483 if (av->network_type == RDMA_NETWORK_IPV4) 484 hdr_len = ETH_HLEN + 
sizeof(struct udphdr) + 485 sizeof(struct iphdr); 486 else 487 hdr_len = ETH_HLEN + sizeof(struct udphdr) + 488 sizeof(struct ipv6hdr); 489 490 skb = alloc_skb(paylen + hdr_len + LL_RESERVED_SPACE(rxe->ndev), 491 GFP_ATOMIC); 492 if (unlikely(!skb)) 493 return NULL; 494 495 skb_reserve(skb, hdr_len + LL_RESERVED_SPACE(rxe->ndev)); 496 497 skb->dev = rxe->ndev; 498 if (av->network_type == RDMA_NETWORK_IPV4) 499 skb->protocol = htons(ETH_P_IP); 500 else 501 skb->protocol = htons(ETH_P_IPV6); 502 503 pkt->rxe = rxe; 504 pkt->port_num = 1; 505 pkt->hdr = skb_put(skb, paylen); 506 pkt->mask |= RXE_GRH_MASK; 507 508 memset(pkt->hdr, 0, paylen); 509 510 return skb; 511 } 512 513 /* 514 * this is required by rxe_cfg to match rxe devices in 515 * /sys/class/infiniband up with their underlying ethernet devices 516 */ 517 static char *parent_name(struct rxe_dev *rxe, unsigned int port_num) 518 { 519 return rxe->ndev->name; 520 } 521 522 static enum rdma_link_layer link_layer(struct rxe_dev *rxe, 523 unsigned int port_num) 524 { 525 return IB_LINK_LAYER_ETHERNET; 526 } 527 528 static struct rxe_ifc_ops ifc_ops = { 529 .node_guid = node_guid, 530 .port_guid = port_guid, 531 .dma_device = dma_device, 532 .mcast_add = mcast_add, 533 .mcast_delete = mcast_delete, 534 .prepare = prepare, 535 .send = send, 536 .loopback = loopback, 537 .init_packet = init_packet, 538 .parent_name = parent_name, 539 .link_layer = link_layer, 540 }; 541 542 struct rxe_dev *rxe_net_add(struct net_device *ndev) 543 { 544 int err; 545 struct rxe_dev *rxe = NULL; 546 547 rxe = (struct rxe_dev *)ib_alloc_device(sizeof(*rxe)); 548 if (!rxe) 549 return NULL; 550 551 rxe->ifc_ops = &ifc_ops; 552 rxe->ndev = ndev; 553 554 err = rxe_add(rxe, ndev->mtu); 555 if (err) { 556 ib_dealloc_device(&rxe->ib_dev); 557 return NULL; 558 } 559 560 spin_lock_bh(&dev_list_lock); 561 list_add_tail(&rxe_dev_list, &rxe->list); 562 spin_unlock_bh(&dev_list_lock); 563 return rxe; 564 } 565 566 void rxe_remove_all(void) 567 { 568 
spin_lock_bh(&dev_list_lock); 569 while (!list_empty(&rxe_dev_list)) { 570 struct rxe_dev *rxe = 571 list_first_entry(&rxe_dev_list, struct rxe_dev, list); 572 573 list_del(&rxe->list); 574 spin_unlock_bh(&dev_list_lock); 575 rxe_remove(rxe); 576 spin_lock_bh(&dev_list_lock); 577 } 578 spin_unlock_bh(&dev_list_lock); 579 } 580 EXPORT_SYMBOL(rxe_remove_all); 581 582 static void rxe_port_event(struct rxe_dev *rxe, 583 enum ib_event_type event) 584 { 585 struct ib_event ev; 586 587 ev.device = &rxe->ib_dev; 588 ev.element.port_num = 1; 589 ev.event = event; 590 591 ib_dispatch_event(&ev); 592 } 593 594 /* Caller must hold net_info_lock */ 595 void rxe_port_up(struct rxe_dev *rxe) 596 { 597 struct rxe_port *port; 598 599 port = &rxe->port; 600 port->attr.state = IB_PORT_ACTIVE; 601 port->attr.phys_state = IB_PHYS_STATE_LINK_UP; 602 603 rxe_port_event(rxe, IB_EVENT_PORT_ACTIVE); 604 pr_info("rxe: set %s active\n", rxe->ib_dev.name); 605 return; 606 } 607 608 /* Caller must hold net_info_lock */ 609 void rxe_port_down(struct rxe_dev *rxe) 610 { 611 struct rxe_port *port; 612 613 port = &rxe->port; 614 port->attr.state = IB_PORT_DOWN; 615 port->attr.phys_state = IB_PHYS_STATE_LINK_DOWN; 616 617 rxe_port_event(rxe, IB_EVENT_PORT_ERR); 618 pr_info("rxe: set %s down\n", rxe->ib_dev.name); 619 return; 620 } 621 622 static int rxe_notify(struct notifier_block *not_blk, 623 unsigned long event, 624 void *arg) 625 { 626 struct net_device *ndev = netdev_notifier_info_to_dev(arg); 627 struct rxe_dev *rxe = net_to_rxe(ndev); 628 629 if (!rxe) 630 goto out; 631 632 switch (event) { 633 case NETDEV_UNREGISTER: 634 list_del(&rxe->list); 635 rxe_remove(rxe); 636 break; 637 case NETDEV_UP: 638 rxe_port_up(rxe); 639 break; 640 case NETDEV_DOWN: 641 rxe_port_down(rxe); 642 break; 643 case NETDEV_CHANGEMTU: 644 pr_info("rxe: %s changed mtu to %d\n", ndev->name, ndev->mtu); 645 rxe_set_mtu(rxe, ndev->mtu); 646 break; 647 case NETDEV_REBOOT: 648 case NETDEV_CHANGE: 649 case 
NETDEV_GOING_DOWN: 650 case NETDEV_CHANGEADDR: 651 case NETDEV_CHANGENAME: 652 case NETDEV_FEAT_CHANGE: 653 default: 654 pr_info("rxe: ignoring netdev event = %ld for %s\n", 655 event, ndev->name); 656 break; 657 } 658 out: 659 return NOTIFY_OK; 660 } 661 662 static struct notifier_block rxe_net_notifier = { 663 .notifier_call = rxe_notify, 664 }; 665 666 int rxe_net_init(void) 667 { 668 int err; 669 670 spin_lock_init(&dev_list_lock); 671 672 recv_sockets.sk6 = rxe_setup_udp_tunnel(&init_net, 673 htons(ROCE_V2_UDP_DPORT), true); 674 if (IS_ERR(recv_sockets.sk6)) { 675 recv_sockets.sk6 = NULL; 676 pr_err("rxe: Failed to create IPv6 UDP tunnel\n"); 677 return -1; 678 } 679 680 recv_sockets.sk4 = rxe_setup_udp_tunnel(&init_net, 681 htons(ROCE_V2_UDP_DPORT), false); 682 if (IS_ERR(recv_sockets.sk4)) { 683 rxe_release_udp_tunnel(recv_sockets.sk6); 684 recv_sockets.sk4 = NULL; 685 recv_sockets.sk6 = NULL; 686 pr_err("rxe: Failed to create IPv4 UDP tunnel\n"); 687 return -1; 688 } 689 690 err = register_netdevice_notifier(&rxe_net_notifier); 691 if (err) { 692 rxe_release_udp_tunnel(recv_sockets.sk6); 693 rxe_release_udp_tunnel(recv_sockets.sk4); 694 pr_err("rxe: Failed to rigister netdev notifier\n"); 695 } 696 697 return err; 698 } 699 700 void rxe_net_exit(void) 701 { 702 if (recv_sockets.sk6) 703 rxe_release_udp_tunnel(recv_sockets.sk6); 704 705 if (recv_sockets.sk4) 706 rxe_release_udp_tunnel(recv_sockets.sk4); 707 708 unregister_netdevice_notifier(&rxe_net_notifier); 709 } 710 711 712 713 714 715 /* LDV_COMMENT_BEGIN_MAIN */ 716 #ifdef LDV_MAIN19_sequence_infinite_withcheck_stateful 717 718 /*###########################################################################*/ 719 720 /*############## Driver Environment Generator 0.2 output ####################*/ 721 722 /*###########################################################################*/ 723 724 725 726 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. 
Test if all kernel resources are correctly released by driver before driver will be unloaded. */ 727 void ldv_check_final_state(void); 728 729 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result. */ 730 void ldv_check_return_value(int res); 731 732 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result of probe() function. */ 733 void ldv_check_return_value_probe(int res); 734 735 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Initializes the model. */ 736 void ldv_initialize(void); 737 738 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Reinitializes the model between distinct model function calls. */ 739 void ldv_handler_precall(void); 740 741 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Returns arbitrary interger value. */ 742 int nondet_int(void); 743 744 /* LDV_COMMENT_VAR_DECLARE_LDV Special variable for LDV verifier. */ 745 int LDV_IN_INTERRUPT; 746 747 /* LDV_COMMENT_FUNCTION_MAIN Main function for LDV verifier. 
*/ 748 void ldv_main19_sequence_infinite_withcheck_stateful(void) { 749 750 751 752 /* LDV_COMMENT_BEGIN_VARIABLE_DECLARATION_PART */ 753 /*============================= VARIABLE DECLARATION PART =============================*/ 754 /** STRUCT: struct type: rxe_ifc_ops, struct name: ifc_ops **/ 755 /* content: static __be64 node_guid(struct rxe_dev *rxe)*/ 756 /* LDV_COMMENT_END_PREP */ 757 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "node_guid" */ 758 struct rxe_dev * var_group1; 759 /* LDV_COMMENT_BEGIN_PREP */ 760 #if IS_ENABLED(CONFIG_IPV6) 761 #else 762 #endif 763 /* LDV_COMMENT_END_PREP */ 764 /* content: static __be64 port_guid(struct rxe_dev *rxe)*/ 765 /* LDV_COMMENT_END_PREP */ 766 /* LDV_COMMENT_BEGIN_PREP */ 767 #if IS_ENABLED(CONFIG_IPV6) 768 #else 769 #endif 770 /* LDV_COMMENT_END_PREP */ 771 /* content: static struct device *dma_device(struct rxe_dev *rxe)*/ 772 /* LDV_COMMENT_END_PREP */ 773 /* LDV_COMMENT_BEGIN_PREP */ 774 #if IS_ENABLED(CONFIG_IPV6) 775 #else 776 #endif 777 /* LDV_COMMENT_END_PREP */ 778 /* content: static int mcast_add(struct rxe_dev *rxe, union ib_gid *mgid)*/ 779 /* LDV_COMMENT_END_PREP */ 780 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mcast_add" */ 781 union ib_gid * var_mcast_add_6_p1; 782 /* LDV_COMMENT_BEGIN_PREP */ 783 #if IS_ENABLED(CONFIG_IPV6) 784 #else 785 #endif 786 /* LDV_COMMENT_END_PREP */ 787 /* content: static int mcast_delete(struct rxe_dev *rxe, union ib_gid *mgid)*/ 788 /* LDV_COMMENT_END_PREP */ 789 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mcast_delete" */ 790 union ib_gid * var_mcast_delete_7_p1; 791 /* LDV_COMMENT_BEGIN_PREP */ 792 #if IS_ENABLED(CONFIG_IPV6) 793 #else 794 #endif 795 /* LDV_COMMENT_END_PREP */ 796 /* content: static int prepare(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, struct sk_buff *skb, u32 *crc)*/ 797 /* LDV_COMMENT_BEGIN_PREP */ 798 #if IS_ENABLED(CONFIG_IPV6) 799 #else 800 #endif 801 /* LDV_COMMENT_END_PREP */ 802 /* 
LDV_COMMENT_VAR_DECLARE Variable declaration for function "prepare" */ 803 struct rxe_pkt_info * var_group2; 804 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "prepare" */ 805 struct sk_buff * var_prepare_19_p2; 806 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "prepare" */ 807 u32 * var_prepare_19_p3; 808 /* content: static int send(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, struct sk_buff *skb)*/ 809 /* LDV_COMMENT_BEGIN_PREP */ 810 #if IS_ENABLED(CONFIG_IPV6) 811 #else 812