 Error Trace
Bug # 150
typedef signed char __s8;
typedef unsigned char __u8;
typedef short __s16;
typedef unsigned short __u16;
typedef int __s32;
typedef unsigned int __u32;
typedef long long __s64;
typedef unsigned long long __u64;
typedef signed char s8;
typedef unsigned char u8;
typedef unsigned short u16;
typedef int s32;
typedef unsigned int u32;
typedef long long s64;
typedef unsigned long long u64;
typedef long __kernel_long_t;
typedef unsigned long __kernel_ulong_t;
typedef int __kernel_pid_t;
typedef unsigned int __kernel_uid32_t;
typedef unsigned int __kernel_gid32_t;
typedef __kernel_ulong_t __kernel_size_t;
typedef __kernel_long_t __kernel_ssize_t;
typedef long long __kernel_loff_t;
typedef __kernel_long_t __kernel_time_t;
typedef __kernel_long_t __kernel_clock_t;
typedef int __kernel_timer_t;
typedef int __kernel_clockid_t;
typedef __u16 __le16;
typedef __u16 __be16;
typedef __u32 __be32;
typedef __u64 __be64;
typedef __u32 __wsum;
struct kernel_symbol { unsigned long value; const char *name; };
struct module;
typedef __u32 __kernel_dev_t;
typedef __kernel_dev_t dev_t;
typedef unsigned short umode_t;
typedef __kernel_pid_t pid_t;
typedef __kernel_clockid_t clockid_t;
typedef _Bool bool;
typedef __kernel_uid32_t uid_t;
typedef __kernel_gid32_t gid_t;
typedef __kernel_loff_t loff_t;
typedef __kernel_size_t size_t;
typedef __kernel_ssize_t ssize_t;
typedef __kernel_time_t time_t;
typedef __s32 int32_t;
typedef __u8 uint8_t;
typedef __u32 uint32_t;
typedef __u64 uint64_t;
typedef unsigned long sector_t;
typedef unsigned long blkcnt_t;
typedef u64 dma_addr_t;
typedef unsigned int gfp_t;
typedef unsigned int fmode_t;
struct __anonstruct_atomic_t_6 { int counter; };
typedef struct __anonstruct_atomic_t_6 atomic_t;
struct __anonstruct_atomic64_t_7 { long counter; };
typedef struct __anonstruct_atomic64_t_7 atomic64_t;
struct list_head { struct list_head *next; struct list_head *prev; };
struct hlist_node;
struct hlist_head { struct hlist_node *first; };
struct hlist_node { struct hlist_node *next; struct hlist_node **pprev; };
struct callback_head { struct callback_head *next; void (*func)(struct callback_head *); };
typedef void (*ctor_fn_t)();
struct ctl_table;
struct device;
struct net_device;
struct file_operations;
struct completion;
struct pt_regs;
union __anonunion___u_9 { struct list_head *__val; char __c[1U]; };
struct bug_entry { int bug_addr_disp; int file_disp; unsigned short line; unsigned short flags; };
struct timespec;
struct compat_timespec;
struct __anonstruct_futex_25 { u32 *uaddr; u32 val; u32 flags; u32 bitset; u64 time; u32 *uaddr2; };
struct __anonstruct_nanosleep_26 { clockid_t clockid; struct timespec *rmtp; struct compat_timespec *compat_rmtp; u64 expires; };
struct pollfd;
struct __anonstruct_poll_27 { struct pollfd *ufds; int nfds; int has_timeout; unsigned long tv_sec; unsigned long tv_nsec; };
union __anonunion____missing_field_name_24 { struct __anonstruct_futex_25 futex; struct __anonstruct_nanosleep_26 nanosleep; struct __anonstruct_poll_27 poll; };
struct restart_block { long int (*fn)(struct restart_block *); union __anonunion____missing_field_name_24 __annonCompField4; };
struct page;
struct task_struct;
struct mm_struct;
struct pt_regs { unsigned long r15; unsigned long r14; unsigned long r13; unsigned long r12; unsigned long bp; unsigned long bx; unsigned long r11; unsigned long r10; unsigned long r9; unsigned long r8; unsigned long ax; unsigned long cx; unsigned long dx; unsigned long si; unsigned long di; unsigned long orig_ax; unsigned long ip; unsigned long cs; unsigned long flags; unsigned long sp; unsigned long ss; };
struct __anonstruct____missing_field_name_30 { unsigned int a; unsigned int b; };
struct __anonstruct____missing_field_name_31 { u16 limit0; u16 base0; unsigned char base1; unsigned char type; unsigned char s; unsigned char dpl; unsigned char p; unsigned char limit; unsigned char avl; unsigned char l; unsigned char d; unsigned char g; unsigned char base2; };
union __anonunion____missing_field_name_29 { struct __anonstruct____missing_field_name_30 __annonCompField5; struct __anonstruct____missing_field_name_31 __annonCompField6; };
struct desc_struct { union __anonunion____missing_field_name_29 __annonCompField7; };
typedef unsigned long pteval_t;
typedef unsigned long pmdval_t;
typedef unsigned long pgdval_t;
typedef unsigned long pgprotval_t;
struct __anonstruct_pte_t_32 { pteval_t pte; };
typedef struct __anonstruct_pte_t_32 pte_t;
struct pgprot { pgprotval_t pgprot; };
typedef struct pgprot pgprot_t;
struct __anonstruct_pgd_t_33 { pgdval_t pgd; };
typedef struct __anonstruct_pgd_t_33 pgd_t;
struct __anonstruct_pmd_t_35 { pmdval_t pmd; };
typedef struct __anonstruct_pmd_t_35 pmd_t;
typedef struct page *pgtable_t;
struct file;
struct seq_file;
struct thread_struct;
struct cpumask;
struct qspinlock { atomic_t val; };
typedef struct qspinlock arch_spinlock_t;
struct qrwlock { atomic_t cnts; arch_spinlock_t wait_lock; };
typedef struct qrwlock arch_rwlock_t;
struct math_emu_info { long ___orig_eip; struct pt_regs *regs; };
struct cpumask { unsigned long bits[128U]; };
typedef struct cpumask cpumask_t;
typedef struct cpumask *cpumask_var_t;
union __anonunion___u_44 { int __val; char __c[1U]; };
typedef atomic64_t atomic_long_t;
struct static_key { atomic_t enabled; };
struct tracepoint_func { void *func; void *data; int prio; };
struct tracepoint { const char *name; struct static_key key; void (*regfunc)(); void (*unregfunc)(); struct tracepoint_func *funcs; };
struct fregs_state { u32 cwd; u32 swd; u32 twd; u32 fip; u32 fcs; u32 foo; u32 fos; u32 st_space[20U]; u32 status; };
struct __anonstruct____missing_field_name_59 { u64 rip; u64 rdp; };
struct __anonstruct____missing_field_name_60 { u32 fip; u32 fcs; u32 foo; u32 fos; };
union __anonunion____missing_field_name_58 { struct __anonstruct____missing_field_name_59 __annonCompField13; struct __anonstruct____missing_field_name_60 __annonCompField14; };
union __anonunion____missing_field_name_61 { u32 padding1[12U]; u32 sw_reserved[12U]; };
struct fxregs_state { u16 cwd; u16 swd; u16 twd; u16 fop; union __anonunion____missing_field_name_58 __annonCompField15; u32 mxcsr; u32 mxcsr_mask; u32 st_space[32U]; u32 xmm_space[64U]; u32 padding[12U]; union __anonunion____missing_field_name_61 __annonCompField16; };
struct swregs_state { u32 cwd; u32 swd; u32 twd; u32 fip; u32 fcs; u32 foo; u32 fos; u32 st_space[20U]; u8 ftop; u8 changed; u8 lookahead; u8 no_update; u8 rm; u8 alimit; struct math_emu_info *info; u32 entry_eip; };
struct xstate_header { u64 xfeatures; u64 xcomp_bv; u64 reserved[6U]; };
struct xregs_state { struct fxregs_state i387; struct xstate_header header; u8 extended_state_area[0U]; };
union fpregs_state { struct fregs_state fsave; struct fxregs_state fxsave; struct swregs_state soft; struct xregs_state xsave; u8 __padding[4096U]; };
struct fpu { unsigned int last_cpu; unsigned char fpstate_active; unsigned char fpregs_active; unsigned char counter; union fpregs_state state; };
struct seq_operations;
struct perf_event;
struct __anonstruct_mm_segment_t_73 { unsigned long seg; };
typedef struct __anonstruct_mm_segment_t_73 mm_segment_t;
struct thread_struct { struct desc_struct tls_array[3U]; unsigned long sp0; unsigned long sp; unsigned short es; unsigned short ds; unsigned short fsindex; unsigned short gsindex; unsigned long fsbase; unsigned long gsbase; struct perf_event *ptrace_bps[4U]; unsigned long debugreg6; unsigned long ptrace_dr7; unsigned long cr2; unsigned long trap_nr; unsigned long error_code; unsigned long *io_bitmap_ptr; unsigned long iopl; unsigned int io_bitmap_max; mm_segment_t addr_limit; unsigned char sig_on_uaccess_err; unsigned char uaccess_err; struct fpu fpu; };
struct lockdep_map;
struct stack_trace { unsigned int nr_entries; unsigned int max_entries; unsigned long *entries; int skip; };
struct lockdep_subclass_key { char __one_byte; };
struct lock_class_key { struct lockdep_subclass_key subkeys[8U]; };
struct lock_class { struct hlist_node hash_entry; struct list_head lock_entry; struct lockdep_subclass_key *key; unsigned int subclass; unsigned int dep_gen_id; unsigned long usage_mask; struct stack_trace usage_traces[13U]; struct list_head locks_after; struct list_head locks_before; unsigned int version; unsigned long ops; const char *name; int name_version; unsigned long contention_point[4U]; unsigned long contending_point[4U]; };
struct lockdep_map { struct lock_class_key *key; struct lock_class *class_cache[2U]; const char *name; int cpu; unsigned long ip; };
struct held_lock { u64 prev_chain_key; unsigned long acquire_ip; struct lockdep_map *instance; struct lockdep_map *nest_lock; u64 waittime_stamp; u64 holdtime_stamp; unsigned short class_idx; unsigned char irq_context; unsigned char trylock; unsigned char read; unsigned char check; unsigned char hardirqs_off; unsigned short references; unsigned int pin_count; };
struct raw_spinlock { arch_spinlock_t raw_lock; unsigned int magic; unsigned int owner_cpu; void *owner; struct lockdep_map dep_map; };
typedef struct raw_spinlock raw_spinlock_t;
struct __anonstruct____missing_field_name_75 { u8 __padding[24U]; struct lockdep_map dep_map; };
union __anonunion____missing_field_name_74 { struct raw_spinlock rlock; struct __anonstruct____missing_field_name_75 __annonCompField19; };
struct spinlock { union __anonunion____missing_field_name_74 __annonCompField20; };
typedef struct spinlock spinlock_t;
struct __anonstruct_rwlock_t_76 { arch_rwlock_t raw_lock; unsigned int magic; unsigned int owner_cpu; void *owner; struct lockdep_map dep_map; };
typedef struct __anonstruct_rwlock_t_76 rwlock_t;
struct seqcount { unsigned int sequence; struct lockdep_map dep_map; };
typedef struct seqcount seqcount_t;
struct __anonstruct_seqlock_t_91 { struct seqcount seqcount; spinlock_t lock; };
typedef struct __anonstruct_seqlock_t_91 seqlock_t;
struct timespec { __kernel_time_t tv_sec; long tv_nsec; };
typedef __s64 time64_t;
struct user_namespace;
struct __anonstruct_kuid_t_92 { uid_t val; };
typedef struct __anonstruct_kuid_t_92 kuid_t;
struct __anonstruct_kgid_t_93 { gid_t val; };
typedef struct __anonstruct_kgid_t_93 kgid_t;
struct kstat { u64 ino; dev_t dev; umode_t mode; unsigned int nlink; kuid_t uid; kgid_t gid; dev_t rdev; loff_t size; struct timespec atime; struct timespec mtime; struct timespec ctime; unsigned long blksize; unsigned long long blocks; };
struct vm_area_struct;
struct __wait_queue_head { spinlock_t lock; struct list_head task_list; };
typedef struct __wait_queue_head wait_queue_head_t;
struct __anonstruct_nodemask_t_94 { unsigned long bits[16U]; };
typedef struct __anonstruct_nodemask_t_94 nodemask_t;
struct free_area { struct list_head free_list[6U]; unsigned long nr_free; };
struct pglist_data;
struct zone_padding { char x[0U]; };
struct zone_reclaim_stat { unsigned long recent_rotated[2U]; unsigned long recent_scanned[2U]; };
struct lruvec { struct list_head lists[5U]; struct zone_reclaim_stat reclaim_stat; atomic_long_t inactive_age; struct pglist_data *pgdat; };
typedef unsigned int isolate_mode_t;
struct per_cpu_pages { int count; int high; int batch; struct list_head lists[3U]; };
struct per_cpu_pageset { struct per_cpu_pages pcp; s8 expire; s8 stat_threshold; s8 vm_stat_diff[21U]; };
struct per_cpu_nodestat { s8 stat_threshold; s8 vm_node_stat_diff[26U]; };
enum zone_type { ZONE_DMA = 0, ZONE_DMA32 = 1, ZONE_NORMAL = 2, ZONE_MOVABLE = 3, __MAX_NR_ZONES = 4 };
struct zone { unsigned long watermark[3U]; unsigned long nr_reserved_highatomic; long lowmem_reserve[4U]; int node; struct pglist_data *zone_pgdat; struct per_cpu_pageset *pageset; unsigned long zone_start_pfn; unsigned long managed_pages; unsigned long spanned_pages; unsigned long present_pages; const char *name; unsigned long nr_isolate_pageblock; wait_queue_head_t *wait_table; unsigned long wait_table_hash_nr_entries; unsigned long wait_table_bits; struct zone_padding _pad1_; struct free_area free_area[11U]; unsigned long flags; spinlock_t lock; struct zone_padding _pad2_; unsigned long percpu_drift_mark; unsigned long compact_cached_free_pfn; unsigned long compact_cached_migrate_pfn[2U]; unsigned int compact_considered; unsigned int compact_defer_shift; int compact_order_failed; bool compact_blockskip_flush; bool contiguous; struct zone_padding _pad3_; atomic_long_t vm_stat[21U]; };
struct zoneref { struct zone *zone; int zone_idx; };
struct zonelist { struct zoneref _zonerefs[4097U]; };
struct pglist_data { struct zone node_zones[4U]; struct zonelist node_zonelists[2U]; int nr_zones; unsigned long node_start_pfn; unsigned long node_present_pages; unsigned long node_spanned_pages; int node_id; wait_queue_head_t kswapd_wait; wait_queue_head_t pfmemalloc_wait; struct task_struct *kswapd; int kswapd_order; enum zone_type kswapd_classzone_idx; int kcompactd_max_order; enum zone_type kcompactd_classzone_idx; wait_queue_head_t kcompactd_wait; struct task_struct *kcompactd; spinlock_t numabalancing_migrate_lock; unsigned long numabalancing_migrate_next_window; unsigned long numabalancing_migrate_nr_pages; unsigned long totalreserve_pages; unsigned long min_unmapped_pages; unsigned long min_slab_pages; struct zone_padding _pad1_; spinlock_t lru_lock; spinlock_t split_queue_lock; struct list_head split_queue; unsigned long split_queue_len; struct lruvec lruvec; unsigned int inactive_ratio; unsigned long flags; struct zone_padding _pad2_; struct per_cpu_nodestat *per_cpu_nodestats; atomic_long_t vm_stat[26U]; };
struct optimistic_spin_queue { atomic_t tail; };
struct mutex { atomic_t count; spinlock_t wait_lock; struct list_head wait_list; struct task_struct *owner; void *magic; struct lockdep_map dep_map; };
struct mutex_waiter { struct list_head list; struct task_struct *task; void *magic; };
struct rw_semaphore;
struct rw_semaphore { atomic_long_t count; struct list_head wait_list; raw_spinlock_t wait_lock; struct optimistic_spin_queue osq; struct task_struct *owner; struct lockdep_map dep_map; };
struct completion { unsigned int done; wait_queue_head_t wait; };
union ktime { s64 tv64; };
typedef union ktime ktime_t;
struct timer_list { struct hlist_node entry; unsigned long expires; void (*function)(unsigned long); unsigned long data; u32 flags; int start_pid; void *start_site; char start_comm[16U]; struct lockdep_map lockdep_map; };
struct hrtimer;
enum hrtimer_restart;
struct rb_node { unsigned long __rb_parent_color; struct rb_node *rb_right; struct rb_node *rb_left; };
struct rb_root { struct rb_node *rb_node; };
struct nsproxy;
struct ctl_table_root;
struct ctl_table_header;
struct ctl_dir;
typedef int proc_handler(struct ctl_table *, int, void *, size_t *, loff_t *);
struct ctl_table_poll { atomic_t event; wait_queue_head_t wait; };
struct ctl_table { const char *procname; void *data; int maxlen; umode_t mode; struct ctl_table *child; proc_handler *proc_handler; struct ctl_table_poll *poll; void *extra1; void *extra2; };
struct ctl_node { struct rb_node node; struct ctl_table_header *header; };
struct __anonstruct____missing_field_name_100 { struct ctl_table *ctl_table; int used; int count; int nreg; };
union __anonunion____missing_field_name_99 { struct __anonstruct____missing_field_name_100 __annonCompField21; struct callback_head rcu; };
struct ctl_table_set;
struct ctl_table_header { union __anonunion____missing_field_name_99 __annonCompField22; struct completion *unregistering; struct ctl_table *ctl_table_arg; struct ctl_table_root *root; struct ctl_table_set *set; struct ctl_dir *parent; struct ctl_node *node; };
struct ctl_dir { struct ctl_table_header header; struct rb_root root; };
struct ctl_table_set { int (*is_seen)(struct ctl_table_set *); struct ctl_dir dir; };
struct ctl_table_root { struct ctl_table_set default_set; struct ctl_table_set * (*lookup)(struct ctl_table_root *, struct nsproxy *); int (*permissions)(struct ctl_table_header *, struct ctl_table *); };
struct workqueue_struct;
struct work_struct;
struct work_struct { atomic_long_t data; struct list_head entry; void (*func)(struct work_struct *); struct lockdep_map lockdep_map; };
struct delayed_work { struct work_struct work; struct timer_list timer; struct workqueue_struct *wq; int cpu; };
struct notifier_block;
struct notifier_block { int (*notifier_call)(struct notifier_block *, unsigned long, void *); struct notifier_block *next; int priority; };
struct pm_message { int event; };
typedef struct pm_message pm_message_t;
struct dev_pm_ops { int (*prepare)(struct device *); void (*complete)(struct device *); int (*suspend)(struct device *); int (*resume)(struct device *); int (*freeze)(struct device *); int (*thaw)(struct device *); int (*poweroff)(struct device *); int (*restore)(struct device *); int (*suspend_late)(struct device *); int (*resume_early)(struct device *); int (*freeze_late)(struct device *); int (*thaw_early)(struct device *); int (*poweroff_late)(struct device *); int (*restore_early)(struct device *); int (*suspend_noirq)(struct device *); int (*resume_noirq)(struct device *); int (*freeze_noirq)(struct device *); int (*thaw_noirq)(struct device *); int (*poweroff_noirq)(struct device *); int (*restore_noirq)(struct device *); int (*runtime_suspend)(struct device *); int (*runtime_resume)(struct device *); int (*runtime_idle)(struct device *); };
enum rpm_status { RPM_ACTIVE = 0, RPM_RESUMING = 1, RPM_SUSPENDED = 2, RPM_SUSPENDING = 3 };
enum rpm_request { RPM_REQ_NONE = 0, RPM_REQ_IDLE = 1, RPM_REQ_SUSPEND = 2, RPM_REQ_AUTOSUSPEND = 3, RPM_REQ_RESUME = 4 };
struct wakeup_source;
struct wake_irq;
struct pm_domain_data;
struct pm_subsys_data { spinlock_t lock; unsigned int refcount; struct list_head clock_list; struct pm_domain_data *domain_data; };
struct dev_pm_qos;
struct dev_pm_info { pm_message_t power_state; unsigned char can_wakeup; unsigned char async_suspend; bool is_prepared; bool is_suspended; bool is_noirq_suspended; bool is_late_suspended; bool early_init; bool direct_complete; spinlock_t lock; struct list_head entry; struct completion completion; struct wakeup_source *wakeup; bool wakeup_path; bool syscore; bool no_pm_callbacks; struct timer_list suspend_timer; unsigned long timer_expires; struct work_struct work; wait_queue_head_t wait_queue; struct wake_irq *wakeirq; atomic_t usage_count; atomic_t child_count; unsigned char disable_depth; unsigned char idle_notification; unsigned char request_pending; unsigned char deferred_resume; unsigned char run_wake; unsigned char runtime_auto; bool ignore_children; unsigned char no_callbacks; unsigned char irq_safe; unsigned char use_autosuspend; unsigned char timer_autosuspends; unsigned char memalloc_noio; enum rpm_request request; enum rpm_status runtime_status; int runtime_error; int autosuspend_delay; unsigned long last_busy; unsigned long active_jiffies; unsigned long suspended_jiffies; unsigned long accounting_timestamp; struct pm_subsys_data *subsys_data; void (*set_latency_tolerance)(struct device *, s32); struct dev_pm_qos *qos; };
struct dev_pm_domain { struct dev_pm_ops ops; void (*detach)(struct device *, bool); int (*activate)(struct device *); void (*sync)(struct device *); void (*dismiss)(struct device *); };
struct ldt_struct;
struct vdso_image;
struct __anonstruct_mm_context_t_165 { struct ldt_struct *ldt; unsigned short ia32_compat; struct mutex lock; void *vdso; const struct vdso_image *vdso_image; atomic_t perf_rdpmc_allowed; };
typedef struct __anonstruct_mm_context_t_165 mm_context_t;
struct bio_vec;
struct llist_node;
struct llist_node { struct llist_node *next; };
struct cred;
struct inode;
struct arch_uprobe_task { unsigned long saved_scratch_register; unsigned int saved_trap_nr; unsigned int saved_tf; };
enum uprobe_task_state { UTASK_RUNNING = 0, UTASK_SSTEP = 1, UTASK_SSTEP_ACK = 2, UTASK_SSTEP_TRAPPED = 3 };
struct __anonstruct____missing_field_name_211 { struct arch_uprobe_task autask; unsigned long vaddr; };
struct __anonstruct____missing_field_name_212 { struct callback_head dup_xol_work; unsigned long dup_xol_addr; };
union __anonunion____missing_field_name_210 { struct __anonstruct____missing_field_name_211 __annonCompField35; struct __anonstruct____missing_field_name_212 __annonCompField36; };
struct uprobe;
struct return_instance;
struct uprobe_task { enum uprobe_task_state state; union __anonunion____missing_field_name_210 __annonCompField37; struct uprobe *active_uprobe; unsigned long xol_vaddr; struct return_instance *return_instances; unsigned int depth; };
struct return_instance { struct uprobe *uprobe; unsigned long func; unsigned long stack; unsigned long orig_ret_vaddr; bool chained; struct return_instance *next; };
struct xol_area;
struct uprobes_state { struct xol_area *xol_area; };
struct address_space;
struct mem_cgroup;
union __anonunion____missing_field_name_213 { struct address_space *mapping; void *s_mem; atomic_t compound_mapcount; };
union __anonunion____missing_field_name_214 { unsigned long index; void *freelist; };
struct __anonstruct____missing_field_name_218 { unsigned short inuse; unsigned short objects; unsigned char frozen; };
union __anonunion____missing_field_name_217 { atomic_t _mapcount; unsigned int active; struct __anonstruct____missing_field_name_218 __annonCompField40; int units; };
struct __anonstruct____missing_field_name_216 { union __anonunion____missing_field_name_217 __annonCompField41; atomic_t _refcount; };
union __anonunion____missing_field_name_215 { unsigned long counters; struct __anonstruct____missing_field_name_216 __annonCompField42; };
struct dev_pagemap;
struct __anonstruct____missing_field_name_220 { struct page *next; int pages; int pobjects; };
struct __anonstruct____missing_field_name_221 { unsigned long compound_head; unsigned int compound_dtor; unsigned int compound_order; };
struct __anonstruct____missing_field_name_222 { unsigned long __pad; pgtable_t pmd_huge_pte; };
union __anonunion____missing_field_name_219 { struct list_head lru; struct dev_pagemap *pgmap; struct __anonstruct____missing_field_name_220 __annonCompField44; struct callback_head callback_head; struct __anonstruct____missing_field_name_221 __annonCompField45; struct __anonstruct____missing_field_name_222 __annonCompField46; };
struct kmem_cache;
union __anonunion____missing_field_name_223 { unsigned long private; spinlock_t *ptl; struct kmem_cache *slab_cache; };
struct page { unsigned long flags; union __anonunion____missing_field_name_213 __annonCompField38; union __anonunion____missing_field_name_214 __annonCompField39; union __anonunion____missing_field_name_215 __annonCompField43; union __anonunion____missing_field_name_219 __annonCompField47; union __anonunion____missing_field_name_223 __annonCompField48; struct mem_cgroup *mem_cgroup; };
struct page_frag { struct page *page; __u32 offset; __u32 size; };
struct userfaultfd_ctx;
struct vm_userfaultfd_ctx { struct userfaultfd_ctx *ctx; };
struct __anonstruct_shared_224 { struct rb_node rb; unsigned long rb_subtree_last; };
struct anon_vma;
struct vm_operations_struct;
struct mempolicy;
struct vm_area_struct { unsigned long vm_start; unsigned long vm_end; struct vm_area_struct *vm_next; struct vm_area_struct *vm_prev; struct rb_node vm_rb; unsigned long rb_subtree_gap; struct mm_struct *vm_mm; pgprot_t vm_page_prot; unsigned long vm_flags; struct __anonstruct_shared_224 shared; struct list_head anon_vma_chain; struct anon_vma *anon_vma; const struct vm_operations_struct *vm_ops; unsigned long vm_pgoff; struct file *vm_file; void *vm_private_data; struct mempolicy *vm_policy; struct vm_userfaultfd_ctx vm_userfaultfd_ctx; };
struct core_thread { struct task_struct *task; struct core_thread *next; };
struct core_state { atomic_t nr_threads; struct core_thread dumper; struct completion startup; };
struct task_rss_stat { int events; int count[4U]; };
struct mm_rss_stat { atomic_long_t count[4U]; };
struct kioctx_table;
struct linux_binfmt;
struct mmu_notifier_mm;
struct mm_struct { struct vm_area_struct *mmap; struct rb_root mm_rb; u32 vmacache_seqnum; unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); unsigned long mmap_base; unsigned long mmap_legacy_base; unsigned long task_size; unsigned long highest_vm_end; pgd_t *pgd; atomic_t mm_users; atomic_t mm_count; atomic_long_t nr_ptes; atomic_long_t nr_pmds; int map_count; spinlock_t page_table_lock; struct rw_semaphore mmap_sem; struct list_head mmlist; unsigned long hiwater_rss; unsigned long hiwater_vm; unsigned long total_vm; unsigned long locked_vm; unsigned long pinned_vm; unsigned long data_vm; unsigned long exec_vm; unsigned long stack_vm; unsigned long def_flags; unsigned long start_code; unsigned long end_code; unsigned long start_data; unsigned long end_data; unsigned long start_brk; unsigned long brk; unsigned long start_stack; unsigned long arg_start; unsigned long arg_end; unsigned long env_start; unsigned long env_end; unsigned long saved_auxv[46U]; struct mm_rss_stat rss_stat; struct linux_binfmt *binfmt; cpumask_var_t cpu_vm_mask_var; mm_context_t context; unsigned long flags; struct core_state *core_state; spinlock_t ioctx_lock; struct kioctx_table *ioctx_table; struct task_struct *owner; struct file *exe_file; struct mmu_notifier_mm *mmu_notifier_mm; struct cpumask cpumask_allocation; unsigned long numa_next_scan; unsigned long numa_scan_offset; int numa_scan_seq; bool tlb_flush_pending; struct uprobes_state uprobes_state; void *bd_addr; atomic_long_t hugetlb_usage; struct work_struct async_put_work; };
struct vm_fault;
struct vdso_image { void *data; unsigned long size; unsigned long alt; unsigned long alt_len; long sym_vvar_start; long sym_vvar_page; long sym_hpet_page; long sym_pvclock_page; long sym_VDSO32_NOTE_MASK; long sym___kernel_sigreturn; long sym___kernel_rt_sigreturn; long sym___kernel_vsyscall; long sym_int80_landing_pad; };
typedef __u64 Elf64_Addr;
typedef __u16 Elf64_Half;
typedef __u64 Elf64_Off;
typedef __u32 Elf64_Word;
typedef __u64 Elf64_Xword;
struct elf64_sym { Elf64_Word st_name; unsigned char st_info; unsigned char st_other; Elf64_Half st_shndx; Elf64_Addr st_value; Elf64_Xword st_size; };
typedef struct elf64_sym Elf64_Sym;
struct elf64_hdr { unsigned char e_ident[16U]; Elf64_Half e_type; Elf64_Half e_machine; Elf64_Word e_version; Elf64_Addr e_entry; Elf64_Off e_phoff; Elf64_Off e_shoff; Elf64_Word e_flags; Elf64_Half e_ehsize; Elf64_Half e_phentsize; Elf64_Half e_phnum; Elf64_Half e_shentsize; Elf64_Half e_shnum; Elf64_Half e_shstrndx; };
typedef struct elf64_hdr Elf64_Ehdr;
struct elf64_shdr { Elf64_Word sh_name; Elf64_Word sh_type; Elf64_Xword sh_flags; Elf64_Addr sh_addr; Elf64_Off sh_offset; Elf64_Xword sh_size; Elf64_Word sh_link; Elf64_Word sh_info; Elf64_Xword sh_addralign; Elf64_Xword sh_entsize; };
typedef struct elf64_shdr Elf64_Shdr;
union __anonunion____missing_field_name_229 { unsigned long bitmap[4U]; struct callback_head callback_head; };
struct idr_layer { int prefix; int layer; struct idr_layer *ary[256U]; int count; union __anonunion____missing_field_name_229 __annonCompField49; };
struct idr { struct idr_layer *hint; struct idr_layer *top; int layers; int cur; spinlock_t lock; int id_free_cnt; struct idr_layer *id_free; };
struct ida_bitmap { long nr_busy; unsigned long bitmap[15U]; };
struct ida { struct idr idr; struct ida_bitmap *free_bitmap; };
struct dentry;
struct iattr;
struct super_block;
struct file_system_type;
struct kernfs_open_node;
struct kernfs_iattrs;
struct kernfs_root;
struct kernfs_elem_dir { unsigned long subdirs; struct rb_root children; struct kernfs_root *root; };
struct kernfs_node;
struct kernfs_elem_symlink { struct kernfs_node *target_kn; };
struct kernfs_ops;
struct kernfs_elem_attr { const struct kernfs_ops *ops; struct kernfs_open_node *open; loff_t size; struct kernfs_node *notify_next; };
union __anonunion____missing_field_name_234 { struct kernfs_elem_dir dir; struct kernfs_elem_symlink symlink; struct kernfs_elem_attr attr; };
struct kernfs_node { atomic_t count; atomic_t active; struct lockdep_map dep_map; struct kernfs_node *parent; const char *name; struct rb_node rb; const void *ns; unsigned int hash; union __anonunion____missing_field_name_234 __annonCompField50; void *priv; unsigned short flags; umode_t mode; unsigned int ino; struct kernfs_iattrs *iattr; };
struct kernfs_syscall_ops { int (*remount_fs)(struct kernfs_root *, int *, char *); int (*show_options)(struct seq_file *, struct kernfs_root *); int (*mkdir)(struct kernfs_node *, const char *, umode_t); int (*rmdir)(struct kernfs_node *); int (*rename)(struct kernfs_node *, struct kernfs_node *, const char *); int (*show_path)(struct seq_file *, struct kernfs_node *, struct kernfs_root *); };
struct kernfs_root { struct kernfs_node *kn; unsigned int flags; struct ida ino_ida; struct kernfs_syscall_ops *syscall_ops; struct list_head supers; wait_queue_head_t deactivate_waitq; };
struct kernfs_open_file { struct kernfs_node *kn; struct file *file; void *priv; struct mutex mutex; struct mutex prealloc_mutex; int event; struct list_head list; char *prealloc_buf; size_t atomic_write_len; bool mmapped; const struct vm_operations_struct *vm_ops; };
struct kernfs_ops { int (*seq_show)(struct seq_file *, void *); void * (*seq_start)(struct seq_file *, loff_t *); void * (*seq_next)(struct seq_file *, void *, loff_t *); void (*seq_stop)(struct seq_file *, void *); ssize_t (*read)(struct kernfs_open_file *, char *, size_t, loff_t); size_t atomic_write_len; bool prealloc; ssize_t (*write)(struct kernfs_open_file *, char *, size_t, loff_t); int (*mmap)(struct kernfs_open_file *, struct vm_area_struct *); struct lock_class_key lockdep_key; };
struct sock;
struct kobject;
enum kobj_ns_type { KOBJ_NS_TYPE_NONE = 0, KOBJ_NS_TYPE_NET = 1, KOBJ_NS_TYPES = 2 };
struct kobj_ns_type_operations { enum kobj_ns_type type; bool (*current_may_mount)(); void * (*grab_current_ns)(); const void * (*netlink_ns)(struct sock *); const void * (*initial_ns)(); void (*drop_ns)(void *); };
struct bin_attribute;
struct attribute { const char *name; umode_t mode; bool ignore_lockdep; struct lock_class_key *key; struct lock_class_key skey; };
struct attribute_group { const char *name; umode_t (*is_visible)(struct kobject *, struct attribute *, int); umode_t (*is_bin_visible)(struct kobject *, struct bin_attribute *, int); struct attribute **attrs; struct bin_attribute **bin_attrs; };
struct bin_attribute { struct attribute attr; size_t size; void *private; ssize_t (*read)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t, size_t); ssize_t (*write)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t, size_t); int (*mmap)(struct file *, struct kobject *, struct bin_attribute *, struct vm_area_struct *); };
struct sysfs_ops { ssize_t (*show)(struct kobject *, struct attribute *, char *); ssize_t (*store)(struct kobject *, struct attribute *, const char *, size_t); };
struct kref { atomic_t refcount; };
struct kset;
struct kobj_type;
struct kobject { const char *name; struct list_head entry; struct kobject *parent; struct kset *kset; struct kobj_type *ktype; struct kernfs_node *sd; struct kref kref; struct delayed_work release; unsigned char state_initialized; unsigned char state_in_sysfs; unsigned char state_add_uevent_sent; unsigned char state_remove_uevent_sent; unsigned char uevent_suppress; };
struct kobj_type { void (*release)(struct kobject *); const struct sysfs_ops *sysfs_ops; struct attribute **default_attrs; const struct kobj_ns_type_operations * (*child_ns_type)(struct kobject *); const void * (*namespace)(struct kobject *); };
struct kobj_uevent_env { char *argv[3U]; char *envp[32U]; int envp_idx; char buf[2048U]; int buflen; };
struct kset_uevent_ops { const int (*filter)(struct kset *, struct kobject *); const const char * (*name)(struct kset *, struct kobject *); const int (*uevent)(struct kset *, struct kobject *, struct kobj_uevent_env *); };
struct kset { struct list_head list; spinlock_t list_lock; struct kobject kobj; const struct kset_uevent_ops *uevent_ops; };
struct kernel_param;
struct kernel_param_ops { unsigned int flags; int (*set)(const char *, const struct kernel_param *); int (*get)(char *, const struct kernel_param *); void (*free)(void *); };
struct kparam_string;
struct kparam_array;
union __anonunion____missing_field_name_237 { void *arg; const struct kparam_string *str; const struct kparam_array *arr; };
struct kernel_param { const char *name; struct module *mod; const struct kernel_param_ops *ops; const u16 perm; s8 level; u8 flags; union __anonunion____missing_field_name_237 __annonCompField51; };
struct kparam_string { unsigned int maxlen; char *string; };
struct kparam_array { unsigned int max; unsigned int elemsize; unsigned int *num; const struct kernel_param_ops *ops; void *elem; };
struct exception_table_entry;
struct latch_tree_node { struct rb_node node[2U]; };
struct mod_arch_specific { };
struct module_param_attrs;
struct module_kobject { struct kobject kobj; struct module *mod; struct kobject *drivers_dir; struct module_param_attrs *mp; struct completion *kobj_completion; };
struct module_attribute { struct attribute attr; ssize_t (*show)(struct module_attribute *, struct module_kobject *, char *); ssize_t (*store)(struct module_attribute *, struct module_kobject *, const char *, size_t); void (*setup)(struct module *, const char *); int (*test)(struct module *); void (*free)(struct module *); };
enum module_state { MODULE_STATE_LIVE = 0, MODULE_STATE_COMING = 1, MODULE_STATE_GOING = 2, MODULE_STATE_UNFORMED = 3 };
struct mod_tree_node { struct module *mod; struct latch_tree_node node; };
struct module_layout { void *base; unsigned int size; unsigned int text_size; unsigned int ro_size; unsigned int ro_after_init_size; struct mod_tree_node mtn; };
struct mod_kallsyms { Elf64_Sym *symtab; unsigned int num_symtab; char *strtab; };
struct klp_modinfo { Elf64_Ehdr hdr; Elf64_Shdr *sechdrs; char *secstrings; unsigned int symndx; };
struct module_sect_attrs;
struct module_notes_attrs;
struct trace_event_call;
struct trace_enum_map;
struct module { enum module_state state; struct list_head list; char name[56U]; struct module_kobject mkobj; struct module_attribute *modinfo_attrs; const char *version; const char *srcversion; struct kobject *holders_dir; const struct kernel_symbol *syms; const unsigned long *crcs; unsigned int num_syms; struct mutex param_lock; struct kernel_param *kp; unsigned int num_kp; unsigned int num_gpl_syms; const struct kernel_symbol *gpl_syms; const unsigned long *gpl_crcs; const struct kernel_symbol *unused_syms; const unsigned long *unused_crcs; unsigned int num_unused_syms; unsigned int num_unused_gpl_syms; const struct kernel_symbol *unused_gpl_syms; const unsigned long *unused_gpl_crcs; bool sig_ok; bool async_probe_requested; const struct kernel_symbol *gpl_future_syms; const unsigned long *gpl_future_crcs; unsigned int num_gpl_future_syms; unsigned int num_exentries; struct exception_table_entry *extable; int (*init)(); struct module_layout core_layout; struct module_layout init_layout; struct mod_arch_specific arch; unsigned int taints; unsigned int num_bugs; struct list_head bug_list; struct bug_entry *bug_table; struct mod_kallsyms *kallsyms; struct mod_kallsyms core_kallsyms; struct module_sect_attrs *sect_attrs; struct module_notes_attrs *notes_attrs; char *args; void *percpu; unsigned int percpu_size; unsigned int num_tracepoints; const struct tracepoint **tracepoints_ptrs; unsigned int num_trace_bprintk_fmt; const char **trace_bprintk_fmt_start; struct trace_event_call **trace_events; unsigned int num_trace_events; struct trace_enum_map **trace_enums; unsigned int num_trace_enums; unsigned int num_ftrace_callsites; unsigned long *ftrace_callsites; bool klp; bool klp_alive; struct klp_modinfo *klp_info; struct list_head source_list; struct list_head target_list; void (*exit)(); atomic_t refcnt; ctor_fn_t (**ctors)(); unsigned int num_ctors; };
struct kernel_cap_struct { __u32 cap[2U]; };
typedef struct kernel_cap_struct kernel_cap_t;
struct plist_node { int prio; struct list_head prio_list; struct list_head node_list; };
typedef unsigned long cputime_t;
struct sem_undo_list;
struct sysv_sem { struct sem_undo_list *undo_list; };
struct user_struct;
struct sysv_shm { struct list_head shm_clist; };
struct __anonstruct_sigset_t_245 { unsigned long sig[1U]; };
typedef struct __anonstruct_sigset_t_245 sigset_t;
struct siginfo;
typedef void __signalfn_t(int);
typedef __signalfn_t *__sighandler_t;
typedef void __restorefn_t();
typedef __restorefn_t *__sigrestore_t;
union sigval { int sival_int; void *sival_ptr; };
typedef union sigval sigval_t;
struct __anonstruct__kill_247 { __kernel_pid_t _pid; __kernel_uid32_t _uid; };
struct __anonstruct__timer_248 { __kernel_timer_t _tid; int _overrun; char _pad[0U]; sigval_t _sigval; int _sys_private; };
struct __anonstruct__rt_249 { __kernel_pid_t _pid; __kernel_uid32_t _uid; sigval_t _sigval; };
struct __anonstruct__sigchld_250 { __kernel_pid_t _pid; __kernel_uid32_t _uid; int _status; __kernel_clock_t _utime; __kernel_clock_t _stime; };
struct __anonstruct__addr_bnd_253 { void *_lower; void *_upper; };
union __anonunion____missing_field_name_252 { struct __anonstruct__addr_bnd_253 _addr_bnd; __u32 _pkey; };
struct __anonstruct__sigfault_251 { void *_addr; short _addr_lsb; union __anonunion____missing_field_name_252 __annonCompField52; };
struct __anonstruct__sigpoll_254 { long _band; int _fd; };
struct __anonstruct__sigsys_255 { void *_call_addr; int _syscall; unsigned int _arch; };
union __anonunion__sifields_246 { int _pad[28U]; struct __anonstruct__kill_247 _kill; struct __anonstruct__timer_248 _timer; struct __anonstruct__rt_249 _rt; struct __anonstruct__sigchld_250 _sigchld; struct __anonstruct__sigfault_251 _sigfault; struct __anonstruct__sigpoll_254 _sigpoll; struct __anonstruct__sigsys_255 _sigsys; };
struct siginfo { int si_signo; int si_errno; int si_code; union __anonunion__sifields_246 _sifields; };
typedef struct siginfo siginfo_t;
struct sigpending { struct list_head list; sigset_t signal; };
struct sigaction { __sighandler_t sa_handler; unsigned long sa_flags; __sigrestore_t sa_restorer; sigset_t sa_mask; };
struct k_sigaction { struct sigaction sa; };
enum pid_type { PIDTYPE_PID = 0, PIDTYPE_PGID = 1, PIDTYPE_SID = 2, PIDTYPE_MAX = 3 };
struct pid_namespace;
struct upid { int nr; struct pid_namespace *ns; struct hlist_node pid_chain; };
struct pid { atomic_t count; unsigned int level; struct hlist_head tasks[3U]; struct callback_head rcu; struct upid numbers[1U]; };
struct pid_link { struct hlist_node node; struct pid *pid; };
struct seccomp_filter;
struct seccomp { int mode; struct seccomp_filter *filter; };
struct rt_mutex_waiter;
struct rlimit { __kernel_ulong_t rlim_cur; __kernel_ulong_t rlim_max; };
struct timerqueue_node { struct rb_node node; ktime_t expires; };
struct timerqueue_head { struct rb_root head; struct timerqueue_node *next; };
struct hrtimer_clock_base;
struct hrtimer_cpu_base;
enum hrtimer_restart { HRTIMER_NORESTART = 0, HRTIMER_RESTART = 1 };
struct hrtimer { struct timerqueue_node node; ktime_t _softexpires; enum hrtimer_restart (*function)(struct hrtimer *); struct hrtimer_clock_base *base; u8 state; u8 is_rel; int start_pid; void *start_site; char start_comm[16U]; };
struct hrtimer_clock_base { struct hrtimer_cpu_base *cpu_base; int index; clockid_t clockid; struct timerqueue_head active; ktime_t (*get_time)(); ktime_t offset; };
struct hrtimer_cpu_base { raw_spinlock_t lock; seqcount_t seq; struct hrtimer *running; unsigned int cpu; unsigned int active_bases; unsigned int clock_was_set_seq; bool migration_enabled; bool nohz_active; unsigned char in_hrtirq; unsigned char hres_active; unsigned char hang_detected; ktime_t expires_next; struct hrtimer *next_timer; unsigned int nr_events; unsigned int nr_retries; unsigned int nr_hangs; unsigned int max_hang_time; struct hrtimer_clock_base clock_base[4U]; };
enum kcov_mode { KCOV_MODE_DISABLED = 0, KCOV_MODE_TRACE = 1 };
struct task_io_accounting { u64 rchar; u64 wchar; u64 syscr; u64 syscw; u64 read_bytes; u64 write_bytes; u64 cancelled_write_bytes; };
struct latency_record { unsigned long backtrace[12U]; unsigned int count; unsigned long time; unsigned long max; };
struct assoc_array_ptr;
struct assoc_array { struct assoc_array_ptr *root; unsigned long nr_leaves_on_tree; };
typedef int32_t key_serial_t;
typedef uint32_t key_perm_t;
struct key;
struct signal_struct;
struct key_type;
struct keyring_index_key { struct key_type *type; const char *description; size_t desc_len; };
union key_payload { void *rcu_data0; void *data[4U]; };
union __anonunion____missing_field_name_290 { struct list_head graveyard_link; struct rb_node serial_node; };
struct key_user;
union __anonunion____missing_field_name_291 { time_t expiry; time_t revoked_at; };
struct __anonstruct____missing_field_name_293 { struct key_type *type; char *description; };
union __anonunion____missing_field_name_292 { struct keyring_index_key index_key; struct __anonstruct____missing_field_name_293 __annonCompField55; };
struct __anonstruct____missing_field_name_295 { struct list_head name_link; struct assoc_array keys; };
union __anonunion____missing_field_name_294 { union key_payload payload; struct __anonstruct____missing_field_name_295 __annonCompField57; int reject_error; };
struct key { atomic_t usage; key_serial_t serial; union __anonunion____missing_field_name_290 __annonCompField53; struct rw_semaphore sem; struct key_user *user; void *security; union __anonunion____missing_field_name_291 __annonCompField54; time_t last_used_at; kuid_t uid; kgid_t gid; key_perm_t perm; unsigned short quotalen; unsigned short datalen; unsigned long flags; union __anonunion____missing_field_name_292 __annonCompField56; union __anonunion____missing_field_name_294 __annonCompField58; int (*restrict_link)(struct key *, const struct key_type *, const union key_payload *); };
struct audit_context;
struct group_info { atomic_t usage; int ngroups; int nblocks; kgid_t small_block[32U]; kgid_t *blocks[0U]; };
struct cred { atomic_t usage; atomic_t subscribers; void *put_addr; unsigned int magic; kuid_t uid; kgid_t gid; kuid_t suid; kgid_t sgid; kuid_t euid; kgid_t egid; kuid_t fsuid; kgid_t fsgid; unsigned int securebits; kernel_cap_t cap_inheritable; kernel_cap_t cap_permitted; kernel_cap_t cap_effective; kernel_cap_t cap_bset; kernel_cap_t cap_ambient; unsigned char jit_keyring; struct key *session_keyring; struct key *process_keyring; struct key *thread_keyring; struct key *request_key_auth; void *security; struct user_struct *user; struct user_namespace *user_ns; struct group_info *group_info; struct callback_head rcu; };
struct percpu_ref;
typedef void percpu_ref_func_t(struct percpu_ref *);
struct percpu_ref { atomic_long_t count; unsigned long percpu_count_ptr; percpu_ref_func_t *release; percpu_ref_func_t *confirm_switch; bool force_atomic; struct callback_head rcu; };
enum rcu_sync_type { RCU_SYNC = 0, RCU_SCHED_SYNC = 1, RCU_BH_SYNC = 2 };
struct rcu_sync { int gp_state; int gp_count; wait_queue_head_t gp_wait; int cb_state; struct callback_head cb_head; enum rcu_sync_type gp_type; };
struct percpu_rw_semaphore { struct rcu_sync rss; unsigned int *fast_read_ctr; struct rw_semaphore rw_sem; atomic_t slow_read_ctr; wait_queue_head_t write_waitq; };
struct cgroup;
struct cgroup_root;
struct cgroup_subsys;
struct cgroup_taskset;
struct cgroup_file { struct kernfs_node *kn; };
struct cgroup_subsys_state { struct cgroup *cgroup; struct cgroup_subsys *ss; struct percpu_ref refcnt; struct cgroup_subsys_state *parent; struct list_head sibling; struct list_head children; int id; unsigned int flags; u64 serial_nr; atomic_t online_cnt; struct callback_head callback_head; struct work_struct destroy_work; };
struct css_set { atomic_t refcount; struct hlist_node hlist; struct list_head tasks; struct list_head mg_tasks; struct list_head cgrp_links; struct cgroup *dfl_cgrp; struct cgroup_subsys_state *subsys[13U]; struct list_head mg_preload_node; struct list_head mg_node; struct cgroup *mg_src_cgrp; struct cgroup *mg_dst_cgrp; struct css_set *mg_dst_cset; struct list_head e_cset_node[13U]; struct list_head task_iters; bool dead; struct callback_head callback_head; };
struct cgroup { struct cgroup_subsys_state self; unsigned long flags; int id; int level; int populated_cnt; struct kernfs_node *kn; struct cgroup_file procs_file; struct cgroup_file events_file; u16 subtree_control; u16 subtree_ss_mask; u16 old_subtree_control; u16 old_subtree_ss_mask; struct cgroup_subsys_state *subsys[13U]; struct cgroup_root *root; struct list_head cset_links; struct list_head e_csets[13U]; struct list_head pidlists; struct mutex pidlist_mutex; wait_queue_head_t offline_waitq; struct work_struct release_agent_work; int ancestor_ids[]; };
struct cgroup_root { struct kernfs_root *kf_root; unsigned int subsys_mask; int hierarchy_id; struct cgroup cgrp; int cgrp_ancestor_id_storage; atomic_t nr_cgrps; struct list_head root_list; unsigned int flags; struct idr cgroup_idr; char release_agent_path[4096U]; char name[64U]; };
struct cftype { char name[64U]; unsigned long private; size_t max_write_len; unsigned int flags; unsigned int file_offset; struct cgroup_subsys *ss; struct list_head node; struct kernfs_ops *kf_ops; u64 (*read_u64)(struct cgroup_subsys_state *, struct cftype *); s64 (*read_s64)(struct cgroup_subsys_state *, struct cftype *); int (*seq_show)(struct seq_file *, void *); void * (*seq_start)(struct seq_file *, loff_t *); void * (*seq_next)(struct seq_file *, void *, loff_t *); void (*seq_stop)(struct seq_file *, void *); int (*write_u64)(struct cgroup_subsys_state *, struct cftype *, u64); int (*write_s64)(struct cgroup_subsys_state *, struct cftype *, s64); ssize_t (*write)(struct kernfs_open_file *, char *, size_t, loff_t); struct lock_class_key lockdep_key; };
struct cgroup_subsys { struct cgroup_subsys_state * (*css_alloc)(struct cgroup_subsys_state *); int (*css_online)(struct cgroup_subsys_state *); void (*css_offline)(struct cgroup_subsys_state *); void (*css_released)(struct cgroup_subsys_state *); void (*css_free)(struct cgroup_subsys_state *); void (*css_reset)(struct cgroup_subsys_state *); int (*can_attach)(struct cgroup_taskset *); void (*cancel_attach)(struct cgroup_taskset *); void (*attach)(struct cgroup_taskset *); void (*post_attach)(); int (*can_fork)(struct task_struct *); void (*cancel_fork)(struct task_struct *); void (*fork)(struct task_struct *); void (*exit)(struct task_struct *); void (*free)(struct task_struct *); void (*bind)(struct cgroup_subsys_state *); bool early_init; bool implicit_on_dfl; bool broken_hierarchy; bool warned_broken_hierarchy; int id; const char *name; const char *legacy_name; struct cgroup_root *root; struct idr css_idr; struct list_head cfts; struct cftype *dfl_cftypes; struct cftype *legacy_cftypes; unsigned int depends_on; };
struct __anonstruct____missing_field_name_299 { u8 is_data; u8 padding; u16 prioidx; u32 classid; };
union __anonunion____missing_field_name_298 { struct __anonstruct____missing_field_name_299 __annonCompField59; u64 val; };
struct sock_cgroup_data { union __anonunion____missing_field_name_298 __annonCompField60; };
struct futex_pi_state;
struct robust_list_head;
struct bio_list;
struct fs_struct;
struct perf_event_context;
struct blk_plug;
struct nameidata;
struct cfs_rq;
struct task_group;
struct sighand_struct { atomic_t count; struct k_sigaction action[64U]; spinlock_t siglock; wait_queue_head_t signalfd_wqh; };
struct pacct_struct { int ac_flag; long ac_exitcode; unsigned long ac_mem; cputime_t ac_utime; cputime_t ac_stime; unsigned long ac_minflt; unsigned long ac_majflt; };
struct cpu_itimer { cputime_t expires; cputime_t incr; u32 error; u32 incr_error; };
struct prev_cputime { cputime_t utime; cputime_t stime; raw_spinlock_t lock; };
struct task_cputime { cputime_t utime; cputime_t stime; unsigned long long sum_exec_runtime; };
struct task_cputime_atomic { atomic64_t utime; atomic64_t stime; atomic64_t sum_exec_runtime; };
struct thread_group_cputimer { struct task_cputime_atomic cputime_atomic; bool running; bool checking_timer; };
struct autogroup;
struct tty_struct;
struct taskstats;
struct tty_audit_buf;
struct signal_struct { atomic_t sigcnt; atomic_t live; int nr_threads; atomic_t oom_victims; struct list_head thread_head; wait_queue_head_t wait_chldexit; struct task_struct *curr_target; struct sigpending shared_pending; int group_exit_code; int notify_count; struct task_struct *group_exit_task; int group_stop_count; unsigned int flags; unsigned char is_child_subreaper; unsigned char has_child_subreaper; int posix_timer_id; struct list_head posix_timers; struct hrtimer real_timer; struct pid *leader_pid; ktime_t it_real_incr; struct cpu_itimer it[2U]; struct thread_group_cputimer cputimer; struct task_cputime cputime_expires; struct list_head cpu_timers[3U]; struct pid *tty_old_pgrp; int leader; struct tty_struct *tty; struct autogroup *autogroup; seqlock_t stats_lock; cputime_t utime; cputime_t stime; cputime_t cutime; cputime_t cstime; cputime_t gtime; cputime_t cgtime; struct prev_cputime prev_cputime; unsigned long nvcsw; unsigned long nivcsw; unsigned long cnvcsw; unsigned long cnivcsw; unsigned long min_flt; unsigned long maj_flt; unsigned long cmin_flt; unsigned long cmaj_flt; unsigned long inblock; unsigned long oublock; unsigned long cinblock; unsigned long coublock; unsigned long maxrss; unsigned long cmaxrss; struct task_io_accounting ioac; unsigned long long sum_sched_runtime; struct rlimit rlim[16U]; struct pacct_struct pacct; struct taskstats *stats; unsigned int audit_tty; struct tty_audit_buf *tty_audit_buf; bool oom_flag_origin; short oom_score_adj; short oom_score_adj_min; struct mutex cred_guard_mutex; };
struct user_struct { atomic_t __count; atomic_t processes; atomic_t sigpending; atomic_t inotify_watches; atomic_t inotify_devs; atomic_t fanotify_listeners; atomic_long_t epoll_watches; unsigned long mq_bytes; unsigned long locked_shm; unsigned long unix_inflight; atomic_long_t pipe_bufs; struct key *uid_keyring; struct key *session_keyring; struct hlist_node uidhash_node; kuid_t uid; atomic_long_t locked_vm; };
struct backing_dev_info;
struct reclaim_state;
struct sched_info { unsigned long pcount; unsigned long long run_delay; unsigned long long last_arrival; unsigned long long last_queued; };
struct task_delay_info { spinlock_t lock; unsigned int flags; u64 blkio_start; u64 blkio_delay; u64 swapin_delay; u32 blkio_count; u32 swapin_count; u64 freepages_start; u64 freepages_delay; u32 freepages_count; };
struct wake_q_node { struct wake_q_node *next; };
struct io_context;
struct pipe_inode_info;
struct uts_namespace;
struct load_weight { unsigned long weight; u32 inv_weight; };
struct sched_avg { u64 last_update_time; u64 load_sum; u32 util_sum; u32 period_contrib; unsigned long load_avg; unsigned long util_avg; };
struct sched_statistics { u64 wait_start; u64 wait_max; u64 wait_count; u64 wait_sum; u64 iowait_count; u64 iowait_sum; u64 sleep_start; u64 sleep_max; s64 sum_sleep_runtime; u64 block_start; u64 block_max; u64 exec_max; u64 slice_max; u64 nr_migrations_cold; u64 nr_failed_migrations_affine; u64 nr_failed_migrations_running; u64 nr_failed_migrations_hot; u64 nr_forced_migrations; u64 nr_wakeups; u64 nr_wakeups_sync; u64 nr_wakeups_migrate; u64 nr_wakeups_local; u64 nr_wakeups_remote; u64 nr_wakeups_affine; u64 nr_wakeups_affine_attempts; u64 nr_wakeups_passive; u64 nr_wakeups_idle; };
struct sched_entity { struct load_weight load; struct rb_node run_node; struct list_head group_node; unsigned int on_rq; u64 exec_start; u64 sum_exec_runtime; u64 vruntime; u64 prev_sum_exec_runtime; u64 nr_migrations; struct sched_statistics statistics; int depth; struct sched_entity *parent; struct cfs_rq *cfs_rq; struct cfs_rq *my_q; struct sched_avg avg; };
struct rt_rq;
struct sched_rt_entity { struct list_head run_list; unsigned long timeout; unsigned long watchdog_stamp; unsigned int time_slice; unsigned short on_rq; unsigned short on_list; struct sched_rt_entity *back; struct sched_rt_entity *parent; struct rt_rq *rt_rq; struct rt_rq *my_q; };
struct sched_dl_entity { struct rb_node rb_node; u64 dl_runtime; u64 dl_deadline; u64 dl_period; u64 dl_bw; s64 runtime; u64 deadline; unsigned int flags; int dl_throttled; int dl_boosted; int dl_yielded; struct hrtimer dl_timer; };
struct tlbflush_unmap_batch { struct cpumask cpumask; bool flush_required; bool writable; };
struct sched_class;
struct files_struct;
struct compat_robust_list_head;
struct numa_group;
struct ftrace_ret_stack;
struct kcov;
struct task_struct { volatile long state; void *stack; atomic_t usage; unsigned int flags; unsigned int ptrace; struct llist_node wake_entry; int on_cpu; unsigned int wakee_flips; unsigned long wakee_flip_decay_ts; struct task_struct *last_wakee; int wake_cpu; int on_rq; int prio; int static_prio; int normal_prio; unsigned int rt_priority; const struct sched_class *sched_class; struct sched_entity se; struct sched_rt_entity rt; struct task_group *sched_task_group; struct sched_dl_entity dl; struct hlist_head preempt_notifiers; unsigned int btrace_seq; unsigned int policy; int nr_cpus_allowed; cpumask_t cpus_allowed; unsigned long rcu_tasks_nvcsw; bool rcu_tasks_holdout; struct list_head rcu_tasks_holdout_list; int rcu_tasks_idle_cpu; struct sched_info sched_info; struct list_head tasks; struct plist_node pushable_tasks; struct rb_node pushable_dl_tasks; struct mm_struct *mm; struct mm_struct *active_mm; u32 vmacache_seqnum; struct vm_area_struct *vmacache[4U]; struct task_rss_stat rss_stat; int exit_state; int exit_code; int exit_signal; int pdeath_signal; unsigned long jobctl; unsigned int personality; unsigned char sched_reset_on_fork; unsigned char sched_contributes_to_load; unsigned char sched_migrated; unsigned char sched_remote_wakeup; unsigned char; unsigned char in_execve; unsigned char in_iowait; unsigned char restore_sigmask; unsigned char memcg_may_oom; unsigned char memcg_kmem_skip_account; unsigned char brk_randomized; unsigned long atomic_flags; struct restart_block restart_block; pid_t pid; pid_t tgid; struct task_struct *real_parent; struct task_struct *parent; struct list_head children; struct list_head sibling; struct task_struct *group_leader; struct list_head ptraced; struct list_head ptrace_entry; struct pid_link pids[3U]; struct list_head thread_group; struct list_head thread_node; struct completion *vfork_done; int *set_child_tid; int *clear_child_tid; cputime_t utime; cputime_t stime; cputime_t utimescaled; cputime_t stimescaled; cputime_t gtime; struct prev_cputime prev_cputime; unsigned long nvcsw; unsigned long nivcsw; u64 start_time; u64 real_start_time; unsigned long min_flt; unsigned long maj_flt; struct task_cputime cputime_expires; struct list_head cpu_timers[3U]; const struct cred *real_cred; const struct cred *cred; char comm[16U]; struct nameidata *nameidata; struct sysv_sem sysvsem; struct sysv_shm sysvshm; unsigned long last_switch_count; struct fs_struct *fs; struct files_struct *files; struct nsproxy *nsproxy; struct signal_struct *signal; struct sighand_struct *sighand; sigset_t blocked; sigset_t real_blocked; sigset_t saved_sigmask; struct
sigpending pending;   unsigned long sas_ss_sp;   size_t sas_ss_size;   unsigned int sas_ss_flags;   struct callback_head *task_works;   struct audit_context *audit_context;   kuid_t loginuid;   unsigned int sessionid;   struct seccomp seccomp;   u32 parent_exec_id;   u32 self_exec_id;   spinlock_t alloc_lock;   raw_spinlock_t pi_lock;   struct wake_q_node wake_q;   struct rb_root pi_waiters;   struct rb_node *pi_waiters_leftmost;   struct rt_mutex_waiter *pi_blocked_on;   struct mutex_waiter *blocked_on;   unsigned int irq_events;   unsigned long hardirq_enable_ip;   unsigned long hardirq_disable_ip;   unsigned int hardirq_enable_event;   unsigned int hardirq_disable_event;   int hardirqs_enabled;   int hardirq_context;   unsigned long softirq_disable_ip;   unsigned long softirq_enable_ip;   unsigned int softirq_disable_event;   unsigned int softirq_enable_event;   int softirqs_enabled;   int softirq_context;   u64 curr_chain_key;   int lockdep_depth;   unsigned int lockdep_recursion;   struct held_lock held_locks[48U];   gfp_t lockdep_reclaim_gfp;   unsigned int in_ubsan;   void *journal_info;   struct bio_list *bio_list;   struct blk_plug *plug;   struct reclaim_state *reclaim_state;   struct backing_dev_info *backing_dev_info;   struct io_context *io_context;   unsigned long ptrace_message;   siginfo_t *last_siginfo;   struct task_io_accounting ioac;   u64 acct_rss_mem1;   u64 acct_vm_mem1;   cputime_t acct_timexpd;   nodemask_t mems_allowed;   seqcount_t mems_allowed_seq;   int cpuset_mem_spread_rotor;   int cpuset_slab_spread_rotor;   struct css_set *cgroups;   struct list_head cg_list;   struct robust_list_head *robust_list;   struct compat_robust_list_head *compat_robust_list;   struct list_head pi_state_list;   struct futex_pi_state *pi_state_cache;   struct perf_event_context *perf_event_ctxp[2U];   struct mutex perf_event_mutex;   struct list_head perf_event_list;   struct mempolicy *mempolicy;   short il_next;   short pref_node_fork;   int numa_scan_seq;   unsigned int numa_scan_period;   unsigned int numa_scan_period_max;   int numa_preferred_nid;   unsigned long numa_migrate_retry;   u64 node_stamp;   u64 last_task_numa_placement;   u64 last_sum_exec_runtime;   struct callback_head numa_work;   struct list_head numa_entry;   struct numa_group *numa_group;   unsigned long *numa_faults;   unsigned long total_numa_faults;   unsigned long numa_faults_locality[3U];   unsigned long numa_pages_migrated;   struct tlbflush_unmap_batch tlb_ubc;   struct callback_head rcu;   struct pipe_inode_info *splice_pipe;   struct page_frag task_frag;   struct task_delay_info *delays;   int make_it_fail;   int nr_dirtied;   int nr_dirtied_pause;   unsigned long dirty_paused_when;   int latency_record_count;   struct latency_record latency_record[32U];   u64 timer_slack_ns;   u64 default_timer_slack_ns;   unsigned int kasan_depth;   int curr_ret_stack;   struct ftrace_ret_stack *ret_stack;   unsigned long long ftrace_timestamp;   atomic_t trace_overrun;   atomic_t tracing_graph_pause;   unsigned long trace;   unsigned long trace_recursion;   enum kcov_mode kcov_mode;   unsigned int kcov_size;   void *kcov_area;   struct kcov *kcov;   struct mem_cgroup *memcg_in_oom;   gfp_t memcg_oom_gfp_mask;   int memcg_oom_order;   unsigned int memcg_nr_pages_over_high;   struct uprobe_task *utask;   unsigned int sequential_io;   unsigned int sequential_io_avg;   unsigned long task_state_change;   int pagefault_disabled;   struct task_struct *oom_reaper_list;   struct thread_struct thread; } ;   158     struct 
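/*
 * Sketch: walking the global task list linked through task_struct.tasks
 * (listed above) with the standard iterator; the pr_info format is
 * illustrative only.
 */
#include <linux/sched.h>
#include <linux/rcupdate.h>
#include <linux/printk.h>

static void dump_tasks(void)
{
	struct task_struct *p;

	rcu_read_lock();	/* the task list is RCU-protected */
	for_each_process(p)
		pr_info("pid %d comm %s state %ld\n", p->pid, p->comm, p->state);
	rcu_read_unlock();
}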
iovec {   void *iov_base;   __kernel_size_t iov_len; } ;    21     struct kvec {   void *iov_base;   size_t iov_len; } ;    27     union __anonunion____missing_field_name_326 {   const struct iovec *iov;   const struct kvec *kvec;   const struct bio_vec *bvec; } ;    27     struct iov_iter {   int type;   size_t iov_offset;   size_t count;   union __anonunion____missing_field_name_326 __annonCompField64;   unsigned long nr_segs; } ;    11     typedef unsigned short __kernel_sa_family_t;    23     typedef __kernel_sa_family_t sa_family_t;    24     struct sockaddr {   sa_family_t sa_family;   char sa_data[14U]; } ;    38     struct kiocb ;    38     struct msghdr {   void *msg_name;   int msg_namelen;   struct iov_iter msg_iter;   void *msg_control;   __kernel_size_t msg_controllen;   unsigned int msg_flags;   struct kiocb *msg_iocb; } ;   217     enum ldv_19069 {   SS_FREE = 0,   SS_UNCONNECTED = 1,   SS_CONNECTING = 2,   SS_CONNECTED = 3,   SS_DISCONNECTING = 4 } ;    53     typedef enum ldv_19069 socket_state;    54     struct poll_table_struct ;    55     struct net ;    72     struct fasync_struct ;    72     struct socket_wq {   wait_queue_head_t wait;   struct fasync_struct *fasync_list;   unsigned long flags;   struct callback_head rcu; } ;    99     struct proto_ops ;    99     struct socket {   socket_state state;   short type;   unsigned long flags;   struct socket_wq *wq;   struct file *file;   struct sock *sk;   const struct proto_ops *ops; } ;   125     struct proto_ops {   int family;   struct module *owner;   int (*release)(struct socket *);   int (*bind)(struct socket *, struct sockaddr *, int);   int (*connect)(struct socket *, struct sockaddr *, int, int);   int (*socketpair)(struct socket *, struct socket *);   int (*accept)(struct socket *, struct socket *, int);   int (*getname)(struct socket *, struct sockaddr *, int *, int);   unsigned int (*poll)(struct file *, struct socket *, struct poll_table_struct *);   int (*ioctl)(struct socket *, unsigned int, unsigned long);   int (*compat_ioctl)(struct socket *, unsigned int, unsigned long);   int (*listen)(struct socket *, int);   int (*shutdown)(struct socket *, int);   int (*setsockopt)(struct socket *, int, int, char *, unsigned int);   int (*getsockopt)(struct socket *, int, int, char *, int *);   int (*compat_setsockopt)(struct socket *, int, int, char *, unsigned int);   int (*compat_getsockopt)(struct socket *, int, int, char *, int *);   int (*sendmsg)(struct socket *, struct msghdr *, size_t );   int (*recvmsg)(struct socket *, struct msghdr *, size_t , int);   int (*mmap)(struct file *, struct socket *, struct vm_area_struct *);   ssize_t  (*sendpage)(struct socket *, struct page *, int, size_t , int);   ssize_t  (*splice_read)(struct socket *, loff_t *, struct pipe_inode_info *, size_t , unsigned int);   int (*set_peek_off)(struct sock *, int);   int (*peek_len)(struct socket *); } ;    63     struct exception_table_entry {   int insn;   int fixup;   int handler; } ;   161     struct in6_addr ;   145     struct sk_buff ;   184     struct klist_node ;    37     struct klist_node {   void *n_klist;   struct list_head n_node;   struct kref n_ref; } ;    93     struct hlist_bl_node ;    93     struct hlist_bl_head {   struct hlist_bl_node *first; } ;    36     struct hlist_bl_node {   struct hlist_bl_node *next;   struct hlist_bl_node **pprev; } ;   114     struct __anonstruct____missing_field_name_346 {   spinlock_t lock;   int count; } ;   114     union __anonunion____missing_field_name_345 {   struct 
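/*
 * Sketch pairing struct kvec and struct msghdr from above with
 * kernel_sendmsg(), which dispatches through proto_ops->sendmsg; sock is
 * assumed to be an already-connected struct socket.
 */
#include <linux/net.h>
#include <linux/socket.h>

static int send_greeting(struct socket *sock)
{
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
	struct kvec vec = { .iov_base = (void *)"hello", .iov_len = 5 };

	/* kernel_sendmsg() maps the kvec into msg.msg_iter internally */
	return kernel_sendmsg(sock, &msg, &vec, 1, vec.iov_len);
}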
__anonstruct____missing_field_name_346 __annonCompField65; } ;   114     struct lockref {   union __anonunion____missing_field_name_345 __annonCompField66; } ;    77     struct path ;    78     struct vfsmount ;    79     struct __anonstruct____missing_field_name_348 {   u32 hash;   u32 len; } ;    79     union __anonunion____missing_field_name_347 {   struct __anonstruct____missing_field_name_348 __annonCompField67;   u64 hash_len; } ;    79     struct qstr {   union __anonunion____missing_field_name_347 __annonCompField68;   const unsigned char *name; } ;    65     struct dentry_operations ;    65     union __anonunion____missing_field_name_349 {   struct list_head d_lru;   wait_queue_head_t *d_wait; } ;    65     union __anonunion_d_u_350 {   struct hlist_node d_alias;   struct hlist_bl_node d_in_lookup_hash;   struct callback_head d_rcu; } ;    65     struct dentry {   unsigned int d_flags;   seqcount_t d_seq;   struct hlist_bl_node d_hash;   struct dentry *d_parent;   struct qstr d_name;   struct inode *d_inode;   unsigned char d_iname[32U];   struct lockref d_lockref;   const struct dentry_operations *d_op;   struct super_block *d_sb;   unsigned long d_time;   void *d_fsdata;   union __anonunion____missing_field_name_349 __annonCompField69;   struct list_head d_child;   struct list_head d_subdirs;   union __anonunion_d_u_350 d_u; } ;   121     struct dentry_operations {   int (*d_revalidate)(struct dentry *, unsigned int);   int (*d_weak_revalidate)(struct dentry *, unsigned int);   int (*d_hash)(const struct dentry *, struct qstr *);   int (*d_compare)(const struct dentry *, unsigned int, const char *, const struct qstr *);   int (*d_delete)(const struct dentry *);   int (*d_init)(struct dentry *);   void (*d_release)(struct dentry *);   void (*d_prune)(struct dentry *);   void (*d_iput)(struct dentry *, struct inode *);   char * (*d_dname)(struct dentry *, char *, int);   struct vfsmount * (*d_automount)(struct path *);   int (*d_manage)(struct dentry *, bool );   struct dentry * (*d_real)(struct dentry *, const struct inode *, unsigned int); } ;   591     struct path {   struct vfsmount *mnt;   struct dentry *dentry; } ;    19     struct shrink_control {   gfp_t gfp_mask;   unsigned long nr_to_scan;   int nid;   struct mem_cgroup *memcg; } ;    27     struct shrinker {   unsigned long int (*count_objects)(struct shrinker *, struct shrink_control *);   unsigned long int (*scan_objects)(struct shrinker *, struct shrink_control *);   int seeks;   long batch;   unsigned long flags;   struct list_head list;   atomic_long_t *nr_deferred; } ;    80     struct list_lru_one {   struct list_head list;   long nr_items; } ;    32     struct list_lru_memcg {   struct list_lru_one *lru[0U]; } ;    37     struct list_lru_node {   spinlock_t lock;   struct list_lru_one lru;   struct list_lru_memcg *memcg_lrus; } ;    47     struct list_lru {   struct list_lru_node *node;   struct list_head list; } ;    63     struct __anonstruct____missing_field_name_352 {   struct radix_tree_node *parent;   void *private_data; } ;    63     union __anonunion____missing_field_name_351 {   struct __anonstruct____missing_field_name_352 __annonCompField70;   struct callback_head callback_head; } ;    63     struct radix_tree_node {   unsigned char shift;   unsigned char offset;   unsigned int count;   union __anonunion____missing_field_name_351 __annonCompField71;   struct list_head private_list;   void *slots[64U];   unsigned long tags[3U][1U]; } ;   106     struct radix_tree_root {   gfp_t gfp_mask;   struct 
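/*
 * Sketch of registering the struct shrinker declared above; the
 * count/scan callbacks are hypothetical stubs that report nothing
 * reclaimable.
 */
#include <linux/shrinker.h>

static unsigned long my_count(struct shrinker *s, struct shrink_control *sc)
{
	return 0;		/* number of reclaimable objects */
}

static unsigned long my_scan(struct shrinker *s, struct shrink_control *sc)
{
	return SHRINK_STOP;	/* would free up to sc->nr_to_scan objects */
}

static struct shrinker my_shrinker = {
	.count_objects = my_count,
	.scan_objects  = my_scan,
	.seeks         = DEFAULT_SEEKS,
};

/* register_shrinker(&my_shrinker) on init, unregister_shrinker() on exit */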
radix_tree_node *rnode; } ;    45     struct fiemap_extent {   __u64 fe_logical;   __u64 fe_physical;   __u64 fe_length;   __u64 fe_reserved64[2U];   __u32 fe_flags;   __u32 fe_reserved[3U]; } ;    38     enum migrate_mode {   MIGRATE_ASYNC = 0,   MIGRATE_SYNC_LIGHT = 1,   MIGRATE_SYNC = 2 } ;    44     struct bio_vec {   struct page *bv_page;   unsigned int bv_len;   unsigned int bv_offset; } ;    34     struct bvec_iter {   sector_t bi_sector;   unsigned int bi_size;   unsigned int bi_idx;   unsigned int bi_bvec_done; } ;    84     struct bio_set ;    85     struct bio ;    86     struct bio_integrity_payload ;    87     struct block_device ;    18     typedef void bio_end_io_t(struct bio *);    20     union __anonunion____missing_field_name_359 {   struct bio_integrity_payload *bi_integrity; } ;    20     struct bio {   struct bio *bi_next;   struct block_device *bi_bdev;   int bi_error;   unsigned int bi_opf;   unsigned short bi_flags;   unsigned short bi_ioprio;   struct bvec_iter bi_iter;   unsigned int bi_phys_segments;   unsigned int bi_seg_front_size;   unsigned int bi_seg_back_size;   atomic_t __bi_remaining;   bio_end_io_t *bi_end_io;   void *bi_private;   struct io_context *bi_ioc;   struct cgroup_subsys_state *bi_css;   union __anonunion____missing_field_name_359 __annonCompField72;   unsigned short bi_vcnt;   unsigned short bi_max_vecs;   atomic_t __bi_cnt;   struct bio_vec *bi_io_vec;   struct bio_set *bi_pool;   struct bio_vec bi_inline_vecs[0U]; } ;   266     struct delayed_call {   void (*fn)(void *);   void *arg; } ;   261     struct bdi_writeback ;   262     struct export_operations ;   264     struct kstatfs ;   265     struct swap_info_struct ;   266     struct fscrypt_info ;   267     struct fscrypt_operations ;    76     struct iattr {   unsigned int ia_valid;   umode_t ia_mode;   kuid_t ia_uid;   kgid_t ia_gid;   loff_t ia_size;   struct timespec ia_atime;   struct timespec ia_mtime;   struct timespec ia_ctime;   struct file *ia_file; } ;   261     struct percpu_counter {   raw_spinlock_t lock;   s64 count;   struct list_head list;   s32 *counters; } ;   213     struct dquot ;   214     struct kqid ;    19     typedef __kernel_uid32_t projid_t;    23     struct __anonstruct_kprojid_t_360 {   projid_t val; } ;    23     typedef struct __anonstruct_kprojid_t_360 kprojid_t;   181     enum quota_type {   USRQUOTA = 0,   GRPQUOTA = 1,   PRJQUOTA = 2 } ;    66     typedef long long qsize_t;    67     union __anonunion____missing_field_name_361 {   kuid_t uid;   kgid_t gid;   kprojid_t projid; } ;    67     struct kqid {   union __anonunion____missing_field_name_361 __annonCompField73;   enum quota_type type; } ;   194     struct mem_dqblk {   qsize_t dqb_bhardlimit;   qsize_t dqb_bsoftlimit;   qsize_t dqb_curspace;   qsize_t dqb_rsvspace;   qsize_t dqb_ihardlimit;   qsize_t dqb_isoftlimit;   qsize_t dqb_curinodes;   time64_t dqb_btime;   time64_t dqb_itime; } ;   216     struct quota_format_type ;   217     struct mem_dqinfo {   struct quota_format_type *dqi_format;   int dqi_fmt_id;   struct list_head dqi_dirty_list;   unsigned long dqi_flags;   unsigned int dqi_bgrace;   unsigned int dqi_igrace;   qsize_t dqi_max_spc_limit;   qsize_t dqi_max_ino_limit;   void *dqi_priv; } ;   282     struct dquot {   struct hlist_node dq_hash;   struct list_head dq_inuse;   struct list_head dq_free;   struct list_head dq_dirty;   struct mutex dq_lock;   atomic_t dq_count;   wait_queue_head_t dq_wait_unused;   struct super_block *dq_sb;   struct kqid dq_id;   loff_t dq_off;   unsigned 
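/*
 * Sketch of the radix_tree_root declared just above mapping an index to
 * a pointer; my_tree and the stashed item are hypothetical.
 */
#include <linux/radix-tree.h>

static RADIX_TREE(my_tree, GFP_KERNEL);	/* declares and initializes the root */

static int stash(unsigned long index, void *item)
{
	int ret = radix_tree_insert(&my_tree, index, item);

	if (ret)
		return ret;	/* -EEXIST or -ENOMEM */
	return radix_tree_lookup(&my_tree, index) == item ? 0 : -EINVAL;
}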
long dq_flags;   struct mem_dqblk dq_dqb; } ;   309     struct quota_format_ops {   int (*check_quota_file)(struct super_block *, int);   int (*read_file_info)(struct super_block *, int);   int (*write_file_info)(struct super_block *, int);   int (*free_file_info)(struct super_block *, int);   int (*read_dqblk)(struct dquot *);   int (*commit_dqblk)(struct dquot *);   int (*release_dqblk)(struct dquot *);   int (*get_next_id)(struct super_block *, struct kqid *); } ;   321     struct dquot_operations {   int (*write_dquot)(struct dquot *);   struct dquot * (*alloc_dquot)(struct super_block *, int);   void (*destroy_dquot)(struct dquot *);   int (*acquire_dquot)(struct dquot *);   int (*release_dquot)(struct dquot *);   int (*mark_dirty)(struct dquot *);   int (*write_info)(struct super_block *, int);   qsize_t * (*get_reserved_space)(struct inode *);   int (*get_projid)(struct inode *, kprojid_t *);   int (*get_next_id)(struct super_block *, struct kqid *); } ;   338     struct qc_dqblk {   int d_fieldmask;   u64 d_spc_hardlimit;   u64 d_spc_softlimit;   u64 d_ino_hardlimit;   u64 d_ino_softlimit;   u64 d_space;   u64 d_ino_count;   s64 d_ino_timer;   s64 d_spc_timer;   int d_ino_warns;   int d_spc_warns;   u64 d_rt_spc_hardlimit;   u64 d_rt_spc_softlimit;   u64 d_rt_space;   s64 d_rt_spc_timer;   int d_rt_spc_warns; } ;   361     struct qc_type_state {   unsigned int flags;   unsigned int spc_timelimit;   unsigned int ino_timelimit;   unsigned int rt_spc_timelimit;   unsigned int spc_warnlimit;   unsigned int ino_warnlimit;   unsigned int rt_spc_warnlimit;   unsigned long long ino;   blkcnt_t blocks;   blkcnt_t nextents; } ;   407     struct qc_state {   unsigned int s_incoredqs;   struct qc_type_state s_state[3U]; } ;   418     struct qc_info {   int i_fieldmask;   unsigned int i_flags;   unsigned int i_spc_timelimit;   unsigned int i_ino_timelimit;   unsigned int i_rt_spc_timelimit;   unsigned int i_spc_warnlimit;   unsigned int i_ino_warnlimit;   unsigned int i_rt_spc_warnlimit; } ;   431     struct quotactl_ops {   int (*quota_on)(struct super_block *, int, int, struct path *);   int (*quota_off)(struct super_block *, int);   int (*quota_enable)(struct super_block *, unsigned int);   int (*quota_disable)(struct super_block *, unsigned int);   int (*quota_sync)(struct super_block *, int);   int (*set_info)(struct super_block *, int, struct qc_info *);   int (*get_dqblk)(struct super_block *, struct kqid , struct qc_dqblk *);   int (*get_nextdqblk)(struct super_block *, struct kqid *, struct qc_dqblk *);   int (*set_dqblk)(struct super_block *, struct kqid , struct qc_dqblk *);   int (*get_state)(struct super_block *, struct qc_state *);   int (*rm_xquota)(struct super_block *, unsigned int); } ;   447     struct quota_format_type {   int qf_fmt_id;   const struct quota_format_ops *qf_ops;   struct module *qf_owner;   struct quota_format_type *qf_next; } ;   511     struct quota_info {   unsigned int flags;   struct mutex dqio_mutex;   struct mutex dqonoff_mutex;   struct inode *files[3U];   struct mem_dqinfo info[3U];   const struct quota_format_ops *ops[3U]; } ;   541     struct writeback_control ;   542     struct kiocb {   struct file *ki_filp;   loff_t ki_pos;   void (*ki_complete)(struct kiocb *, long, long);   void *private;   int ki_flags; } ;   367     struct address_space_operations {   int (*writepage)(struct page *, struct writeback_control *);   int (*readpage)(struct file *, struct page *);   int (*writepages)(struct address_space *, struct writeback_control *);   int 
(*set_page_dirty)(struct page *);   int (*readpages)(struct file *, struct address_space *, struct list_head *, unsigned int);   int (*write_begin)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page **, void **);   int (*write_end)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page *, void *);   sector_t  (*bmap)(struct address_space *, sector_t );   void (*invalidatepage)(struct page *, unsigned int, unsigned int);   int (*releasepage)(struct page *, gfp_t );   void (*freepage)(struct page *);   ssize_t  (*direct_IO)(struct kiocb *, struct iov_iter *);   int (*migratepage)(struct address_space *, struct page *, struct page *, enum migrate_mode );   bool  (*isolate_page)(struct page *, isolate_mode_t );   void (*putback_page)(struct page *);   int (*launder_page)(struct page *);   int (*is_partially_uptodate)(struct page *, unsigned long, unsigned long);   void (*is_dirty_writeback)(struct page *, bool *, bool *);   int (*error_remove_page)(struct address_space *, struct page *);   int (*swap_activate)(struct swap_info_struct *, struct file *, sector_t *);   void (*swap_deactivate)(struct file *); } ;   426     struct address_space {   struct inode *host;   struct radix_tree_root page_tree;   spinlock_t tree_lock;   atomic_t i_mmap_writable;   struct rb_root i_mmap;   struct rw_semaphore i_mmap_rwsem;   unsigned long nrpages;   unsigned long nrexceptional;   unsigned long writeback_index;   const struct address_space_operations *a_ops;   unsigned long flags;   spinlock_t private_lock;   struct list_head private_list;   void *private_data; } ;   447     struct request_queue ;   448     struct hd_struct ;   448     struct gendisk ;   448     struct block_device {   dev_t bd_dev;   int bd_openers;   struct inode *bd_inode;   struct super_block *bd_super;   struct mutex bd_mutex;   void *bd_claiming;   void *bd_holder;   int bd_holders;   bool bd_write_holder;   struct list_head bd_holder_disks;   struct block_device *bd_contains;   unsigned int bd_block_size;   struct hd_struct *bd_part;   unsigned int bd_part_count;   int bd_invalidated;   struct gendisk *bd_disk;   struct request_queue *bd_queue;   struct list_head bd_list;   unsigned long bd_private;   int bd_fsfreeze_count;   struct mutex bd_fsfreeze_mutex; } ;   563     struct posix_acl ;   589     struct inode_operations ;   589     union __anonunion____missing_field_name_366 {   const unsigned int i_nlink;   unsigned int __i_nlink; } ;   589     union __anonunion____missing_field_name_367 {   struct hlist_head i_dentry;   struct callback_head i_rcu; } ;   589     struct file_lock_context ;   589     struct cdev ;   589     union __anonunion____missing_field_name_368 {   struct pipe_inode_info *i_pipe;   struct block_device *i_bdev;   struct cdev *i_cdev;   char *i_link;   unsigned int i_dir_seq; } ;   589     struct inode {   umode_t i_mode;   unsigned short i_opflags;   kuid_t i_uid;   kgid_t i_gid;   unsigned int i_flags;   struct posix_acl *i_acl;   struct posix_acl *i_default_acl;   const struct inode_operations *i_op;   struct super_block *i_sb;   struct address_space *i_mapping;   void *i_security;   unsigned long i_ino;   union __anonunion____missing_field_name_366 __annonCompField74;   dev_t i_rdev;   loff_t i_size;   struct timespec i_atime;   struct timespec i_mtime;   struct timespec i_ctime;   spinlock_t i_lock;   unsigned short i_bytes;   unsigned int i_blkbits;   blkcnt_t i_blocks;   unsigned long i_state;   struct rw_semaphore i_rwsem;   unsigned 
long dirtied_when;   unsigned long dirtied_time_when;   struct hlist_node i_hash;   struct list_head i_io_list;   struct bdi_writeback *i_wb;   int i_wb_frn_winner;   u16 i_wb_frn_avg_time;   u16 i_wb_frn_history;   struct list_head i_lru;   struct list_head i_sb_list;   struct list_head i_wb_list;   union __anonunion____missing_field_name_367 __annonCompField75;   u64 i_version;   atomic_t i_count;   atomic_t i_dio_count;   atomic_t i_writecount;   atomic_t i_readcount;   const struct file_operations *i_fop;   struct file_lock_context *i_flctx;   struct address_space i_data;   struct list_head i_devices;   union __anonunion____missing_field_name_368 __annonCompField76;   __u32 i_generation;   __u32 i_fsnotify_mask;   struct hlist_head i_fsnotify_marks;   struct fscrypt_info *i_crypt_info;   void *i_private; } ;   843     struct fown_struct {   rwlock_t lock;   struct pid *pid;   enum pid_type pid_type;   kuid_t uid;   kuid_t euid;   int signum; } ;   851     struct file_ra_state {   unsigned long start;   unsigned int size;   unsigned int async_size;   unsigned int ra_pages;   unsigned int mmap_miss;   loff_t prev_pos; } ;   874     union __anonunion_f_u_369 {   struct llist_node fu_llist;   struct callback_head fu_rcuhead; } ;   874     struct file {   union __anonunion_f_u_369 f_u;   struct path f_path;   struct inode *f_inode;   const struct file_operations *f_op;   spinlock_t f_lock;   atomic_long_t f_count;   unsigned int f_flags;   fmode_t f_mode;   struct mutex f_pos_lock;   loff_t f_pos;   struct fown_struct f_owner;   const struct cred *f_cred;   struct file_ra_state f_ra;   u64 f_version;   void *f_security;   void *private_data;   struct list_head f_ep_links;   struct list_head f_tfile_llink;   struct address_space *f_mapping; } ;   959     typedef void *fl_owner_t;   960     struct file_lock ;   961     struct file_lock_operations {   void (*fl_copy_lock)(struct file_lock *, struct file_lock *);   void (*fl_release_private)(struct file_lock *); } ;   967     struct lock_manager_operations {   int (*lm_compare_owner)(struct file_lock *, struct file_lock *);   unsigned long int (*lm_owner_key)(struct file_lock *);   fl_owner_t  (*lm_get_owner)(fl_owner_t );   void (*lm_put_owner)(fl_owner_t );   void (*lm_notify)(struct file_lock *);   int (*lm_grant)(struct file_lock *, int);   bool  (*lm_break)(struct file_lock *);   int (*lm_change)(struct file_lock *, int, struct list_head *);   void (*lm_setup)(struct file_lock *, void **); } ;   994     struct nlm_lockowner ;   995     struct nfs_lock_info {   u32 state;   struct nlm_lockowner *owner;   struct list_head list; } ;    14     struct nfs4_lock_state ;    15     struct nfs4_lock_info {   struct nfs4_lock_state *owner; } ;    19     struct __anonstruct_afs_371 {   struct list_head link;   int state; } ;    19     union __anonunion_fl_u_370 {   struct nfs_lock_info nfs_fl;   struct nfs4_lock_info nfs4_fl;   struct __anonstruct_afs_371 afs; } ;    19     struct file_lock {   struct file_lock *fl_next;   struct list_head fl_list;   struct hlist_node fl_link;   struct list_head fl_block;   fl_owner_t fl_owner;   unsigned int fl_flags;   unsigned char fl_type;   unsigned int fl_pid;   int fl_link_cpu;   struct pid *fl_nspid;   wait_queue_head_t fl_wait;   struct file *fl_file;   loff_t fl_start;   loff_t fl_end;   struct fasync_struct *fl_fasync;   unsigned long fl_break_time;   unsigned long fl_downgrade_time;   const struct file_lock_operations *fl_ops;   const struct lock_manager_operations *fl_lmops;   union __anonunion_fl_u_370 
fl_u; } ;  1047     struct file_lock_context {   spinlock_t flc_lock;   struct list_head flc_flock;   struct list_head flc_posix;   struct list_head flc_lease; } ;  1255     struct fasync_struct {   spinlock_t fa_lock;   int magic;   int fa_fd;   struct fasync_struct *fa_next;   struct file *fa_file;   struct callback_head fa_rcu; } ;  1290     struct sb_writers {   int frozen;   wait_queue_head_t wait_unfrozen;   struct percpu_rw_semaphore rw_sem[3U]; } ;  1320     struct super_operations ;  1320     struct xattr_handler ;  1320     struct mtd_info ;  1320     struct super_block {   struct list_head s_list;   dev_t s_dev;   unsigned char s_blocksize_bits;   unsigned long s_blocksize;   loff_t s_maxbytes;   struct file_system_type *s_type;   const struct super_operations *s_op;   const struct dquot_operations *dq_op;   const struct quotactl_ops *s_qcop;   const struct export_operations *s_export_op;   unsigned long s_flags;   unsigned long s_iflags;   unsigned long s_magic;   struct dentry *s_root;   struct rw_semaphore s_umount;   int s_count;   atomic_t s_active;   void *s_security;   const struct xattr_handler **s_xattr;   const struct fscrypt_operations *s_cop;   struct hlist_bl_head s_anon;   struct list_head s_mounts;   struct block_device *s_bdev;   struct backing_dev_info *s_bdi;   struct mtd_info *s_mtd;   struct hlist_node s_instances;   unsigned int s_quota_types;   struct quota_info s_dquot;   struct sb_writers s_writers;   char s_id[32U];   u8 s_uuid[16U];   void *s_fs_info;   unsigned int s_max_links;   fmode_t s_mode;   u32 s_time_gran;   struct mutex s_vfs_rename_mutex;   char *s_subtype;   char *s_options;   const struct dentry_operations *s_d_op;   int cleancache_poolid;   struct shrinker s_shrink;   atomic_long_t s_remove_count;   int s_readonly_remount;   struct workqueue_struct *s_dio_done_wq;   struct hlist_head s_pins;   struct user_namespace *s_user_ns;   struct list_lru s_dentry_lru;   struct list_lru s_inode_lru;   struct callback_head rcu;   struct work_struct destroy_work;   struct mutex s_sync_lock;   int s_stack_depth;   spinlock_t s_inode_list_lock;   struct list_head s_inodes;   spinlock_t s_inode_wblist_lock;   struct list_head s_inodes_wb; } ;  1603     struct fiemap_extent_info {   unsigned int fi_flags;   unsigned int fi_extents_mapped;   unsigned int fi_extents_max;   struct fiemap_extent *fi_extents_start; } ;  1616     struct dir_context ;  1641     struct dir_context {   int (*actor)(struct dir_context *, const char *, int, loff_t , u64 , unsigned int);   loff_t pos; } ;  1648     struct file_operations {   struct module *owner;   loff_t  (*llseek)(struct file *, loff_t , int);   ssize_t  (*read)(struct file *, char *, size_t , loff_t *);   ssize_t  (*write)(struct file *, const char *, size_t , loff_t *);   ssize_t  (*read_iter)(struct kiocb *, struct iov_iter *);   ssize_t  (*write_iter)(struct kiocb *, struct iov_iter *);   int (*iterate)(struct file *, struct dir_context *);   int (*iterate_shared)(struct file *, struct dir_context *);   unsigned int (*poll)(struct file *, struct poll_table_struct *);   long int (*unlocked_ioctl)(struct file *, unsigned int, unsigned long);   long int (*compat_ioctl)(struct file *, unsigned int, unsigned long);   int (*mmap)(struct file *, struct vm_area_struct *);   int (*open)(struct inode *, struct file *);   int (*flush)(struct file *, fl_owner_t );   int (*release)(struct inode *, struct file *);   int (*fsync)(struct file *, loff_t , loff_t , int);   int (*aio_fsync)(struct kiocb *, int);   int 
(*fasync)(int, struct file *, int);   int (*lock)(struct file *, int, struct file_lock *);   ssize_t  (*sendpage)(struct file *, struct page *, int, size_t , loff_t *, int);   unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);   int (*check_flags)(int);   int (*flock)(struct file *, int, struct file_lock *);   ssize_t  (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t , unsigned int);   ssize_t  (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t , unsigned int);   int (*setlease)(struct file *, long, struct file_lock **, void **);   long int (*fallocate)(struct file *, int, loff_t , loff_t );   void (*show_fdinfo)(struct seq_file *, struct file *);   ssize_t  (*copy_file_range)(struct file *, loff_t , struct file *, loff_t , size_t , unsigned int);   int (*clone_file_range)(struct file *, loff_t , struct file *, loff_t , u64 );   ssize_t  (*dedupe_file_range)(struct file *, u64 , u64 , struct file *, u64 ); } ;  1717     struct inode_operations {   struct dentry * (*lookup)(struct inode *, struct dentry *, unsigned int);   const char * (*get_link)(struct dentry *, struct inode *, struct delayed_call *);   int (*permission)(struct inode *, int);   struct posix_acl * (*get_acl)(struct inode *, int);   int (*readlink)(struct dentry *, char *, int);   int (*create)(struct inode *, struct dentry *, umode_t , bool );   int (*link)(struct dentry *, struct inode *, struct dentry *);   int (*unlink)(struct inode *, struct dentry *);   int (*symlink)(struct inode *, struct dentry *, const char *);   int (*mkdir)(struct inode *, struct dentry *, umode_t );   int (*rmdir)(struct inode *, struct dentry *);   int (*mknod)(struct inode *, struct dentry *, umode_t , dev_t );   int (*rename)(struct inode *, struct dentry *, struct inode *, struct dentry *);   int (*rename2)(struct inode *, struct dentry *, struct inode *, struct dentry *, unsigned int);   int (*setattr)(struct dentry *, struct iattr *);   int (*getattr)(struct vfsmount *, struct dentry *, struct kstat *);   int (*setxattr)(struct dentry *, struct inode *, const char *, const void *, size_t , int);   ssize_t  (*getxattr)(struct dentry *, struct inode *, const char *, void *, size_t );   ssize_t  (*listxattr)(struct dentry *, char *, size_t );   int (*removexattr)(struct dentry *, const char *);   int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 , u64 );   int (*update_time)(struct inode *, struct timespec *, int);   int (*atomic_open)(struct inode *, struct dentry *, struct file *, unsigned int, umode_t , int *);   int (*tmpfile)(struct inode *, struct dentry *, umode_t );   int (*set_acl)(struct inode *, struct posix_acl *, int); } ;  1774     struct super_operations {   struct inode * (*alloc_inode)(struct super_block *);   void (*destroy_inode)(struct inode *);   void (*dirty_inode)(struct inode *, int);   int (*write_inode)(struct inode *, struct writeback_control *);   int (*drop_inode)(struct inode *);   void (*evict_inode)(struct inode *);   void (*put_super)(struct super_block *);   int (*sync_fs)(struct super_block *, int);   int (*freeze_super)(struct super_block *);   int (*freeze_fs)(struct super_block *);   int (*thaw_super)(struct super_block *);   int (*unfreeze_fs)(struct super_block *);   int (*statfs)(struct dentry *, struct kstatfs *);   int (*remount_fs)(struct super_block *, int *, char *);   void (*umount_begin)(struct super_block *);   int (*show_options)(struct seq_file *, struct dentry *);   
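/*
 * Sketch wiring the struct file_operations listed above to a misc
 * character device; the "demo" name and payload string are hypothetical.
 */
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/uaccess.h>
#include <linux/module.h>

static const char payload[] = "hello\n";

static ssize_t demo_read(struct file *f, char __user *buf, size_t len, loff_t *pos)
{
	return simple_read_from_buffer(buf, len, pos, payload, sizeof(payload) - 1);
}

static const struct file_operations demo_fops = {
	.owner  = THIS_MODULE,
	.read   = demo_read,
	.llseek = default_llseek,
};

static struct miscdevice demo_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name  = "demo",
	.fops  = &demo_fops,
};

/* misc_register(&demo_dev) creates /dev/demo; misc_deregister() removes it */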
int (*show_devname)(struct seq_file *, struct dentry *);   int (*show_path)(struct seq_file *, struct dentry *);   int (*show_stats)(struct seq_file *, struct dentry *);   ssize_t  (*quota_read)(struct super_block *, int, char *, size_t , loff_t );   ssize_t  (*quota_write)(struct super_block *, int, const char *, size_t , loff_t );   struct dquot ** (*get_dquots)(struct inode *);   int (*bdev_try_to_free_page)(struct super_block *, struct page *, gfp_t );   long int (*nr_cached_objects)(struct super_block *, struct shrink_control *);   long int (*free_cached_objects)(struct super_block *, struct shrink_control *); } ;  2018     struct file_system_type {   const char *name;   int fs_flags;   struct dentry * (*mount)(struct file_system_type *, int, const char *, void *);   void (*kill_sb)(struct super_block *);   struct module *owner;   struct file_system_type *next;   struct hlist_head fs_supers;   struct lock_class_key s_lock_key;   struct lock_class_key s_umount_key;   struct lock_class_key s_vfs_rename_key;   struct lock_class_key s_writers_key[3U];   struct lock_class_key i_lock_key;   struct lock_class_key i_mutex_key;   struct lock_class_key i_mutex_dir_key; } ;  3193     struct seq_file {   char *buf;   size_t size;   size_t from;   size_t count;   size_t pad_until;   loff_t index;   loff_t read_pos;   u64 version;   struct mutex lock;   const struct seq_operations *op;   int poll_event;   const struct file *file;   void *private; } ;    30     struct seq_operations {   void * (*start)(struct seq_file *, loff_t *);   void (*stop)(struct seq_file *, void *);   void * (*next)(struct seq_file *, void *, loff_t *);   int (*show)(struct seq_file *, void *); } ;   222     struct pinctrl ;   223     struct pinctrl_state ;   194     struct dev_pin_info {   struct pinctrl *p;   struct pinctrl_state *default_state;   struct pinctrl_state *init_state;   struct pinctrl_state *sleep_state;   struct pinctrl_state *idle_state; } ;    76     struct dma_map_ops ;    76     struct dev_archdata {   struct dma_map_ops *dma_ops;   void *iommu; } ;    24     struct device_private ;    25     struct device_driver ;    26     struct driver_private ;    27     struct class ;    28     struct subsys_private ;    29     struct bus_type ;    30     struct device_node ;    31     struct fwnode_handle ;    32     struct iommu_ops ;    33     struct iommu_group ;    61     struct device_attribute ;    61     struct bus_type {   const char *name;   const char *dev_name;   struct device *dev_root;   struct device_attribute *dev_attrs;   const struct attribute_group **bus_groups;   const struct attribute_group **dev_groups;   const struct attribute_group **drv_groups;   int (*match)(struct device *, struct device_driver *);   int (*uevent)(struct device *, struct kobj_uevent_env *);   int (*probe)(struct device *);   int (*remove)(struct device *);   void (*shutdown)(struct device *);   int (*online)(struct device *);   int (*offline)(struct device *);   int (*suspend)(struct device *, pm_message_t );   int (*resume)(struct device *);   const struct dev_pm_ops *pm;   const struct iommu_ops *iommu_ops;   struct subsys_private *p;   struct lock_class_key lock_key; } ;   142     struct device_type ;   201     enum probe_type {   PROBE_DEFAULT_STRATEGY = 0,   PROBE_PREFER_ASYNCHRONOUS = 1,   PROBE_FORCE_SYNCHRONOUS = 2 } ;   207     struct of_device_id ;   207     struct acpi_device_id ;   207     struct device_driver {   const char *name;   struct bus_type *bus;   struct module *owner;   const char *mod_name;   bool 
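/*
 * Sketch of the seq_file interface above driven through single_open();
 * the "demo_stats" proc entry name and counter value are hypothetical.
 */
#include <linux/seq_file.h>
#include <linux/proc_fs.h>

static int demo_show(struct seq_file *m, void *v)
{
	seq_printf(m, "counter: %d\n", 42);
	return 0;
}

static int demo_open(struct inode *inode, struct file *file)
{
	return single_open(file, demo_show, NULL);
}

static const struct file_operations demo_stats_fops = {
	.open    = demo_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

/* proc_create("demo_stats", 0444, NULL, &demo_stats_fops); */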
suppress_bind_attrs;   enum probe_type probe_type;   const struct of_device_id *of_match_table;   const struct acpi_device_id *acpi_match_table;   int (*probe)(struct device *);   int (*remove)(struct device *);   void (*shutdown)(struct device *);   int (*suspend)(struct device *, pm_message_t );   int (*resume)(struct device *);   const struct attribute_group **groups;   const struct dev_pm_ops *pm;   struct driver_private *p; } ;   357     struct class_attribute ;   357     struct class {   const char *name;   struct module *owner;   struct class_attribute *class_attrs;   const struct attribute_group **dev_groups;   struct kobject *dev_kobj;   int (*dev_uevent)(struct device *, struct kobj_uevent_env *);   char * (*devnode)(struct device *, umode_t *);   void (*class_release)(struct class *);   void (*dev_release)(struct device *);   int (*suspend)(struct device *, pm_message_t );   int (*resume)(struct device *);   const struct kobj_ns_type_operations *ns_type;   const void * (*namespace)(struct device *);   const struct dev_pm_ops *pm;   struct subsys_private *p; } ;   450     struct class_attribute {   struct attribute attr;   ssize_t  (*show)(struct class *, struct class_attribute *, char *);   ssize_t  (*store)(struct class *, struct class_attribute *, const char *, size_t ); } ;   518     struct device_type {   const char *name;   const struct attribute_group **groups;   int (*uevent)(struct device *, struct kobj_uevent_env *);   char * (*devnode)(struct device *, umode_t *, kuid_t *, kgid_t *);   void (*release)(struct device *);   const struct dev_pm_ops *pm; } ;   546     struct device_attribute {   struct attribute attr;   ssize_t  (*show)(struct device *, struct device_attribute *, char *);   ssize_t  (*store)(struct device *, struct device_attribute *, const char *, size_t ); } ;   699     struct device_dma_parameters {   unsigned int max_segment_size;   unsigned long segment_boundary_mask; } ;   708     struct irq_domain ;   708     struct dma_coherent_mem ;   708     struct cma ;   708     struct device {   struct device *parent;   struct device_private *p;   struct kobject kobj;   const char *init_name;   const struct device_type *type;   struct mutex mutex;   struct bus_type *bus;   struct device_driver *driver;   void *platform_data;   void *driver_data;   struct dev_pm_info power;   struct dev_pm_domain *pm_domain;   struct irq_domain *msi_domain;   struct dev_pin_info *pins;   struct list_head msi_list;   int numa_node;   u64 *dma_mask;   u64 coherent_dma_mask;   unsigned long dma_pfn_offset;   struct device_dma_parameters *dma_parms;   struct list_head dma_pools;   struct dma_coherent_mem *dma_mem;   struct cma *cma_area;   struct dev_archdata archdata;   struct device_node *of_node;   struct fwnode_handle *fwnode;   dev_t devt;   u32 id;   spinlock_t devres_lock;   struct list_head devres_head;   struct klist_node knode_class;   struct class *class;   const struct attribute_group **groups;   void (*release)(struct device *);   struct iommu_group *iommu_group;   bool offline_disabled;   bool offline; } ;   862     struct wakeup_source {   const char *name;   struct list_head entry;   spinlock_t lock;   struct wake_irq *wakeirq;   struct timer_list timer;   unsigned long timer_expires;   ktime_t total_time;   ktime_t max_time;   ktime_t last_time;   ktime_t start_prevent_time;   ktime_t prevent_sleep_time;   unsigned long event_count;   unsigned long active_count;   unsigned long relax_count;   unsigned long expire_count;   unsigned long wakeup_count;   bool active;   
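/*
 * Sketch of a driver that fills the embedded struct device_driver above
 * via the platform bus; "demo-dev" and both callbacks are hypothetical.
 */
#include <linux/platform_device.h>
#include <linux/module.h>

static int demo_probe(struct platform_device *pdev)
{
	dev_info(&pdev->dev, "bound\n");
	return 0;
}

static int demo_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver demo_driver = {
	.probe  = demo_probe,
	.remove = demo_remove,
	.driver = {
		.name = "demo-dev",	/* matched against the device name */
	},
};
module_platform_driver(demo_driver);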
bool autosleep_enabled; } ;  1327     struct scatterlist ;    89     enum dma_data_direction {   DMA_BIDIRECTIONAL = 0,   DMA_TO_DEVICE = 1,   DMA_FROM_DEVICE = 2,   DMA_NONE = 3 } ;   273     struct vm_fault {   unsigned int flags;   gfp_t gfp_mask;   unsigned long pgoff;   void *virtual_address;   struct page *cow_page;   struct page *page;   void *entry; } ;   308     struct fault_env {   struct vm_area_struct *vma;   unsigned long address;   unsigned int flags;   pmd_t *pmd;   pte_t *pte;   spinlock_t *ptl;   pgtable_t prealloc_pte; } ;   335     struct vm_operations_struct {   void (*open)(struct vm_area_struct *);   void (*close)(struct vm_area_struct *);   int (*mremap)(struct vm_area_struct *);   int (*fault)(struct vm_area_struct *, struct vm_fault *);   int (*pmd_fault)(struct vm_area_struct *, unsigned long, pmd_t *, unsigned int);   void (*map_pages)(struct fault_env *, unsigned long, unsigned long);   int (*page_mkwrite)(struct vm_area_struct *, struct vm_fault *);   int (*pfn_mkwrite)(struct vm_area_struct *, struct vm_fault *);   int (*access)(struct vm_area_struct *, unsigned long, void *, int, int);   const char * (*name)(struct vm_area_struct *);   int (*set_policy)(struct vm_area_struct *, struct mempolicy *);   struct mempolicy * (*get_policy)(struct vm_area_struct *, unsigned long);   struct page * (*find_special_page)(struct vm_area_struct *, unsigned long); } ;  2451     struct scatterlist {   unsigned long sg_magic;   unsigned long page_link;   unsigned int offset;   unsigned int length;   dma_addr_t dma_address;   unsigned int dma_length; } ;    21     struct sg_table {   struct scatterlist *sgl;   unsigned int nents;   unsigned int orig_nents; } ;   406     struct dma_map_ops {   void * (*alloc)(struct device *, size_t , dma_addr_t *, gfp_t , unsigned long);   void (*free)(struct device *, size_t , void *, dma_addr_t , unsigned long);   int (*mmap)(struct device *, struct vm_area_struct *, void *, dma_addr_t , size_t , unsigned long);   int (*get_sgtable)(struct device *, struct sg_table *, void *, dma_addr_t , size_t , unsigned long);   dma_addr_t  (*map_page)(struct device *, struct page *, unsigned long, size_t , enum dma_data_direction , unsigned long);   void (*unmap_page)(struct device *, dma_addr_t , size_t , enum dma_data_direction , unsigned long);   int (*map_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction , unsigned long);   void (*unmap_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction , unsigned long);   void (*sync_single_for_cpu)(struct device *, dma_addr_t , size_t , enum dma_data_direction );   void (*sync_single_for_device)(struct device *, dma_addr_t , size_t , enum dma_data_direction );   void (*sync_sg_for_cpu)(struct device *, struct scatterlist *, int, enum dma_data_direction );   void (*sync_sg_for_device)(struct device *, struct scatterlist *, int, enum dma_data_direction );   int (*mapping_error)(struct device *, dma_addr_t );   int (*dma_supported)(struct device *, u64 );   int (*set_dma_mask)(struct device *, u64 );   int is_phys; } ;    15     typedef u64 netdev_features_t;    70     union __anonunion_in6_u_382 {   __u8 u6_addr8[16U];   __be16 u6_addr16[8U];   __be32 u6_addr32[4U]; } ;    70     struct in6_addr {   union __anonunion_in6_u_382 in6_u; } ;    41     struct sockaddr_in6 {   unsigned short sin6_family;   __be16 sin6_port;   __be32 sin6_flowinfo;   struct in6_addr sin6_addr;   __u32 sin6_scope_id; } ;    46     struct ethhdr {   unsigned char h_dest[6U];   unsigned char 
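/*
 * Sketch of the streaming DMA API that struct dma_map_ops above backs;
 * dev and buf are assumed to come from the calling driver.
 */
#include <linux/dma-mapping.h>

static int demo_dma(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, handle))
		return -ENOMEM;
	/* ... hand `handle` to the hardware and wait for completion ... */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}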
h_source[6U];   __be16 h_proto; } ;   199     struct pipe_buf_operations ;   199     struct pipe_buffer {   struct page *page;   unsigned int offset;   unsigned int len;   const struct pipe_buf_operations *ops;   unsigned int flags;   unsigned long private; } ;    27     struct pipe_inode_info {   struct mutex mutex;   wait_queue_head_t wait;   unsigned int nrbufs;   unsigned int curbuf;   unsigned int buffers;   unsigned int readers;   unsigned int writers;   unsigned int files;   unsigned int waiting_writers;   unsigned int r_counter;   unsigned int w_counter;   struct page *tmp_page;   struct fasync_struct *fasync_readers;   struct fasync_struct *fasync_writers;   struct pipe_buffer *bufs;   struct user_struct *user; } ;    63     struct pipe_buf_operations {   int can_merge;   int (*confirm)(struct pipe_inode_info *, struct pipe_buffer *);   void (*release)(struct pipe_inode_info *, struct pipe_buffer *);   int (*steal)(struct pipe_inode_info *, struct pipe_buffer *);   void (*get)(struct pipe_inode_info *, struct pipe_buffer *); } ;   295     struct flowi_tunnel {   __be64 tun_id; } ;    26     struct flowi_common {   int flowic_oif;   int flowic_iif;   __u32 flowic_mark;   __u8 flowic_tos;   __u8 flowic_scope;   __u8 flowic_proto;   __u8 flowic_flags;   __u32 flowic_secid;   struct flowi_tunnel flowic_tun_key; } ;    42     struct __anonstruct_ports_389 {   __be16 dport;   __be16 sport; } ;    42     struct __anonstruct_icmpt_390 {   __u8 type;   __u8 code; } ;    42     struct __anonstruct_dnports_391 {   __le16 dport;   __le16 sport; } ;    42     struct __anonstruct_mht_392 {   __u8 type; } ;    42     union flowi_uli {   struct __anonstruct_ports_389 ports;   struct __anonstruct_icmpt_390 icmpt;   struct __anonstruct_dnports_391 dnports;   __be32 spi;   __be32 gre_key;   struct __anonstruct_mht_392 mht; } ;    66     struct flowi4 {   struct flowi_common __fl_common;   __be32 saddr;   __be32 daddr;   union flowi_uli uli; } ;   123     struct flowi6 {   struct flowi_common __fl_common;   struct in6_addr daddr;   struct in6_addr saddr;   __be32 flowlabel;   union flowi_uli uli; } ;   141     struct flowidn {   struct flowi_common __fl_common;   __le16 daddr;   __le16 saddr;   union flowi_uli uli; } ;   161     union __anonunion_u_393 {   struct flowi_common __fl_common;   struct flowi4 ip4;   struct flowi6 ip6;   struct flowidn dn; } ;   161     struct flowi {   union __anonunion_u_393 u; } ;   265     struct napi_struct ;   266     struct nf_conntrack {   atomic_t use; } ;   254     union __anonunion____missing_field_name_394 {   __be32 ipv4_daddr;   struct in6_addr ipv6_daddr;   char neigh_header[8U]; } ;   254     struct nf_bridge_info {   atomic_t use;   unsigned char orig_proto;   unsigned char pkt_otherhost;   unsigned char in_prerouting;   unsigned char bridged_dnat;   __u16 frag_max_size;   struct net_device *physindev;   struct net_device *physoutdev;   union __anonunion____missing_field_name_394 __annonCompField82; } ;   278     struct sk_buff_head {   struct sk_buff *next;   struct sk_buff *prev;   __u32 qlen;   spinlock_t lock; } ;   500     typedef unsigned int sk_buff_data_t;   501     struct __anonstruct____missing_field_name_397 {   u32 stamp_us;   u32 stamp_jiffies; } ;   501     union __anonunion____missing_field_name_396 {   u64 v64;   struct __anonstruct____missing_field_name_397 __annonCompField83; } ;   501     struct skb_mstamp {   union __anonunion____missing_field_name_396 __annonCompField84; } ;   564     union __anonunion____missing_field_name_400 {   
ktime_t tstamp;   struct skb_mstamp skb_mstamp; } ;   564     struct __anonstruct____missing_field_name_399 {   struct sk_buff *next;   struct sk_buff *prev;   union __anonunion____missing_field_name_400 __annonCompField85; } ;   564     union __anonunion____missing_field_name_398 {   struct __anonstruct____missing_field_name_399 __annonCompField86;   struct rb_node rbnode; } ;   564     struct sec_path ;   564     struct __anonstruct____missing_field_name_402 {   __u16 csum_start;   __u16 csum_offset; } ;   564     union __anonunion____missing_field_name_401 {   __wsum csum;   struct __anonstruct____missing_field_name_402 __annonCompField88; } ;   564     union __anonunion____missing_field_name_403 {   unsigned int napi_id;   unsigned int sender_cpu; } ;   564     union __anonunion____missing_field_name_404 {   __u32 secmark;   __u32 offload_fwd_mark; } ;   564     union __anonunion____missing_field_name_405 {   __u32 mark;   __u32 reserved_tailroom; } ;   564     union __anonunion____missing_field_name_406 {   __be16 inner_protocol;   __u8 inner_ipproto; } ;   564     struct sk_buff {   union __anonunion____missing_field_name_398 __annonCompField87;   struct sock *sk;   struct net_device *dev;   char cb[48U];   unsigned long _skb_refdst;   void (*destructor)(struct sk_buff *);   struct sec_path *sp;   struct nf_conntrack *nfct;   struct nf_bridge_info *nf_bridge;   unsigned int len;   unsigned int data_len;   __u16 mac_len;   __u16 hdr_len;   __u16 queue_mapping;   unsigned char cloned;   unsigned char nohdr;   unsigned char fclone;   unsigned char peeked;   unsigned char head_frag;   unsigned char xmit_more;   __u32 headers_start[0U];   __u8 __pkt_type_offset[0U];   unsigned char pkt_type;   unsigned char pfmemalloc;   unsigned char ignore_df;   unsigned char nfctinfo;   unsigned char nf_trace;   unsigned char ip_summed;   unsigned char ooo_okay;   unsigned char l4_hash;   unsigned char sw_hash;   unsigned char wifi_acked_valid;   unsigned char wifi_acked;   unsigned char no_fcs;   unsigned char encapsulation;   unsigned char encap_hdr_csum;   unsigned char csum_valid;   unsigned char csum_complete_sw;   unsigned char csum_level;   unsigned char csum_bad;   unsigned char ndisc_nodetype;   unsigned char ipvs_property;   unsigned char inner_protocol_type;   unsigned char remcsum_offload;   __u16 tc_index;   __u16 tc_verd;   union __anonunion____missing_field_name_401 __annonCompField89;   __u32 priority;   int skb_iif;   __u32 hash;   __be16 vlan_proto;   __u16 vlan_tci;   union __anonunion____missing_field_name_403 __annonCompField90;   union __anonunion____missing_field_name_404 __annonCompField91;   union __anonunion____missing_field_name_405 __annonCompField92;   union __anonunion____missing_field_name_406 __annonCompField93;   __u16 inner_transport_header;   __u16 inner_network_header;   __u16 inner_mac_header;   __be16 protocol;   __u16 transport_header;   __u16 network_header;   __u16 mac_header;   __u32 headers_end[0U];   sk_buff_data_t tail;   sk_buff_data_t end;   unsigned char *head;   unsigned char *data;   unsigned int truesize;   atomic_t users; } ;   831     struct dst_entry ;   880     struct rtable ;    65     struct irq_poll ;     5     typedef int irq_poll_fn(struct irq_poll *, int);     6     struct irq_poll {   struct list_head list;   unsigned long state;   int weight;   irq_poll_fn *poll; } ;   180     struct ipv6_stable_secret {   bool initialized;   struct in6_addr secret; } ;    64     struct ipv6_devconf {   __s32 forwarding;   __s32 hop_limit;   __s32 mtu6;   
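/*
 * Sketch of sk_buff/sk_buff_head from above: allocate a small buffer,
 * run it through a private queue, and free it; sizes are illustrative.
 */
#include <linux/skbuff.h>

static struct sk_buff_head demo_q;

static int demo_queue_once(void)
{
	struct sk_buff *skb = alloc_skb(64, GFP_KERNEL);

	if (!skb)
		return -ENOMEM;
	memset(skb_put(skb, 14), 0, 14);	/* reserve an ethhdr-sized area */
	skb_queue_head_init(&demo_q);
	skb_queue_tail(&demo_q, skb);
	kfree_skb(skb_dequeue(&demo_q));	/* drop it again */
	return 0;
}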
__s32 accept_ra;   __s32 accept_redirects;   __s32 autoconf;   __s32 dad_transmits;   __s32 rtr_solicits;   __s32 rtr_solicit_interval;   __s32 rtr_solicit_delay;   __s32 force_mld_version;   __s32 mldv1_unsolicited_report_interval;   __s32 mldv2_unsolicited_report_interval;   __s32 use_tempaddr;   __s32 temp_valid_lft;   __s32 temp_prefered_lft;   __s32 regen_max_retry;   __s32 max_desync_factor;   __s32 max_addresses;   __s32 accept_ra_defrtr;   __s32 accept_ra_min_hop_limit;   __s32 accept_ra_pinfo;   __s32 ignore_routes_with_linkdown;   __s32 accept_ra_rtr_pref;   __s32 rtr_probe_interval;   __s32 accept_ra_rt_info_max_plen;   __s32 proxy_ndp;   __s32 accept_source_route;   __s32 accept_ra_from_local;   __s32 optimistic_dad;   __s32 use_optimistic;   __s32 mc_forwarding;   __s32 disable_ipv6;   __s32 drop_unicast_in_l2_multicast;   __s32 accept_dad;   __s32 force_tllao;   __s32 ndisc_notify;   __s32 suppress_frag_ndisc;   __s32 accept_ra_mtu;   __s32 drop_unsolicited_na;   struct ipv6_stable_secret stable_secret;   __s32 use_oif_addrs_only;   __s32 keep_addr_on_down;   struct ctl_table_header *sysctl_header; } ;  1402     struct dql {   unsigned int num_queued;   unsigned int adj_limit;   unsigned int last_obj_cnt;   unsigned int limit;   unsigned int num_completed;   unsigned int prev_ovlimit;   unsigned int prev_num_queued;   unsigned int prev_last_obj_cnt;   unsigned int lowest_slack;   unsigned long slack_start_time;   unsigned int max_limit;   unsigned int min_limit;   unsigned int slack_hold_time; } ;    43     struct __anonstruct_sync_serial_settings_410 {   unsigned int clock_rate;   unsigned int clock_type;   unsigned short loopback; } ;    43     typedef struct __anonstruct_sync_serial_settings_410 sync_serial_settings;    50     struct __anonstruct_te1_settings_411 {   unsigned int clock_rate;   unsigned int clock_type;   unsigned short loopback;   unsigned int slot_map; } ;    50     typedef struct __anonstruct_te1_settings_411 te1_settings;    55     struct __anonstruct_raw_hdlc_proto_412 {   unsigned short encoding;   unsigned short parity; } ;    55     typedef struct __anonstruct_raw_hdlc_proto_412 raw_hdlc_proto;    65     struct __anonstruct_fr_proto_413 {   unsigned int t391;   unsigned int t392;   unsigned int n391;   unsigned int n392;   unsigned int n393;   unsigned short lmi;   unsigned short dce; } ;    65     typedef struct __anonstruct_fr_proto_413 fr_proto;    69     struct __anonstruct_fr_proto_pvc_414 {   unsigned int dlci; } ;    69     typedef struct __anonstruct_fr_proto_pvc_414 fr_proto_pvc;    74     struct __anonstruct_fr_proto_pvc_info_415 {   unsigned int dlci;   char master[16U]; } ;    74     typedef struct __anonstruct_fr_proto_pvc_info_415 fr_proto_pvc_info;    79     struct __anonstruct_cisco_proto_416 {   unsigned int interval;   unsigned int timeout; } ;    79     typedef struct __anonstruct_cisco_proto_416 cisco_proto;   117     struct ifmap {   unsigned long mem_start;   unsigned long mem_end;   unsigned short base_addr;   unsigned char irq;   unsigned char dma;   unsigned char port; } ;   197     union __anonunion_ifs_ifsu_417 {   raw_hdlc_proto *raw_hdlc;   cisco_proto *cisco;   fr_proto *fr;   fr_proto_pvc *fr_pvc;   fr_proto_pvc_info *fr_pvc_info;   sync_serial_settings *sync;   te1_settings *te1; } ;   197     struct if_settings {   unsigned int type;   unsigned int size;   union __anonunion_ifs_ifsu_417 ifs_ifsu; } ;   216     union __anonunion_ifr_ifrn_418 {   char ifrn_name[16U]; } ;   216     union __anonunion_ifr_ifru_419 {   
struct sockaddr ifru_addr;   struct sockaddr ifru_dstaddr;   struct sockaddr ifru_broadaddr;   struct sockaddr ifru_netmask;   struct sockaddr ifru_hwaddr;   short ifru_flags;   int ifru_ivalue;   int ifru_mtu;   struct ifmap ifru_map;   char ifru_slave[16U];   char ifru_newname[16U];   void *ifru_data;   struct if_settings ifru_settings; } ;   216     struct ifreq {   union __anonunion_ifr_ifrn_418 ifr_ifrn;   union __anonunion_ifr_ifru_419 ifr_ifru; } ;    18     typedef s32 compat_time_t;    39     typedef s32 compat_long_t;    45     typedef u32 compat_uptr_t;    46     struct compat_timespec {   compat_time_t tv_sec;   s32 tv_nsec; } ;   278     struct compat_robust_list {   compat_uptr_t next; } ;   282     struct compat_robust_list_head {   struct compat_robust_list list;   compat_long_t futex_offset;   compat_uptr_t list_op_pending; } ;    39     struct ethtool_cmd {   __u32 cmd;   __u32 supported;   __u32 advertising;   __u16 speed;   __u8 duplex;   __u8 port;   __u8 phy_address;   __u8 transceiver;   __u8 autoneg;   __u8 mdio_support;   __u32 maxtxpkt;   __u32 maxrxpkt;   __u16 speed_hi;   __u8 eth_tp_mdix;   __u8 eth_tp_mdix_ctrl;   __u32 lp_advertising;   __u32 reserved[2U]; } ;   131     struct ethtool_drvinfo {   __u32 cmd;   char driver[32U];   char version[32U];   char fw_version[32U];   char bus_info[32U];   char erom_version[32U];   char reserved2[12U];   __u32 n_priv_flags;   __u32 n_stats;   __u32 testinfo_len;   __u32 eedump_len;   __u32 regdump_len; } ;   195     struct ethtool_wolinfo {   __u32 cmd;   __u32 supported;   __u32 wolopts;   __u8 sopass[6U]; } ;   239     struct ethtool_tunable {   __u32 cmd;   __u32 id;   __u32 type_id;   __u32 len;   void *data[0U]; } ;   251     struct ethtool_regs {   __u32 cmd;   __u32 version;   __u32 len;   __u8 data[0U]; } ;   273     struct ethtool_eeprom {   __u32 cmd;   __u32 magic;   __u32 offset;   __u32 len;   __u8 data[0U]; } ;   299     struct ethtool_eee {   __u32 cmd;   __u32 supported;   __u32 advertised;   __u32 lp_advertised;   __u32 eee_active;   __u32 eee_enabled;   __u32 tx_lpi_enabled;   __u32 tx_lpi_timer;   __u32 reserved[2U]; } ;   328     struct ethtool_modinfo {   __u32 cmd;   __u32 type;   __u32 eeprom_len;   __u32 reserved[8U]; } ;   345     struct ethtool_coalesce {   __u32 cmd;   __u32 rx_coalesce_usecs;   __u32 rx_max_coalesced_frames;   __u32 rx_coalesce_usecs_irq;   __u32 rx_max_coalesced_frames_irq;   __u32 tx_coalesce_usecs;   __u32 tx_max_coalesced_frames;   __u32 tx_coalesce_usecs_irq;   __u32 tx_max_coalesced_frames_irq;   __u32 stats_block_coalesce_usecs;   __u32 use_adaptive_rx_coalesce;   __u32 use_adaptive_tx_coalesce;   __u32 pkt_rate_low;   __u32 rx_coalesce_usecs_low;   __u32 rx_max_coalesced_frames_low;   __u32 tx_coalesce_usecs_low;   __u32 tx_max_coalesced_frames_low;   __u32 pkt_rate_high;   __u32 rx_coalesce_usecs_high;   __u32 rx_max_coalesced_frames_high;   __u32 tx_coalesce_usecs_high;   __u32 tx_max_coalesced_frames_high;   __u32 rate_sample_interval; } ;   444     struct ethtool_ringparam {   __u32 cmd;   __u32 rx_max_pending;   __u32 rx_mini_max_pending;   __u32 rx_jumbo_max_pending;   __u32 tx_max_pending;   __u32 rx_pending;   __u32 rx_mini_pending;   __u32 rx_jumbo_pending;   __u32 tx_pending; } ;   481     struct ethtool_channels {   __u32 cmd;   __u32 max_rx;   __u32 max_tx;   __u32 max_other;   __u32 max_combined;   __u32 rx_count;   __u32 tx_count;   __u32 other_count;   __u32 combined_count; } ;   509     struct ethtool_pauseparam {   __u32 cmd;   __u32 autoneg;   __u32 
rx_pause;   __u32 tx_pause; } ;   613     struct ethtool_test {   __u32 cmd;   __u32 flags;   __u32 reserved;   __u32 len;   __u64 data[0U]; } ;   645     struct ethtool_stats {   __u32 cmd;   __u32 n_stats;   __u64 data[0U]; } ;   687     struct ethtool_tcpip4_spec {   __be32 ip4src;   __be32 ip4dst;   __be16 psrc;   __be16 pdst;   __u8 tos; } ;   720     struct ethtool_ah_espip4_spec {   __be32 ip4src;   __be32 ip4dst;   __be32 spi;   __u8 tos; } ;   736     struct ethtool_usrip4_spec {   __be32 ip4src;   __be32 ip4dst;   __be32 l4_4_bytes;   __u8 tos;   __u8 ip_ver;   __u8 proto; } ;   756     struct ethtool_tcpip6_spec {   __be32 ip6src[4U];   __be32 ip6dst[4U];   __be16 psrc;   __be16 pdst;   __u8 tclass; } ;   774     struct ethtool_ah_espip6_spec {   __be32 ip6src[4U];   __be32 ip6dst[4U];   __be32 spi;   __u8 tclass; } ;   790     struct ethtool_usrip6_spec {   __be32 ip6src[4U];   __be32 ip6dst[4U];   __be32 l4_4_bytes;   __u8 tclass;   __u8 l4_proto; } ;   806     union ethtool_flow_union {   struct ethtool_tcpip4_spec tcp_ip4_spec;   struct ethtool_tcpip4_spec udp_ip4_spec;   struct ethtool_tcpip4_spec sctp_ip4_spec;   struct ethtool_ah_espip4_spec ah_ip4_spec;   struct ethtool_ah_espip4_spec esp_ip4_spec;   struct ethtool_usrip4_spec usr_ip4_spec;   struct ethtool_tcpip6_spec tcp_ip6_spec;   struct ethtool_tcpip6_spec udp_ip6_spec;   struct ethtool_tcpip6_spec sctp_ip6_spec;   struct ethtool_ah_espip6_spec ah_ip6_spec;   struct ethtool_ah_espip6_spec esp_ip6_spec;   struct ethtool_usrip6_spec usr_ip6_spec;   struct ethhdr ether_spec;   __u8 hdata[52U]; } ;   823     struct ethtool_flow_ext {   __u8 padding[2U];   unsigned char h_dest[6U];   __be16 vlan_etype;   __be16 vlan_tci;   __be32 data[2U]; } ;   842     struct ethtool_rx_flow_spec {   __u32 flow_type;   union ethtool_flow_union h_u;   struct ethtool_flow_ext h_ext;   union ethtool_flow_union m_u;   struct ethtool_flow_ext m_ext;   __u64 ring_cookie;   __u32 location; } ;   892     struct ethtool_rxnfc {   __u32 cmd;   __u32 flow_type;   __u64 data;   struct ethtool_rx_flow_spec fs;   __u32 rule_cnt;   __u32 rule_locs[0U]; } ;  1063     struct ethtool_flash {   __u32 cmd;   __u32 region;   char data[128U]; } ;  1071     struct ethtool_dump {   __u32 cmd;   __u32 version;   __u32 flag;   __u32 len;   __u8 data[0U]; } ;  1147     struct ethtool_ts_info {   __u32 cmd;   __u32 so_timestamping;   __s32 phc_index;   __u32 tx_types;   __u32 tx_reserved[3U];   __u32 rx_filters;   __u32 rx_reserved[3U]; } ;  1515     struct ethtool_link_settings {   __u32 cmd;   __u32 speed;   __u8 duplex;   __u8 port;   __u8 phy_address;   __u8 autoneg;   __u8 mdio_support;   __u8 eth_tp_mdix;   __u8 eth_tp_mdix_ctrl;   __s8 link_mode_masks_nwords;   __u32 reserved[8U];   __u32 link_mode_masks[0U]; } ;    39     enum ethtool_phys_id_state {   ETHTOOL_ID_INACTIVE = 0,   ETHTOOL_ID_ACTIVE = 1,   ETHTOOL_ID_ON = 2,   ETHTOOL_ID_OFF = 3 } ;    97     struct __anonstruct_link_modes_439 {   unsigned long supported[1U];   unsigned long advertising[1U];   unsigned long lp_advertising[1U]; } ;    97     struct ethtool_link_ksettings {   struct ethtool_link_settings base;   struct __anonstruct_link_modes_439 link_modes; } ;   158     struct ethtool_ops {   int (*get_settings)(struct net_device *, struct ethtool_cmd *);   int (*set_settings)(struct net_device *, struct ethtool_cmd *);   void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *);   int (*get_regs_len)(struct net_device *);   void (*get_regs)(struct net_device *, struct ethtool_regs 
*, void *);   void (*get_wol)(struct net_device *, struct ethtool_wolinfo *);   int (*set_wol)(struct net_device *, struct ethtool_wolinfo *);   u32  (*get_msglevel)(struct net_device *);   void (*set_msglevel)(struct net_device *, u32 );   int (*nway_reset)(struct net_device *);   u32  (*get_link)(struct net_device *);   int (*get_eeprom_len)(struct net_device *);   int (*get_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *);   int (*set_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *);   int (*get_coalesce)(struct net_device *, struct ethtool_coalesce *);   int (*set_coalesce)(struct net_device *, struct ethtool_coalesce *);   void (*get_ringparam)(struct net_device *, struct ethtool_ringparam *);   int (*set_ringparam)(struct net_device *, struct ethtool_ringparam *);   void (*get_pauseparam)(struct net_device *, struct ethtool_pauseparam *);   int (*set_pauseparam)(struct net_device *, struct ethtool_pauseparam *);   void (*self_test)(struct net_device *, struct ethtool_test *, u64 *);   void (*get_strings)(struct net_device *, u32 , u8 *);   int (*set_phys_id)(struct net_device *, enum ethtool_phys_id_state );   void (*get_ethtool_stats)(struct net_device *, struct ethtool_stats *, u64 *);   int (*begin)(struct net_device *);   void (*complete)(struct net_device *);   u32  (*get_priv_flags)(struct net_device *);   int (*set_priv_flags)(struct net_device *, u32 );   int (*get_sset_count)(struct net_device *, int);   int (*get_rxnfc)(struct net_device *, struct ethtool_rxnfc *, u32 *);   int (*set_rxnfc)(struct net_device *, struct ethtool_rxnfc *);   int (*flash_device)(struct net_device *, struct ethtool_flash *);   int (*reset)(struct net_device *, u32 *);   u32  (*get_rxfh_key_size)(struct net_device *);   u32  (*get_rxfh_indir_size)(struct net_device *);   int (*get_rxfh)(struct net_device *, u32 *, u8 *, u8 *);   int (*set_rxfh)(struct net_device *, const u32 *, const u8 *, const u8 );   void (*get_channels)(struct net_device *, struct ethtool_channels *);   int (*set_channels)(struct net_device *, struct ethtool_channels *);   int (*get_dump_flag)(struct net_device *, struct ethtool_dump *);   int (*get_dump_data)(struct net_device *, struct ethtool_dump *, void *);   int (*set_dump)(struct net_device *, struct ethtool_dump *);   int (*get_ts_info)(struct net_device *, struct ethtool_ts_info *);   int (*get_module_info)(struct net_device *, struct ethtool_modinfo *);   int (*get_module_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *);   int (*get_eee)(struct net_device *, struct ethtool_eee *);   int (*set_eee)(struct net_device *, struct ethtool_eee *);   int (*get_tunable)(struct net_device *, const struct ethtool_tunable *, void *);   int (*set_tunable)(struct net_device *, const struct ethtool_tunable *, const void *);   int (*get_per_queue_coalesce)(struct net_device *, u32 , struct ethtool_coalesce *);   int (*set_per_queue_coalesce)(struct net_device *, u32 , struct ethtool_coalesce *);   int (*get_link_ksettings)(struct net_device *, struct ethtool_link_ksettings *);   int (*set_link_ksettings)(struct net_device *, const struct ethtool_link_ksettings *); } ;   375     struct prot_inuse ;   376     struct netns_core {   struct ctl_table_header *sysctl_hdr;   int sysctl_somaxconn;   struct prot_inuse *inuse; } ;    38     struct u64_stats_sync { } ;   160     struct ipstats_mib {   u64 mibs[36U];   struct u64_stats_sync syncp; } ;    61     struct icmp_mib {   unsigned long mibs[28U]; } ;    67     struct icmpmsg_mib {   atomic_long_t 
mibs[512U]; } ;    72     struct icmpv6_mib {   unsigned long mibs[6U]; } ;    79     struct icmpv6_mib_device {   atomic_long_t mibs[6U]; } ;    83     struct icmpv6msg_mib {   atomic_long_t mibs[512U]; } ;    89     struct icmpv6msg_mib_device {   atomic_long_t mibs[512U]; } ;    93     struct tcp_mib {   unsigned long mibs[16U]; } ;   100     struct udp_mib {   unsigned long mibs[9U]; } ;   106     struct linux_mib {   unsigned long mibs[117U]; } ;   112     struct linux_xfrm_mib {   unsigned long mibs[29U]; } ;   118     struct proc_dir_entry ;   118     struct netns_mib {   struct tcp_mib *tcp_statistics;   struct ipstats_mib *ip_statistics;   struct linux_mib *net_statistics;   struct udp_mib *udp_statistics;   struct udp_mib *udplite_statistics;   struct icmp_mib *icmp_statistics;   struct icmpmsg_mib *icmpmsg_statistics;   struct proc_dir_entry *proc_net_devsnmp6;   struct udp_mib *udp_stats_in6;   struct udp_mib *udplite_stats_in6;   struct ipstats_mib *ipv6_statistics;   struct icmpv6_mib *icmpv6_statistics;   struct icmpv6msg_mib *icmpv6msg_statistics;   struct linux_xfrm_mib *xfrm_statistics; } ;    26     struct netns_unix {   int sysctl_max_dgram_qlen;   struct ctl_table_header *ctl; } ;    12     struct netns_packet {   struct mutex sklist_lock;   struct hlist_head sklist; } ;    14     struct netns_frags {   struct percpu_counter mem;   int timeout;   int high_thresh;   int low_thresh;   int max_dist; } ;   187     struct ipv4_devconf ;   188     struct fib_rules_ops ;   189     struct fib_table ;   190     struct local_ports {   seqlock_t lock;   int range[2U];   bool warned; } ;    24     struct ping_group_range {   seqlock_t lock;   kgid_t range[2U]; } ;    29     struct inet_peer_base ;    29     struct xt_table ;    29     struct netns_ipv4 {   struct ctl_table_header *forw_hdr;   struct ctl_table_header *frags_hdr;   struct ctl_table_header *ipv4_hdr;   struct ctl_table_header *route_hdr;   struct ctl_table_header *xfrm4_hdr;   struct ipv4_devconf *devconf_all;   struct ipv4_devconf *devconf_dflt;   struct fib_rules_ops *rules_ops;   bool fib_has_custom_rules;   struct fib_table *fib_local;   struct fib_table *fib_main;   struct fib_table *fib_default;   int fib_num_tclassid_users;   struct hlist_head *fib_table_hash;   bool fib_offload_disabled;   struct sock *fibnl;   struct sock **icmp_sk;   struct sock *mc_autojoin_sk;   struct inet_peer_base *peers;   struct sock **tcp_sk;   struct netns_frags frags;   struct xt_table *iptable_filter;   struct xt_table *iptable_mangle;   struct xt_table *iptable_raw;   struct xt_table *arptable_filter;   struct xt_table *iptable_security;   struct xt_table *nat_table;   int sysctl_icmp_echo_ignore_all;   int sysctl_icmp_echo_ignore_broadcasts;   int sysctl_icmp_ignore_bogus_error_responses;   int sysctl_icmp_ratelimit;   int sysctl_icmp_ratemask;   int sysctl_icmp_errors_use_inbound_ifaddr;   struct local_ports ip_local_ports;   int sysctl_tcp_ecn;   int sysctl_tcp_ecn_fallback;   int sysctl_ip_default_ttl;   int sysctl_ip_no_pmtu_disc;   int sysctl_ip_fwd_use_pmtu;   int sysctl_ip_nonlocal_bind;   int sysctl_ip_dynaddr;   int sysctl_ip_early_demux;   int sysctl_fwmark_reflect;   int sysctl_tcp_fwmark_accept;   int sysctl_tcp_l3mdev_accept;   int sysctl_tcp_mtu_probing;   int sysctl_tcp_base_mss;   int sysctl_tcp_probe_threshold;   u32 sysctl_tcp_probe_interval;   int sysctl_tcp_keepalive_time;   int sysctl_tcp_keepalive_probes;   int sysctl_tcp_keepalive_intvl;   int sysctl_tcp_syn_retries;   int sysctl_tcp_synack_retries;   int 
sysctl_tcp_syncookies;   int sysctl_tcp_reordering;   int sysctl_tcp_retries1;   int sysctl_tcp_retries2;   int sysctl_tcp_orphan_retries;   int sysctl_tcp_fin_timeout;   unsigned int sysctl_tcp_notsent_lowat;   int sysctl_igmp_max_memberships;   int sysctl_igmp_max_msf;   int sysctl_igmp_llm_reports;   int sysctl_igmp_qrv;   struct ping_group_range ping_group_range;   atomic_t dev_addr_genid;   unsigned long *sysctl_local_reserved_ports;   struct list_head mr_tables;   struct fib_rules_ops *mr_rules_ops;   int sysctl_fib_multipath_use_neigh;   atomic_t rt_genid; } ;   142     struct neighbour ;   142     struct dst_ops {   unsigned short family;   unsigned int gc_thresh;   int (*gc)(struct dst_ops *);   struct dst_entry * (*check)(struct dst_entry *, __u32 );   unsigned int (*default_advmss)(const struct dst_entry *);   unsigned int (*mtu)(const struct dst_entry *);   u32 * (*cow_metrics)(struct dst_entry *, unsigned long);   void (*destroy)(struct dst_entry *);   void (*ifdown)(struct dst_entry *, struct net_device *, int);   struct dst_entry * (*negative_advice)(struct dst_entry *);   void (*link_failure)(struct sk_buff *);   void (*update_pmtu)(struct dst_entry *, struct sock *, struct sk_buff *, u32 );   void (*redirect)(struct dst_entry *, struct sock *, struct sk_buff *);   int (*local_out)(struct net *, struct sock *, struct sk_buff *);   struct neighbour * (*neigh_lookup)(const struct dst_entry *, struct sk_buff *, const void *);   struct kmem_cache *kmem_cachep;   struct percpu_counter pcpuc_entries; } ;    73     struct netns_sysctl_ipv6 {   struct ctl_table_header *hdr;   struct ctl_table_header *route_hdr;   struct ctl_table_header *icmp_hdr;   struct ctl_table_header *frags_hdr;   struct ctl_table_header *xfrm6_hdr;   int bindv6only;   int flush_delay;   int ip6_rt_max_size;   int ip6_rt_gc_min_interval;   int ip6_rt_gc_timeout;   int ip6_rt_gc_interval;   int ip6_rt_gc_elasticity;   int ip6_rt_mtu_expires;   int ip6_rt_min_advmss;   int flowlabel_consistency;   int auto_flowlabels;   int icmpv6_time;   int anycast_src_echo_reply;   int ip_nonlocal_bind;   int fwmark_reflect;   int idgen_retries;   int idgen_delay;   int flowlabel_state_ranges; } ;    40     struct rt6_info ;    40     struct rt6_statistics ;    40     struct fib6_table ;    40     struct netns_ipv6 {   struct netns_sysctl_ipv6 sysctl;   struct ipv6_devconf *devconf_all;   struct ipv6_devconf *devconf_dflt;   struct inet_peer_base *peers;   struct netns_frags frags;   struct xt_table *ip6table_filter;   struct xt_table *ip6table_mangle;   struct xt_table *ip6table_raw;   struct xt_table *ip6table_security;   struct xt_table *ip6table_nat;   struct rt6_info *ip6_null_entry;   struct rt6_statistics *rt6_stats;   struct timer_list ip6_fib_timer;   struct hlist_head *fib_table_hash;   struct fib6_table *fib6_main_tbl;   struct list_head fib6_walkers;   struct dst_ops ip6_dst_ops;   rwlock_t fib6_walker_lock;   spinlock_t fib6_gc_lock;   unsigned int ip6_rt_gc_expire;   unsigned long ip6_rt_last_gc;   struct rt6_info *ip6_prohibit_entry;   struct rt6_info *ip6_blk_hole_entry;   struct fib6_table *fib6_local_tbl;   struct fib_rules_ops *fib6_rules_ops;   struct sock **icmp_sk;   struct sock *ndisc_sk;   struct sock *tcp_sk;   struct sock *igmp_sk;   struct sock *mc_autojoin_sk;   struct list_head mr6_tables;   struct fib_rules_ops *mr6_rules_ops;   atomic_t dev_addr_genid;   atomic_t fib6_sernum; } ;    89     struct netns_nf_frag {   struct netns_sysctl_ipv6 sysctl;   struct netns_frags frags; } ;    95     struct 
netns_sysctl_lowpan {   struct ctl_table_header *frags_hdr; } ;    14     struct netns_ieee802154_lowpan {   struct netns_sysctl_lowpan sysctl;   struct netns_frags frags; } ;    20     struct sctp_mib ;    21     struct netns_sctp {   struct sctp_mib *sctp_statistics;   struct proc_dir_entry *proc_net_sctp;   struct ctl_table_header *sysctl_header;   struct sock *ctl_sock;   struct list_head local_addr_list;   struct list_head addr_waitq;   struct timer_list addr_wq_timer;   struct list_head auto_asconf_splist;   spinlock_t addr_wq_lock;   spinlock_t local_addr_lock;   unsigned int rto_initial;   unsigned int rto_min;   unsigned int rto_max;   int rto_alpha;   int rto_beta;   int max_burst;   int cookie_preserve_enable;   char *sctp_hmac_alg;   unsigned int valid_cookie_life;   unsigned int sack_timeout;   unsigned int hb_interval;   int max_retrans_association;   int max_retrans_path;   int max_retrans_init;   int pf_retrans;   int pf_enable;   int sndbuf_policy;   int rcvbuf_policy;   int default_auto_asconf;   int addip_enable;   int addip_noauth;   int prsctp_enable;   int auth_enable;   int scope_policy;   int rwnd_upd_shift;   unsigned long max_autoclose; } ;   141     struct netns_dccp {   struct sock *v4_ctl_sk;   struct sock *v6_ctl_sk; } ;    39     struct in_addr {   __be32 s_addr; } ;   225     struct sockaddr_in {   __kernel_sa_family_t sin_family;   __be16 sin_port;   struct in_addr sin_addr;   unsigned char __pad[8U]; } ;    79     struct nf_logger ;    80     struct nf_queue_handler ;    81     struct netns_nf {   struct proc_dir_entry *proc_netfilter;   const struct nf_queue_handler *queue_handler;   const struct nf_logger *nf_loggers[13U];   struct ctl_table_header *nf_log_dir_header;   struct list_head hooks[13U][8U]; } ;    21     struct ebt_table ;    22     struct netns_xt {   struct list_head tables[13U];   bool notrack_deprecated_warning;   bool clusterip_deprecated_warning;   struct ebt_table *broute_table;   struct ebt_table *frame_filter;   struct ebt_table *frame_nat; } ;    19     struct hlist_nulls_node ;    19     struct hlist_nulls_head {   struct hlist_nulls_node *first; } ;    23     struct hlist_nulls_node {   struct hlist_nulls_node *next;   struct hlist_nulls_node **pprev; } ;    32     struct nf_proto_net {   struct ctl_table_header *ctl_table_header;   struct ctl_table *ctl_table;   struct ctl_table_header *ctl_compat_header;   struct ctl_table *ctl_compat_table;   unsigned int users; } ;    25     struct nf_generic_net {   struct nf_proto_net pn;   unsigned int timeout; } ;    30     struct nf_tcp_net {   struct nf_proto_net pn;   unsigned int timeouts[14U];   unsigned int tcp_loose;   unsigned int tcp_be_liberal;   unsigned int tcp_max_retrans; } ;    44     struct nf_udp_net {   struct nf_proto_net pn;   unsigned int timeouts[2U]; } ;    49     struct nf_icmp_net {   struct nf_proto_net pn;   unsigned int timeout; } ;    54     struct nf_ip_net {   struct nf_generic_net generic;   struct nf_tcp_net tcp;   struct nf_udp_net udp;   struct nf_icmp_net icmp;   struct nf_icmp_net icmpv6;   struct ctl_table_header *ctl_table_header;   struct ctl_table *ctl_table; } ;    65     struct ct_pcpu {   spinlock_t lock;   struct hlist_nulls_head unconfirmed;   struct hlist_nulls_head dying; } ;    72     struct ip_conntrack_stat ;    72     struct nf_ct_event_notifier ;    72     struct nf_exp_event_notifier ;    72     struct netns_ct {   atomic_t count;   unsigned int expect_count;   struct delayed_work ecache_dwork;   bool ecache_dwork_pending;   struct 
ctl_table_header *sysctl_header;   struct ctl_table_header *acct_sysctl_header;   struct ctl_table_header *tstamp_sysctl_header;   struct ctl_table_header *event_sysctl_header;   struct ctl_table_header *helper_sysctl_header;   unsigned int sysctl_log_invalid;   int sysctl_events;   int sysctl_acct;   int sysctl_auto_assign_helper;   bool auto_assign_helper_warned;   int sysctl_tstamp;   int sysctl_checksum;   struct ct_pcpu *pcpu_lists;   struct ip_conntrack_stat *stat;   struct nf_ct_event_notifier *nf_conntrack_event_cb;   struct nf_exp_event_notifier *nf_expect_event_cb;   struct nf_ip_net nf_ct_proto;   unsigned int labels_used;   u8 label_words; } ;   104     struct nft_af_info ;   105     struct netns_nftables {   struct list_head af_info;   struct list_head commit_list;   struct nft_af_info *ipv4;   struct nft_af_info *ipv6;   struct nft_af_info *inet;   struct nft_af_info *arp;   struct nft_af_info *bridge;   struct nft_af_info *netdev;   unsigned int base_seq;   u8 gencursor; } ;   486     struct tasklet_struct {   struct tasklet_struct *next;   unsigned long state;   atomic_t count;   void (*func)(unsigned long);   unsigned long data; } ;   708     struct flow_cache_percpu {   struct hlist_head *hash_table;   int hash_count;   u32 hash_rnd;   int hash_rnd_recalc;   struct tasklet_struct flush_tasklet; } ;    16     struct flow_cache {   u32 hash_shift;   struct flow_cache_percpu *percpu;   struct notifier_block hotcpu_notifier;   int low_watermark;   int high_watermark;   struct timer_list rnd_timer; } ;    25     struct xfrm_policy_hash {   struct hlist_head *table;   unsigned int hmask;   u8 dbits4;   u8 sbits4;   u8 dbits6;   u8 sbits6; } ;    21     struct xfrm_policy_hthresh {   struct work_struct work;   seqlock_t lock;   u8 lbits4;   u8 rbits4;   u8 lbits6;   u8 rbits6; } ;    30     struct netns_xfrm {   struct list_head state_all;   struct hlist_head *state_bydst;   struct hlist_head *state_bysrc;   struct hlist_head *state_byspi;   unsigned int state_hmask;   unsigned int state_num;   struct work_struct state_hash_work;   struct hlist_head state_gc_list;   struct work_struct state_gc_work;   struct list_head policy_all;   struct hlist_head *policy_byidx;   unsigned int policy_idx_hmask;   struct hlist_head policy_inexact[3U];   struct xfrm_policy_hash policy_bydst[3U];   unsigned int policy_count[6U];   struct work_struct policy_hash_work;   struct xfrm_policy_hthresh policy_hthresh;   struct sock *nlsk;   struct sock *nlsk_stash;   u32 sysctl_aevent_etime;   u32 sysctl_aevent_rseqth;   int sysctl_larval_drop;   u32 sysctl_acq_expires;   struct ctl_table_header *sysctl_hdr;   struct dst_ops xfrm4_dst_ops;   struct dst_ops xfrm6_dst_ops;   spinlock_t xfrm_state_lock;   rwlock_t xfrm_policy_lock;   struct mutex xfrm_cfg_mutex;   struct flow_cache flow_cache_global;   atomic_t flow_cache_genid;   struct list_head flow_cache_gc_list;   atomic_t flow_cache_gc_count;   spinlock_t flow_cache_gc_lock;   struct work_struct flow_cache_gc_work;   struct work_struct flow_cache_flush_work;   struct mutex flow_flush_sem; } ;    89     struct mpls_route ;    90     struct netns_mpls {   size_t platform_labels;   struct mpls_route **platform_label;   struct ctl_table_header *ctl; } ;    16     struct proc_ns_operations ;    17     struct ns_common {   atomic_long_t stashed;   const struct proc_ns_operations *ops;   unsigned int inum; } ;    11     struct net_generic ;    12     struct netns_ipvs ;    13     struct net {   atomic_t passive;   atomic_t count;   spinlock_t 
rules_mod_lock;   atomic64_t cookie_gen;   struct list_head list;   struct list_head cleanup_list;   struct list_head exit_list;   struct user_namespace *user_ns;   spinlock_t nsid_lock;   struct idr netns_ids;   struct ns_common ns;   struct proc_dir_entry *proc_net;   struct proc_dir_entry *proc_net_stat;   struct ctl_table_set sysctls;   struct sock *rtnl;   struct sock *genl_sock;   struct list_head dev_base_head;   struct hlist_head *dev_name_head;   struct hlist_head *dev_index_head;   unsigned int dev_base_seq;   int ifindex;   unsigned int dev_unreg_count;   struct list_head rules_ops;   struct net_device *loopback_dev;   struct netns_core core;   struct netns_mib mib;   struct netns_packet packet;   struct netns_unix unx;   struct netns_ipv4 ipv4;   struct netns_ipv6 ipv6;   struct netns_ieee802154_lowpan ieee802154_lowpan;   struct netns_sctp sctp;   struct netns_dccp dccp;   struct netns_nf nf;   struct netns_xt xt;   struct netns_ct ct;   struct netns_nftables nft;   struct netns_nf_frag nf_frag;   struct sock *nfnl;   struct sock *nfnl_stash;   struct list_head nfnl_acct_list;   struct list_head nfct_timeout_list;   struct sk_buff_head wext_nlevents;   struct net_generic *gen;   struct netns_xfrm xfrm;   struct netns_ipvs *ipvs;   struct netns_mpls mpls;   struct sock *diag_nlsk;   atomic_t fnhe_genid; } ;   247     struct __anonstruct_possible_net_t_454 {   struct net *net; } ;   247     typedef struct __anonstruct_possible_net_t_454 possible_net_t;    13     typedef unsigned long kernel_ulong_t;   186     struct acpi_device_id {   __u8 id[9U];   kernel_ulong_t driver_data;   __u32 cls;   __u32 cls_msk; } ;   229     struct of_device_id {   char name[32U];   char type[32U];   char compatible[128U];   const void *data; } ;   674     enum fwnode_type {   FWNODE_INVALID = 0,   FWNODE_OF = 1,   FWNODE_ACPI = 2,   FWNODE_ACPI_DATA = 3,   FWNODE_PDATA = 4,   FWNODE_IRQCHIP = 5 } ;   683     struct fwnode_handle {   enum fwnode_type type;   struct fwnode_handle *secondary; } ;    32     typedef u32 phandle;    34     struct property {   char *name;   int length;   void *value;   struct property *next;   unsigned long _flags;   unsigned int unique_id;   struct bin_attribute attr; } ;    44     struct device_node {   const char *name;   const char *type;   phandle phandle;   const char *full_name;   struct fwnode_handle fwnode;   struct property *properties;   struct property *deadprops;   struct device_node *parent;   struct device_node *child;   struct device_node *sibling;   struct kobject kobj;   unsigned long _flags;   void *data; } ;   296     struct mii_bus ;   303     struct mdio_device {   struct device dev;   const struct dev_pm_ops *pm_ops;   struct mii_bus *bus;   int (*bus_match)(struct device *, struct device_driver *);   void (*device_free)(struct mdio_device *);   void (*device_remove)(struct mdio_device *);   int addr;   int flags; } ;    41     struct mdio_driver_common {   struct device_driver driver;   int flags; } ;   244     struct phy_device ;   245     enum ldv_30804 {   PHY_INTERFACE_MODE_NA = 0,   PHY_INTERFACE_MODE_MII = 1,   PHY_INTERFACE_MODE_GMII = 2,   PHY_INTERFACE_MODE_SGMII = 3,   PHY_INTERFACE_MODE_TBI = 4,   PHY_INTERFACE_MODE_REVMII = 5,   PHY_INTERFACE_MODE_RMII = 6,   PHY_INTERFACE_MODE_RGMII = 7,   PHY_INTERFACE_MODE_RGMII_ID = 8,   PHY_INTERFACE_MODE_RGMII_RXID = 9,   PHY_INTERFACE_MODE_RGMII_TXID = 10,   PHY_INTERFACE_MODE_RTBI = 11,   PHY_INTERFACE_MODE_SMII = 12,   PHY_INTERFACE_MODE_XGMII = 13,   PHY_INTERFACE_MODE_MOCA = 14,   
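/*
 * The enumerators around this point, typedef'd as phy_interface_t just
 * below, name the MAC-to-PHY wiring; the RGMII _ID/_RXID/_TXID variants
 * ask the PHY to add the RGMII clock delays internally. A hedged MAC
 * driver sketch (the bus id and link handler are illustrative):
 *
 *   struct phy_device *phy =
 *           phy_connect(ndev, "mdio-bus:00", my_adjust_link,
 *                       PHY_INTERFACE_MODE_RGMII_ID);
 *   if (IS_ERR(phy))
 *           return PTR_ERR(phy);
 */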
PHY_INTERFACE_MODE_QSGMII = 15,   PHY_INTERFACE_MODE_MAX = 16 } ;    84     typedef enum ldv_30804 phy_interface_t;   130     enum ldv_30855 {   MDIOBUS_ALLOCATED = 1,   MDIOBUS_REGISTERED = 2,   MDIOBUS_UNREGISTERED = 3,   MDIOBUS_RELEASED = 4 } ;   137     struct mii_bus {   struct module *owner;   const char *name;   char id[17U];   void *priv;   int (*read)(struct mii_bus *, int, int);   int (*write)(struct mii_bus *, int, int, u16 );   int (*reset)(struct mii_bus *);   struct mutex mdio_lock;   struct device *parent;   enum ldv_30855 state;   struct device dev;   struct mdio_device *mdio_map[32U];   u32 phy_mask;   u32 phy_ignore_ta_mask;   int irq[32U]; } ;   218     enum phy_state {   PHY_DOWN = 0,   PHY_STARTING = 1,   PHY_READY = 2,   PHY_PENDING = 3,   PHY_UP = 4,   PHY_AN = 5,   PHY_RUNNING = 6,   PHY_NOLINK = 7,   PHY_FORCING = 8,   PHY_CHANGELINK = 9,   PHY_HALTED = 10,   PHY_RESUMING = 11 } ;   233     struct phy_c45_device_ids {   u32 devices_in_package;   u32 device_ids[8U]; } ;   326     struct phy_driver ;   326     struct phy_device {   struct mdio_device mdio;   struct phy_driver *drv;   u32 phy_id;   struct phy_c45_device_ids c45_ids;   bool is_c45;   bool is_internal;   bool is_pseudo_fixed_link;   bool has_fixups;   bool suspended;   enum phy_state state;   u32 dev_flags;   phy_interface_t interface;   int speed;   int duplex;   int pause;   int asym_pause;   int link;   u32 interrupts;   u32 supported;   u32 advertising;   u32 lp_advertising;   int autoneg;   int link_timeout;   int irq;   void *priv;   struct work_struct phy_queue;   struct delayed_work state_queue;   atomic_t irq_disable;   struct mutex lock;   struct net_device *attached_dev;   u8 mdix;   void (*adjust_link)(struct net_device *); } ;   428     struct phy_driver {   struct mdio_driver_common mdiodrv;   u32 phy_id;   char *name;   unsigned int phy_id_mask;   u32 features;   u32 flags;   const void *driver_data;   int (*soft_reset)(struct phy_device *);   int (*config_init)(struct phy_device *);   int (*probe)(struct phy_device *);   int (*suspend)(struct phy_device *);   int (*resume)(struct phy_device *);   int (*config_aneg)(struct phy_device *);   int (*aneg_done)(struct phy_device *);   int (*read_status)(struct phy_device *);   int (*ack_interrupt)(struct phy_device *);   int (*config_intr)(struct phy_device *);   int (*did_interrupt)(struct phy_device *);   void (*remove)(struct phy_device *);   int (*match_phy_device)(struct phy_device *);   int (*ts_info)(struct phy_device *, struct ethtool_ts_info *);   int (*hwtstamp)(struct phy_device *, struct ifreq *);   bool  (*rxtstamp)(struct phy_device *, struct sk_buff *, int);   void (*txtstamp)(struct phy_device *, struct sk_buff *, int);   int (*set_wol)(struct phy_device *, struct ethtool_wolinfo *);   void (*get_wol)(struct phy_device *, struct ethtool_wolinfo *);   void (*link_change_notify)(struct phy_device *);   int (*read_mmd_indirect)(struct phy_device *, int, int, int);   void (*write_mmd_indirect)(struct phy_device *, int, int, int, u32 );   int (*module_info)(struct phy_device *, struct ethtool_modinfo *);   int (*module_eeprom)(struct phy_device *, struct ethtool_eeprom *, u8 *);   int (*get_sset_count)(struct phy_device *);   void (*get_strings)(struct phy_device *, u8 *);   void (*get_stats)(struct phy_device *, struct ethtool_stats *, u64 *); } ;   841     struct fixed_phy_status {   int link;   int speed;   int duplex;   int pause;   int asym_pause; } ;    27     enum dsa_tag_protocol {   DSA_TAG_PROTO_NONE = 0,   
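/*
 * A phy_driver (declared just above) can delegate most of its work to
 * the generic MII helpers. A hedged skeleton with a made-up PHY id:
 *
 *   static struct phy_driver demo_phys[] = { {
 *           .phy_id       = 0x00112233,        hypothetical id
 *           .phy_id_mask  = 0xfffffff0,
 *           .name         = "demo-phy",
 *           .features     = PHY_GBIT_FEATURES,
 *           .config_aneg  = genphy_config_aneg,
 *           .read_status  = genphy_read_status,
 *   } };
 *   module_phy_driver(demo_phys);
 */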
DSA_TAG_PROTO_DSA = 1,   DSA_TAG_PROTO_TRAILER = 2,   DSA_TAG_PROTO_EDSA = 3,   DSA_TAG_PROTO_BRCM = 4,   DSA_TAG_LAST = 5 } ;    36     struct dsa_chip_data {   struct device *host_dev;   int sw_addr;   int eeprom_len;   struct device_node *of_node;   char *port_names[12U];   struct device_node *port_dn[12U];   s8 rtable[4U]; } ;    70     struct dsa_platform_data {   struct device *netdev;   struct net_device *of_netdev;   int nr_chips;   struct dsa_chip_data *chip; } ;    86     struct packet_type ;    87     struct dsa_switch ;    87     struct dsa_device_ops ;    87     struct dsa_switch_tree {   struct list_head list;   u32 tree;   struct kref refcount;   bool applied;   struct dsa_platform_data *pd;   struct net_device *master_netdev;   int (*rcv)(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *);   struct ethtool_ops master_ethtool_ops;   const struct ethtool_ops *master_orig_ethtool_ops;   s8 cpu_switch;   s8 cpu_port;   struct dsa_switch *ds[4U];   const struct dsa_device_ops *tag_ops; } ;   140     struct dsa_port {   struct net_device *netdev;   struct device_node *dn;   unsigned int ageing_time; } ;   146     struct dsa_switch_driver ;   146     struct dsa_switch {   struct device *dev;   struct dsa_switch_tree *dst;   int index;   void *priv;   struct dsa_chip_data *cd;   struct dsa_switch_driver *drv;   s8 rtable[4U];   char hwmon_name[24U];   struct device *hwmon_dev;   struct net_device *master_netdev;   u32 dsa_port_mask;   u32 cpu_port_mask;   u32 enabled_port_mask;   u32 phys_mii_mask;   struct dsa_port ports[12U];   struct mii_bus *slave_mii_bus; } ;   233     struct switchdev_trans ;   234     struct switchdev_obj ;   235     struct switchdev_obj_port_fdb ;   236     struct switchdev_obj_port_vlan ;   237     struct dsa_switch_driver {   struct list_head list;   enum dsa_tag_protocol tag_protocol;   const char * (*probe)(struct device *, struct device *, int, void **);   int (*setup)(struct dsa_switch *);   int (*set_addr)(struct dsa_switch *, u8 *);   u32  (*get_phy_flags)(struct dsa_switch *, int);   int (*phy_read)(struct dsa_switch *, int, int);   int (*phy_write)(struct dsa_switch *, int, int, u16 );   void (*adjust_link)(struct dsa_switch *, int, struct phy_device *);   void (*fixed_link_update)(struct dsa_switch *, int, struct fixed_phy_status *);   void (*get_strings)(struct dsa_switch *, int, uint8_t *);   void (*get_ethtool_stats)(struct dsa_switch *, int, uint64_t *);   int (*get_sset_count)(struct dsa_switch *);   void (*get_wol)(struct dsa_switch *, int, struct ethtool_wolinfo *);   int (*set_wol)(struct dsa_switch *, int, struct ethtool_wolinfo *);   int (*suspend)(struct dsa_switch *);   int (*resume)(struct dsa_switch *);   int (*port_enable)(struct dsa_switch *, int, struct phy_device *);   void (*port_disable)(struct dsa_switch *, int, struct phy_device *);   int (*set_eee)(struct dsa_switch *, int, struct phy_device *, struct ethtool_eee *);   int (*get_eee)(struct dsa_switch *, int, struct ethtool_eee *);   int (*get_temp)(struct dsa_switch *, int *);   int (*get_temp_limit)(struct dsa_switch *, int *);   int (*set_temp_limit)(struct dsa_switch *, int);   int (*get_temp_alarm)(struct dsa_switch *, bool *);   int (*get_eeprom_len)(struct dsa_switch *);   int (*get_eeprom)(struct dsa_switch *, struct ethtool_eeprom *, u8 *);   int (*set_eeprom)(struct dsa_switch *, struct ethtool_eeprom *, u8 *);   int (*get_regs_len)(struct dsa_switch *, int);   void (*get_regs)(struct dsa_switch *, int, struct ethtool_regs *, void 
*);   int (*set_ageing_time)(struct dsa_switch *, unsigned int);   int (*port_bridge_join)(struct dsa_switch *, int, struct net_device *);   void (*port_bridge_leave)(struct dsa_switch *, int);   void (*port_stp_state_set)(struct dsa_switch *, int, u8 );   int (*port_vlan_filtering)(struct dsa_switch *, int, bool );   int (*port_vlan_prepare)(struct dsa_switch *, int, const struct switchdev_obj_port_vlan *, struct switchdev_trans *);   void (*port_vlan_add)(struct dsa_switch *, int, const struct switchdev_obj_port_vlan *, struct switchdev_trans *);   int (*port_vlan_del)(struct dsa_switch *, int, const struct switchdev_obj_port_vlan *);   int (*port_vlan_dump)(struct dsa_switch *, int, struct switchdev_obj_port_vlan *, int (*)(struct switchdev_obj *));   int (*port_fdb_prepare)(struct dsa_switch *, int, const struct switchdev_obj_port_fdb *, struct switchdev_trans *);   void (*port_fdb_add)(struct dsa_switch *, int, const struct switchdev_obj_port_fdb *, struct switchdev_trans *);   int (*port_fdb_del)(struct dsa_switch *, int, const struct switchdev_obj_port_fdb *);   int (*port_fdb_dump)(struct dsa_switch *, int, struct switchdev_obj_port_fdb *, int (*)(struct switchdev_obj *)); } ;   389     struct ieee_ets {   __u8 willing;   __u8 ets_cap;   __u8 cbs;   __u8 tc_tx_bw[8U];   __u8 tc_rx_bw[8U];   __u8 tc_tsa[8U];   __u8 prio_tc[8U];   __u8 tc_reco_bw[8U];   __u8 tc_reco_tsa[8U];   __u8 reco_prio_tc[8U]; } ;    69     struct ieee_maxrate {   __u64 tc_maxrate[8U]; } ;    87     struct ieee_qcn {   __u8 rpg_enable[8U];   __u32 rppp_max_rps[8U];   __u32 rpg_time_reset[8U];   __u32 rpg_byte_reset[8U];   __u32 rpg_threshold[8U];   __u32 rpg_max_rate[8U];   __u32 rpg_ai_rate[8U];   __u32 rpg_hai_rate[8U];   __u32 rpg_gd[8U];   __u32 rpg_min_dec_fac[8U];   __u32 rpg_min_rate[8U];   __u32 cndd_state_machine[8U]; } ;   132     struct ieee_qcn_stats {   __u64 rppp_rp_centiseconds[8U];   __u32 rppp_created_rps[8U]; } ;   144     struct ieee_pfc {   __u8 pfc_cap;   __u8 pfc_en;   __u8 mbc;   __u16 delay;   __u64 requests[8U];   __u64 indications[8U]; } ;   164     struct cee_pg {   __u8 willing;   __u8 error;   __u8 pg_en;   __u8 tcs_supported;   __u8 pg_bw[8U];   __u8 prio_pg[8U]; } ;   187     struct cee_pfc {   __u8 willing;   __u8 error;   __u8 pfc_en;   __u8 tcs_supported; } ;   202     struct dcb_app {   __u8 selector;   __u8 priority;   __u16 protocol; } ;   236     struct dcb_peer_app_info {   __u8 willing;   __u8 error; } ;    40     struct dcbnl_rtnl_ops {   int (*ieee_getets)(struct net_device *, struct ieee_ets *);   int (*ieee_setets)(struct net_device *, struct ieee_ets *);   int (*ieee_getmaxrate)(struct net_device *, struct ieee_maxrate *);   int (*ieee_setmaxrate)(struct net_device *, struct ieee_maxrate *);   int (*ieee_getqcn)(struct net_device *, struct ieee_qcn *);   int (*ieee_setqcn)(struct net_device *, struct ieee_qcn *);   int (*ieee_getqcnstats)(struct net_device *, struct ieee_qcn_stats *);   int (*ieee_getpfc)(struct net_device *, struct ieee_pfc *);   int (*ieee_setpfc)(struct net_device *, struct ieee_pfc *);   int (*ieee_getapp)(struct net_device *, struct dcb_app *);   int (*ieee_setapp)(struct net_device *, struct dcb_app *);   int (*ieee_delapp)(struct net_device *, struct dcb_app *);   int (*ieee_peer_getets)(struct net_device *, struct ieee_ets *);   int (*ieee_peer_getpfc)(struct net_device *, struct ieee_pfc *);   u8  (*getstate)(struct net_device *);   u8  (*setstate)(struct net_device *, u8 );   void (*getpermhwaddr)(struct net_device *, u8 *);   void 
(*setpgtccfgtx)(struct net_device *, int, u8 , u8 , u8 , u8 );   void (*setpgbwgcfgtx)(struct net_device *, int, u8 );   void (*setpgtccfgrx)(struct net_device *, int, u8 , u8 , u8 , u8 );   void (*setpgbwgcfgrx)(struct net_device *, int, u8 );   void (*getpgtccfgtx)(struct net_device *, int, u8 *, u8 *, u8 *, u8 *);   void (*getpgbwgcfgtx)(struct net_device *, int, u8 *);   void (*getpgtccfgrx)(struct net_device *, int, u8 *, u8 *, u8 *, u8 *);   void (*getpgbwgcfgrx)(struct net_device *, int, u8 *);   void (*setpfccfg)(struct net_device *, int, u8 );   void (*getpfccfg)(struct net_device *, int, u8 *);   u8  (*setall)(struct net_device *);   u8  (*getcap)(struct net_device *, int, u8 *);   int (*getnumtcs)(struct net_device *, int, u8 *);   int (*setnumtcs)(struct net_device *, int, u8 );   u8  (*getpfcstate)(struct net_device *);   void (*setpfcstate)(struct net_device *, u8 );   void (*getbcncfg)(struct net_device *, int, u32 *);   void (*setbcncfg)(struct net_device *, int, u32 );   void (*getbcnrp)(struct net_device *, int, u8 *);   void (*setbcnrp)(struct net_device *, int, u8 );   int (*setapp)(struct net_device *, u8 , u16 , u8 );   int (*getapp)(struct net_device *, u8 , u16 );   u8  (*getfeatcfg)(struct net_device *, int, u8 *);   u8  (*setfeatcfg)(struct net_device *, int, u8 );   u8  (*getdcbx)(struct net_device *);   u8  (*setdcbx)(struct net_device *, u8 );   int (*peer_getappinfo)(struct net_device *, struct dcb_peer_app_info *, u16 *);   int (*peer_getapptable)(struct net_device *, struct dcb_app *);   int (*cee_peer_getpg)(struct net_device *, struct cee_pg *);   int (*cee_peer_getpfc)(struct net_device *, struct cee_pfc *); } ;   105     struct taskstats {   __u16 version;   __u32 ac_exitcode;   __u8 ac_flag;   __u8 ac_nice;   __u64 cpu_count;   __u64 cpu_delay_total;   __u64 blkio_count;   __u64 blkio_delay_total;   __u64 swapin_count;   __u64 swapin_delay_total;   __u64 cpu_run_real_total;   __u64 cpu_run_virtual_total;   char ac_comm[32U];   __u8 ac_sched;   __u8 ac_pad[3U];   __u32 ac_uid;   __u32 ac_gid;   __u32 ac_pid;   __u32 ac_ppid;   __u32 ac_btime;   __u64 ac_etime;   __u64 ac_utime;   __u64 ac_stime;   __u64 ac_minflt;   __u64 ac_majflt;   __u64 coremem;   __u64 virtmem;   __u64 hiwater_rss;   __u64 hiwater_vm;   __u64 read_char;   __u64 write_char;   __u64 read_syscalls;   __u64 write_syscalls;   __u64 read_bytes;   __u64 write_bytes;   __u64 cancelled_write_bytes;   __u64 nvcsw;   __u64 nivcsw;   __u64 ac_utimescaled;   __u64 ac_stimescaled;   __u64 cpu_scaled_run_real_total;   __u64 freepages_count;   __u64 freepages_delay_total; } ;    58     struct mnt_namespace ;    59     struct ipc_namespace ;    60     struct cgroup_namespace ;    61     struct nsproxy {   atomic_t count;   struct uts_namespace *uts_ns;   struct ipc_namespace *ipc_ns;   struct mnt_namespace *mnt_ns;   struct pid_namespace *pid_ns_for_children;   struct net *net_ns;   struct cgroup_namespace *cgroup_ns; } ;    86     struct uid_gid_extent {   u32 first;   u32 lower_first;   u32 count; } ;    19     struct uid_gid_map {   u32 nr_extents;   struct uid_gid_extent extent[5U]; } ;    20     struct user_namespace {   struct uid_gid_map uid_map;   struct uid_gid_map gid_map;   struct uid_gid_map projid_map;   atomic_t count;   struct user_namespace *parent;   int level;   kuid_t owner;   kgid_t group;   struct ns_common ns;   unsigned long flags;   struct key *persistent_keyring_register;   struct rw_semaphore persistent_keyring_register_sem; } ;   609     struct cgroup_namespace {   
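/*
 * The uid_gid_map above backs /proc/<pid>/uid_map: each extent maps ids
 * in [first, first + count) to [lower_first, lower_first + count), i.e.
 * host_id = lower_first + (ns_id - first), so the classic map line
 * "0 100000 65536" sends container uid 0 to host uid 100000. A
 * simplified lookup sketch (no overflow or error handling):
 *
 *   static u32 map_id(const struct uid_gid_map *m, u32 id)
 *   {
 *           u32 i;
 *           for (i = 0; i < m->nr_extents; i++) {
 *                   const struct uid_gid_extent *e = &m->extent[i];
 *                   if (id >= e->first && id - e->first < e->count)
 *                           return e->lower_first + (id - e->first);
 *           }
 *           return (u32)-1;
 *   }
 */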
atomic_t count;   struct ns_common ns;   struct user_namespace *user_ns;   struct css_set *root_cset; } ;   663     struct netprio_map {   struct callback_head rcu;   u32 priomap_len;   u32 priomap[]; } ;    99     struct xfrm_policy ;   100     struct xfrm_state ;   116     struct request_sock ;    41     struct nlmsghdr {   __u32 nlmsg_len;   __u16 nlmsg_type;   __u16 nlmsg_flags;   __u32 nlmsg_seq;   __u32 nlmsg_pid; } ;   143     struct nlattr {   __u16 nla_len;   __u16 nla_type; } ;   105     struct netlink_callback {   struct sk_buff *skb;   const struct nlmsghdr *nlh;   int (*start)(struct netlink_callback *);   int (*dump)(struct sk_buff *, struct netlink_callback *);   int (*done)(struct netlink_callback *);   void *data;   struct module *module;   u16 family;   u16 min_dump_alloc;   unsigned int prev_seq;   unsigned int seq;   long args[6U]; } ;   183     struct ndmsg {   __u8 ndm_family;   __u8 ndm_pad1;   __u16 ndm_pad2;   __s32 ndm_ifindex;   __u16 ndm_state;   __u8 ndm_flags;   __u8 ndm_type; } ;    41     struct rtnl_link_stats64 {   __u64 rx_packets;   __u64 tx_packets;   __u64 rx_bytes;   __u64 tx_bytes;   __u64 rx_errors;   __u64 tx_errors;   __u64 rx_dropped;   __u64 tx_dropped;   __u64 multicast;   __u64 collisions;   __u64 rx_length_errors;   __u64 rx_over_errors;   __u64 rx_crc_errors;   __u64 rx_frame_errors;   __u64 rx_fifo_errors;   __u64 rx_missed_errors;   __u64 tx_aborted_errors;   __u64 tx_carrier_errors;   __u64 tx_fifo_errors;   __u64 tx_heartbeat_errors;   __u64 tx_window_errors;   __u64 rx_compressed;   __u64 tx_compressed;   __u64 rx_nohandler; } ;   840     struct ifla_vf_stats {   __u64 rx_packets;   __u64 tx_packets;   __u64 rx_bytes;   __u64 tx_bytes;   __u64 broadcast;   __u64 multicast; } ;    16     struct ifla_vf_info {   __u32 vf;   __u8 mac[32U];   __u32 vlan;   __u32 qos;   __u32 spoofchk;   __u32 linkstate;   __u32 min_tx_rate;   __u32 max_tx_rate;   __u32 rss_query_en;   __u32 trusted; } ;   118     struct tc_stats {   __u64 bytes;   __u32 packets;   __u32 drops;   __u32 overlimits;   __u32 bps;   __u32 pps;   __u32 qlen;   __u32 backlog; } ;    96     struct tc_sizespec {   unsigned char cell_log;   unsigned char size_log;   short cell_align;   int overhead;   unsigned int linklayer;   unsigned int mpu;   unsigned int mtu;   unsigned int tsize; } ;   486     struct netpoll_info ;   487     struct wireless_dev ;   488     struct wpan_dev ;   489     struct mpls_dev ;   490     struct udp_tunnel_info ;   491     struct bpf_prog ;    69     enum netdev_tx {   __NETDEV_TX_MIN = -2147483648,   NETDEV_TX_OK = 0,   NETDEV_TX_BUSY = 16 } ;   112     typedef enum netdev_tx netdev_tx_t;   131     struct net_device_stats {   unsigned long rx_packets;   unsigned long tx_packets;   unsigned long rx_bytes;   unsigned long tx_bytes;   unsigned long rx_errors;   unsigned long tx_errors;   unsigned long rx_dropped;   unsigned long tx_dropped;   unsigned long multicast;   unsigned long collisions;   unsigned long rx_length_errors;   unsigned long rx_over_errors;   unsigned long rx_crc_errors;   unsigned long rx_frame_errors;   unsigned long rx_fifo_errors;   unsigned long rx_missed_errors;   unsigned long tx_aborted_errors;   unsigned long tx_carrier_errors;   unsigned long tx_fifo_errors;   unsigned long tx_heartbeat_errors;   unsigned long tx_window_errors;   unsigned long rx_compressed;   unsigned long tx_compressed; } ;   194     struct neigh_parms ;   215     struct netdev_hw_addr_list {   struct list_head list;   int count; } ;   220     struct hh_cache 
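/*
 * The nlmsghdr/nlattr pair above is the whole netlink wire format: a
 * 4-byte-aligned message header followed by TLV attributes whose nla_len
 * includes the 4-byte attribute header. A hedged walk over a received
 * message (bounds validation trimmed; handle() is illustrative):
 *
 *   struct nlattr *nla = (struct nlattr *)((char *)nlh + NLMSG_HDRLEN);
 *   int rem = nlh->nlmsg_len - NLMSG_HDRLEN;
 *   while (rem >= (int)sizeof(*nla) && nla->nla_len >= sizeof(*nla)) {
 *           handle(nla->nla_type & NLA_TYPE_MASK, nla);
 *           rem -= NLA_ALIGN(nla->nla_len);
 *           nla = (struct nlattr *)((char *)nla + NLA_ALIGN(nla->nla_len));
 *   }
 */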
{   u16 hh_len;   u16 __pad;   seqlock_t hh_lock;   unsigned long hh_data[16U]; } ;   249     struct header_ops {   int (*create)(struct sk_buff *, struct net_device *, unsigned short, const void *, const void *, unsigned int);   int (*parse)(const struct sk_buff *, unsigned char *);   int (*cache)(const struct neighbour *, struct hh_cache *, __be16 );   void (*cache_update)(struct hh_cache *, const struct net_device *, const unsigned char *);   bool  (*validate)(const char *, unsigned int); } ;   300     struct napi_struct {   struct list_head poll_list;   unsigned long state;   int weight;   unsigned int gro_count;   int (*poll)(struct napi_struct *, int);   spinlock_t poll_lock;   int poll_owner;   struct net_device *dev;   struct sk_buff *gro_list;   struct sk_buff *skb;   struct hrtimer timer;   struct list_head dev_list;   struct hlist_node napi_hash_node;   unsigned int napi_id; } ;   346     enum rx_handler_result {   RX_HANDLER_CONSUMED = 0,   RX_HANDLER_ANOTHER = 1,   RX_HANDLER_EXACT = 2,   RX_HANDLER_PASS = 3 } ;   394     typedef enum rx_handler_result rx_handler_result_t;   395     typedef rx_handler_result_t  rx_handler_func_t(struct sk_buff **);   540     struct Qdisc ;   540     struct netdev_queue {   struct net_device *dev;   struct Qdisc *qdisc;   struct Qdisc *qdisc_sleeping;   struct kobject kobj;   int numa_node;   unsigned long tx_maxrate;   unsigned long trans_timeout;   spinlock_t _xmit_lock;   int xmit_lock_owner;   unsigned long trans_start;   unsigned long state;   struct dql dql; } ;   611     struct rps_map {   unsigned int len;   struct callback_head rcu;   u16 cpus[0U]; } ;   623     struct rps_dev_flow {   u16 cpu;   u16 filter;   unsigned int last_qtail; } ;   635     struct rps_dev_flow_table {   unsigned int mask;   struct callback_head rcu;   struct rps_dev_flow flows[0U]; } ;   687     struct netdev_rx_queue {   struct rps_map *rps_map;   struct rps_dev_flow_table *rps_flow_table;   struct kobject kobj;   struct net_device *dev; } ;   710     struct xps_map {   unsigned int len;   unsigned int alloc_len;   struct callback_head rcu;   u16 queues[0U]; } ;   723     struct xps_dev_maps {   struct callback_head rcu;   struct xps_map *cpu_map[0U]; } ;   734     struct netdev_tc_txq {   u16 count;   u16 offset; } ;   745     struct netdev_fcoe_hbainfo {   char manufacturer[64U];   char serial_number[64U];   char hardware_version[64U];   char driver_version[64U];   char optionrom_version[64U];   char firmware_version[64U];   char model[256U];   char model_description[256U]; } ;   761     struct netdev_phys_item_id {   unsigned char id[32U];   unsigned char id_len; } ;   788     struct tc_cls_u32_offload ;   789     struct tc_cls_flower_offload ;   789     struct tc_cls_matchall_offload ;   789     union __anonunion____missing_field_name_470 {   u8 tc;   struct tc_cls_u32_offload *cls_u32;   struct tc_cls_flower_offload *cls_flower;   struct tc_cls_matchall_offload *cls_mall; } ;   789     struct tc_to_netdev {   unsigned int type;   union __anonunion____missing_field_name_470 __annonCompField106; } ;   804     enum xdp_netdev_command {   XDP_SETUP_PROG = 0,   XDP_QUERY_PROG = 1 } ;   809     union __anonunion____missing_field_name_471 {   struct bpf_prog *prog;   bool prog_attached; } ;   809     struct netdev_xdp {   enum xdp_netdev_command command;   union __anonunion____missing_field_name_471 __annonCompField107; } ;   832     struct net_device_ops {   int (*ndo_init)(struct net_device *);   void (*ndo_uninit)(struct net_device *);   int 
(*ndo_open)(struct net_device *);   int (*ndo_stop)(struct net_device *);   netdev_tx_t  (*ndo_start_xmit)(struct sk_buff *, struct net_device *);   netdev_features_t  (*ndo_features_check)(struct sk_buff *, struct net_device *, netdev_features_t );   u16  (*ndo_select_queue)(struct net_device *, struct sk_buff *, void *, u16  (*)(struct net_device *, struct sk_buff *));   void (*ndo_change_rx_flags)(struct net_device *, int);   void (*ndo_set_rx_mode)(struct net_device *);   int (*ndo_set_mac_address)(struct net_device *, void *);   int (*ndo_validate_addr)(struct net_device *);   int (*ndo_do_ioctl)(struct net_device *, struct ifreq *, int);   int (*ndo_set_config)(struct net_device *, struct ifmap *);   int (*ndo_change_mtu)(struct net_device *, int);   int (*ndo_neigh_setup)(struct net_device *, struct neigh_parms *);   void (*ndo_tx_timeout)(struct net_device *);   struct rtnl_link_stats64 * (*ndo_get_stats64)(struct net_device *, struct rtnl_link_stats64 *);   struct net_device_stats * (*ndo_get_stats)(struct net_device *);   int (*ndo_vlan_rx_add_vid)(struct net_device *, __be16 , u16 );   int (*ndo_vlan_rx_kill_vid)(struct net_device *, __be16 , u16 );   void (*ndo_poll_controller)(struct net_device *);   int (*ndo_netpoll_setup)(struct net_device *, struct netpoll_info *);   void (*ndo_netpoll_cleanup)(struct net_device *);   int (*ndo_busy_poll)(struct napi_struct *);   int (*ndo_set_vf_mac)(struct net_device *, int, u8 *);   int (*ndo_set_vf_vlan)(struct net_device *, int, u16 , u8 );   int (*ndo_set_vf_rate)(struct net_device *, int, int, int);   int (*ndo_set_vf_spoofchk)(struct net_device *, int, bool );   int (*ndo_set_vf_trust)(struct net_device *, int, bool );   int (*ndo_get_vf_config)(struct net_device *, int, struct ifla_vf_info *);   int (*ndo_set_vf_link_state)(struct net_device *, int, int);   int (*ndo_get_vf_stats)(struct net_device *, int, struct ifla_vf_stats *);   int (*ndo_set_vf_port)(struct net_device *, int, struct nlattr **);   int (*ndo_get_vf_port)(struct net_device *, int, struct sk_buff *);   int (*ndo_set_vf_guid)(struct net_device *, int, u64 , int);   int (*ndo_set_vf_rss_query_en)(struct net_device *, int, bool );   int (*ndo_setup_tc)(struct net_device *, u32 , __be16 , struct tc_to_netdev *);   int (*ndo_fcoe_enable)(struct net_device *);   int (*ndo_fcoe_disable)(struct net_device *);   int (*ndo_fcoe_ddp_setup)(struct net_device *, u16 , struct scatterlist *, unsigned int);   int (*ndo_fcoe_ddp_done)(struct net_device *, u16 );   int (*ndo_fcoe_ddp_target)(struct net_device *, u16 , struct scatterlist *, unsigned int);   int (*ndo_fcoe_get_hbainfo)(struct net_device *, struct netdev_fcoe_hbainfo *);   int (*ndo_fcoe_get_wwn)(struct net_device *, u64 *, int);   int (*ndo_rx_flow_steer)(struct net_device *, const struct sk_buff *, u16 , u32 );   int (*ndo_add_slave)(struct net_device *, struct net_device *);   int (*ndo_del_slave)(struct net_device *, struct net_device *);   netdev_features_t  (*ndo_fix_features)(struct net_device *, netdev_features_t );   int (*ndo_set_features)(struct net_device *, netdev_features_t );   int (*ndo_neigh_construct)(struct net_device *, struct neighbour *);   void (*ndo_neigh_destroy)(struct net_device *, struct neighbour *);   int (*ndo_fdb_add)(struct ndmsg *, struct nlattr **, struct net_device *, const unsigned char *, u16 , u16 );   int (*ndo_fdb_del)(struct ndmsg *, struct nlattr **, struct net_device *, const unsigned char *, u16 );   int (*ndo_fdb_dump)(struct sk_buff *, struct netlink_callback *, struct 
net_device *, struct net_device *, int);   int (*ndo_bridge_setlink)(struct net_device *, struct nlmsghdr *, u16 );   int (*ndo_bridge_getlink)(struct sk_buff *, u32 , u32 , struct net_device *, u32 , int);   int (*ndo_bridge_dellink)(struct net_device *, struct nlmsghdr *, u16 );   int (*ndo_change_carrier)(struct net_device *, bool );   int (*ndo_get_phys_port_id)(struct net_device *, struct netdev_phys_item_id *);   int (*ndo_get_phys_port_name)(struct net_device *, char *, size_t );   void (*ndo_udp_tunnel_add)(struct net_device *, struct udp_tunnel_info *);   void (*ndo_udp_tunnel_del)(struct net_device *, struct udp_tunnel_info *);   void * (*ndo_dfwd_add_station)(struct net_device *, struct net_device *);   void (*ndo_dfwd_del_station)(struct net_device *, void *);   netdev_tx_t  (*ndo_dfwd_start_xmit)(struct sk_buff *, struct net_device *, void *);   int (*ndo_get_lock_subclass)(struct net_device *);   int (*ndo_set_tx_maxrate)(struct net_device *, int, u32 );   int (*ndo_get_iflink)(const struct net_device *);   int (*ndo_change_proto_down)(struct net_device *, bool );   int (*ndo_fill_metadata_dst)(struct net_device *, struct sk_buff *);   void (*ndo_set_rx_headroom)(struct net_device *, int);   int (*ndo_xdp)(struct net_device *, struct netdev_xdp *); } ;  1354     struct __anonstruct_adj_list_472 {   struct list_head upper;   struct list_head lower; } ;  1354     struct __anonstruct_all_adj_list_473 {   struct list_head upper;   struct list_head lower; } ;  1354     struct iw_handler_def ;  1354     struct iw_public_data ;  1354     struct switchdev_ops ;  1354     struct l3mdev_ops ;  1354     struct ndisc_ops ;  1354     struct vlan_info ;  1354     struct tipc_bearer ;  1354     struct in_device ;  1354     struct dn_dev ;  1354     struct inet6_dev ;  1354     struct tcf_proto ;  1354     struct cpu_rmap ;  1354     struct pcpu_lstats ;  1354     struct pcpu_sw_netstats ;  1354     struct pcpu_dstats ;  1354     struct pcpu_vstats ;  1354     union __anonunion____missing_field_name_474 {   void *ml_priv;   struct pcpu_lstats *lstats;   struct pcpu_sw_netstats *tstats;   struct pcpu_dstats *dstats;   struct pcpu_vstats *vstats; } ;  1354     struct garp_port ;  1354     struct mrp_port ;  1354     struct rtnl_link_ops ;  1354     struct net_device {   char name[16U];   struct hlist_node name_hlist;   char *ifalias;   unsigned long mem_end;   unsigned long mem_start;   unsigned long base_addr;   int irq;   atomic_t carrier_changes;   unsigned long state;   struct list_head dev_list;   struct list_head napi_list;   struct list_head unreg_list;   struct list_head close_list;   struct list_head ptype_all;   struct list_head ptype_specific;   struct __anonstruct_adj_list_472 adj_list;   struct __anonstruct_all_adj_list_473 all_adj_list;   netdev_features_t features;   netdev_features_t hw_features;   netdev_features_t wanted_features;   netdev_features_t vlan_features;   netdev_features_t hw_enc_features;   netdev_features_t mpls_features;   netdev_features_t gso_partial_features;   int ifindex;   int group;   struct net_device_stats stats;   atomic_long_t rx_dropped;   atomic_long_t tx_dropped;   atomic_long_t rx_nohandler;   const struct iw_handler_def *wireless_handlers;   struct iw_public_data *wireless_data;   const struct net_device_ops *netdev_ops;   const struct ethtool_ops *ethtool_ops;   const struct switchdev_ops *switchdev_ops;   const struct l3mdev_ops *l3mdev_ops;   const struct ndisc_ops *ndisc_ops;   const struct header_ops *header_ops;   unsigned int flags;   
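/*
 * net_device_ops, declared above, is the driver's entry-point table; a
 * minimal virtual device needs little more than open/stop/xmit. A hedged
 * skeleton (the my_* names are illustrative):
 *
 *   static int my_open(struct net_device *dev)
 *   {
 *           netif_start_queue(dev);
 *           return 0;
 *   }
 *
 *   static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
 *   {
 *           dev->stats.tx_packets++;
 *           dev_kfree_skb(skb);               count it and drop it
 *           return NETDEV_TX_OK;
 *   }
 *
 *   static const struct net_device_ops my_ops = {
 *           .ndo_open       = my_open,
 *           .ndo_start_xmit = my_xmit,
 *   };
 */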
unsigned int priv_flags;   unsigned short gflags;   unsigned short padded;   unsigned char operstate;   unsigned char link_mode;   unsigned char if_port;   unsigned char dma;   unsigned int mtu;   unsigned short type;   unsigned short hard_header_len;   unsigned short needed_headroom;   unsigned short needed_tailroom;   unsigned char perm_addr[32U];   unsigned char addr_assign_type;   unsigned char addr_len;   unsigned short neigh_priv_len;   unsigned short dev_id;   unsigned short dev_port;   spinlock_t addr_list_lock;   unsigned char name_assign_type;   bool uc_promisc;   struct netdev_hw_addr_list uc;   struct netdev_hw_addr_list mc;   struct netdev_hw_addr_list dev_addrs;   struct kset *queues_kset;   unsigned int promiscuity;   unsigned int allmulti;   struct vlan_info *vlan_info;   struct dsa_switch_tree *dsa_ptr;   struct tipc_bearer *tipc_ptr;   void *atalk_ptr;   struct in_device *ip_ptr;   struct dn_dev *dn_ptr;   struct inet6_dev *ip6_ptr;   void *ax25_ptr;   struct wireless_dev *ieee80211_ptr;   struct wpan_dev *ieee802154_ptr;   struct mpls_dev *mpls_ptr;   unsigned long last_rx;   unsigned char *dev_addr;   struct netdev_rx_queue *_rx;   unsigned int num_rx_queues;   unsigned int real_num_rx_queues;   unsigned long gro_flush_timeout;   rx_handler_func_t *rx_handler;   void *rx_handler_data;   struct tcf_proto *ingress_cl_list;   struct netdev_queue *ingress_queue;   struct list_head nf_hooks_ingress;   unsigned char broadcast[32U];   struct cpu_rmap *rx_cpu_rmap;   struct hlist_node index_hlist;   struct netdev_queue *_tx;   unsigned int num_tx_queues;   unsigned int real_num_tx_queues;   struct Qdisc *qdisc;   unsigned long tx_queue_len;   spinlock_t tx_global_lock;   int watchdog_timeo;   struct xps_dev_maps *xps_maps;   struct tcf_proto *egress_cl_list;   u32 offload_fwd_mark;   struct timer_list watchdog_timer;   int *pcpu_refcnt;   struct list_head todo_list;   struct list_head link_watch_list;   unsigned char reg_state;   bool dismantle;   unsigned short rtnl_link_state;   void (*destructor)(struct net_device *);   struct netpoll_info *npinfo;   possible_net_t nd_net;   union __anonunion____missing_field_name_474 __annonCompField108;   struct garp_port *garp_port;   struct mrp_port *mrp_port;   struct device dev;   const struct attribute_group *sysfs_groups[4U];   const struct attribute_group *sysfs_rx_queue_group;   const struct rtnl_link_ops *rtnl_link_ops;   unsigned int gso_max_size;   u16 gso_max_segs;   const struct dcbnl_rtnl_ops *dcbnl_ops;   u8 num_tc;   struct netdev_tc_txq tc_to_txq[16U];   u8 prio_tc_map[16U];   unsigned int fcoe_ddp_xid;   struct netprio_map *priomap;   struct phy_device *phydev;   struct lock_class_key *qdisc_tx_busylock;   struct lock_class_key *qdisc_running_key;   bool proto_down; } ;  2165     struct packet_type {   __be16 type;   struct net_device *dev;   int (*func)(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *);   bool  (*id_match)(struct packet_type *, struct sock *);   void *af_packet_priv;   struct list_head list; } ;  2195     struct pcpu_sw_netstats {   u64 rx_packets;   u64 rx_bytes;   u64 tx_packets;   u64 tx_bytes;   struct u64_stats_sync syncp; } ;   103     struct page_counter {   atomic_long_t count;   unsigned long limit;   struct page_counter *parent;   unsigned long watermark;   unsigned long failcnt; } ;    33     struct eventfd_ctx ;    41     struct vmpressure {   unsigned long scanned;   unsigned long reclaimed;   unsigned long tree_scanned;   unsigned long tree_reclaimed;   
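/*
 * pcpu_sw_netstats above pairs 64-bit counters with u64_stats_sync so
 * 32-bit readers can retry across torn updates. The canonical per-packet
 * update, assuming the driver uses the per-cpu tstats member from the
 * net_device union above:
 *
 *   struct pcpu_sw_netstats *s = this_cpu_ptr(dev->tstats);
 *   u64_stats_update_begin(&s->syncp);
 *   s->tx_packets++;
 *   s->tx_bytes += skb->len;
 *   u64_stats_update_end(&s->syncp);
 */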
struct spinlock sr_lock;   struct list_head events;   struct mutex events_lock;   struct work_struct work; } ;    44     struct fprop_global {   struct percpu_counter events;   unsigned int period;   seqcount_t sequence; } ;    72     struct fprop_local_percpu {   struct percpu_counter events;   unsigned int period;   raw_spinlock_t lock; } ;    32     typedef int congested_fn(void *, int);    41     struct bdi_writeback_congested {   unsigned long state;   atomic_t refcnt;   struct backing_dev_info *bdi;   int blkcg_id;   struct rb_node rb_node; } ;    60     union __anonunion____missing_field_name_479 {   struct work_struct release_work;   struct callback_head rcu; } ;    60     struct bdi_writeback {   struct backing_dev_info *bdi;   unsigned long state;   unsigned long last_old_flush;   struct list_head b_dirty;   struct list_head b_io;   struct list_head b_more_io;   struct list_head b_dirty_time;   spinlock_t list_lock;   struct percpu_counter stat[4U];   struct bdi_writeback_congested *congested;   unsigned long bw_time_stamp;   unsigned long dirtied_stamp;   unsigned long written_stamp;   unsigned long write_bandwidth;   unsigned long avg_write_bandwidth;   unsigned long dirty_ratelimit;   unsigned long balanced_dirty_ratelimit;   struct fprop_local_percpu completions;   int dirty_exceeded;   spinlock_t work_lock;   struct list_head work_list;   struct delayed_work dwork;   struct list_head bdi_node;   struct percpu_ref refcnt;   struct fprop_local_percpu memcg_completions;   struct cgroup_subsys_state *memcg_css;   struct cgroup_subsys_state *blkcg_css;   struct list_head memcg_node;   struct list_head blkcg_node;   union __anonunion____missing_field_name_479 __annonCompField109; } ;   134     struct backing_dev_info {   struct list_head bdi_list;   unsigned long ra_pages;   unsigned int capabilities;   congested_fn *congested_fn;   void *congested_data;   char *name;   unsigned int min_ratio;   unsigned int max_ratio;   unsigned int max_prop_frac;   atomic_long_t tot_write_bandwidth;   struct bdi_writeback wb;   struct list_head wb_list;   struct radix_tree_root cgwb_tree;   struct rb_root cgwb_congested_tree;   atomic_t usage_cnt;   wait_queue_head_t wb_waitq;   struct device *dev;   struct device *owner;   struct timer_list laptop_mode_wb_timer;   struct dentry *debug_dir;   struct dentry *debug_stats; } ;    14     enum writeback_sync_modes {   WB_SYNC_NONE = 0,   WB_SYNC_ALL = 1 } ;    31     struct writeback_control {   long nr_to_write;   long pages_skipped;   loff_t range_start;   loff_t range_end;   enum writeback_sync_modes sync_mode;   unsigned char for_kupdate;   unsigned char for_background;   unsigned char tagged_writepages;   unsigned char for_reclaim;   unsigned char range_cyclic;   unsigned char for_sync;   struct bdi_writeback *wb;   struct inode *inode;   int wb_id;   int wb_lcand_id;   int wb_tcand_id;   size_t wb_bytes;   size_t wb_lcand_bytes;   size_t wb_tcand_bytes; } ;   101     struct wb_domain {   spinlock_t lock;   struct fprop_global completions;   struct timer_list period_timer;   unsigned long period_time;   unsigned long dirty_limit_tstamp;   unsigned long dirty_limit; } ;    12     typedef void * mempool_alloc_t(gfp_t , void *);    13     typedef void mempool_free_t(void *, void *);    14     struct mempool_s {   spinlock_t lock;   int min_nr;   int curr_nr;   void **elements;   void *pool_data;   mempool_alloc_t *alloc;   mempool_free_t *free;   wait_queue_head_t wait; } ;    25     typedef struct mempool_s mempool_t;    79     union 
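/*
 * [Editor's note: illustrative sketch, not part of the error trace.]
 * The mempool_t just defined guarantees forward progress under memory
 * pressure by keeping min_nr elements in reserve. A minimal sketch using
 * the stock slab-backed callbacks; "demo_cache" and "demo_pool" are
 * hypothetical.
 */
#include <linux/mempool.h>
#include <linux/slab.h>

static mempool_t *demo_pool;

static int demo_pool_init(struct kmem_cache *demo_cache)
{
	/* keep at least 16 objects preallocated for low-memory progress */
	demo_pool = mempool_create(16, mempool_alloc_slab,
				   mempool_free_slab, demo_cache);
	return demo_pool ? 0 : -ENOMEM;
}
/* mempool_alloc(demo_pool, GFP_KERNEL) pairs with
 * mempool_free(obj, demo_pool); mempool_destroy(demo_pool) on teardown. */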
__anonunion____missing_field_name_480 {   struct list_head q_node;   struct kmem_cache *__rcu_icq_cache; } ;    79     union __anonunion____missing_field_name_481 {   struct hlist_node ioc_node;   struct callback_head __rcu_head; } ;    79     struct io_cq {   struct request_queue *q;   struct io_context *ioc;   union __anonunion____missing_field_name_480 __annonCompField110;   union __anonunion____missing_field_name_481 __annonCompField111;   unsigned int flags; } ;    92     struct io_context {   atomic_long_t refcount;   atomic_t active_ref;   atomic_t nr_tasks;   spinlock_t lock;   unsigned short ioprio;   int nr_batch_requests;   unsigned long last_waited;   struct radix_tree_root icq_tree;   struct io_cq *icq_hint;   struct hlist_head icq_list;   struct work_struct release_work; } ;   295     struct bio_integrity_payload {   struct bio *bip_bio;   struct bvec_iter bip_iter;   bio_end_io_t *bip_end_io;   unsigned short bip_slab;   unsigned short bip_vcnt;   unsigned short bip_max_vcnt;   unsigned short bip_flags;   struct work_struct bip_work;   struct bio_vec *bip_vec;   struct bio_vec bip_inline_vecs[0U]; } ;   529     struct bio_list {   struct bio *head;   struct bio *tail; } ;   661     struct bio_set {   struct kmem_cache *bio_slab;   unsigned int front_pad;   mempool_t *bio_pool;   mempool_t *bvec_pool;   mempool_t *bio_integrity_pool;   mempool_t *bvec_integrity_pool;   spinlock_t rescue_lock;   struct bio_list rescue_list;   struct work_struct rescue_work;   struct workqueue_struct *rescue_workqueue; } ;    87     struct mem_cgroup_id {   int id;   atomic_t ref; } ;   104     struct mem_cgroup_stat_cpu {   long count[11U];   unsigned long events[8U];   unsigned long nr_page_events;   unsigned long targets[3U]; } ;   111     struct mem_cgroup_reclaim_iter {   struct mem_cgroup *position;   unsigned int generation; } ;   117     struct mem_cgroup_per_node {   struct lruvec lruvec;   unsigned long lru_size[5U];   struct mem_cgroup_reclaim_iter iter[13U];   struct rb_node tree_node;   unsigned long usage_in_excess;   bool on_tree;   struct mem_cgroup *memcg; } ;   133     struct mem_cgroup_threshold {   struct eventfd_ctx *eventfd;   unsigned long threshold; } ;   139     struct mem_cgroup_threshold_ary {   int current_threshold;   unsigned int size;   struct mem_cgroup_threshold entries[0U]; } ;   149     struct mem_cgroup_thresholds {   struct mem_cgroup_threshold_ary *primary;   struct mem_cgroup_threshold_ary *spare; } ;   160     enum memcg_kmem_state {   KMEM_NONE = 0,   KMEM_ALLOCATED = 1,   KMEM_ONLINE = 2 } ;   166     struct mem_cgroup {   struct cgroup_subsys_state css;   struct mem_cgroup_id id;   struct page_counter memory;   struct page_counter swap;   struct page_counter memsw;   struct page_counter kmem;   struct page_counter tcpmem;   unsigned long low;   unsigned long high;   struct work_struct high_work;   unsigned long soft_limit;   struct vmpressure vmpressure;   bool use_hierarchy;   bool oom_lock;   int under_oom;   int swappiness;   int oom_kill_disable;   struct cgroup_file events_file;   struct mutex thresholds_lock;   struct mem_cgroup_thresholds thresholds;   struct mem_cgroup_thresholds memsw_thresholds;   struct list_head oom_notify;   unsigned long move_charge_at_immigrate;   atomic_t moving_account;   spinlock_t move_lock;   struct task_struct *move_lock_task;   unsigned long move_lock_flags;   struct mem_cgroup_stat_cpu *stat;   unsigned long socket_pressure;   bool tcpmem_active;   int tcpmem_pressure;   int kmemcg_id;   enum memcg_kmem_state 
kmem_state;   int last_scanned_node;   nodemask_t scan_nodes;   atomic_t numainfo_events;   atomic_t numainfo_updating;   struct list_head cgwb_list;   struct wb_domain cgwb_domain;   struct list_head event_list;   spinlock_t event_list_lock;   struct mem_cgroup_per_node *nodeinfo[0U]; } ;    27     struct gnet_stats_basic_packed {   __u64 bytes;   __u32 packets; } ;    41     struct gnet_stats_rate_est64 {   __u64 bps;   __u64 pps; } ;    51     struct gnet_stats_queue {   __u32 qlen;   __u32 backlog;   __u32 drops;   __u32 requeues;   __u32 overlimits; } ;   519     struct tcmsg {   unsigned char tcm_family;   unsigned char tcm__pad1;   unsigned short tcm__pad2;   int tcm_ifindex;   __u32 tcm_handle;   __u32 tcm_parent;   __u32 tcm_info; } ;   122     struct gnet_stats_basic_cpu {   struct gnet_stats_basic_packed bstats;   struct u64_stats_sync syncp; } ;    13     struct gnet_dump {   spinlock_t *lock;   struct sk_buff *skb;   struct nlattr *tail;   int compat_tc_stats;   int compat_xstats;   int padattr;   void *xstats;   int xstats_len;   struct tc_stats tc_stats; } ;    87     struct nla_policy {   u16 type;   u16 len; } ;    25     struct rtnl_link_ops {   struct list_head list;   const char *kind;   size_t priv_size;   void (*setup)(struct net_device *);   int maxtype;   const struct nla_policy *policy;   int (*validate)(struct nlattr **, struct nlattr **);   int (*newlink)(struct net *, struct net_device *, struct nlattr **, struct nlattr **);   int (*changelink)(struct net_device *, struct nlattr **, struct nlattr **);   void (*dellink)(struct net_device *, struct list_head *);   size_t  (*get_size)(const struct net_device *);   int (*fill_info)(struct sk_buff *, const struct net_device *);   size_t  (*get_xstats_size)(const struct net_device *);   int (*fill_xstats)(struct sk_buff *, const struct net_device *);   unsigned int (*get_num_tx_queues)();   unsigned int (*get_num_rx_queues)();   int slave_maxtype;   const struct nla_policy *slave_policy;   int (*slave_validate)(struct nlattr **, struct nlattr **);   int (*slave_changelink)(struct net_device *, struct net_device *, struct nlattr **, struct nlattr **);   size_t  (*get_slave_size)(const struct net_device *, const struct net_device *);   int (*fill_slave_info)(struct sk_buff *, const struct net_device *, const struct net_device *);   struct net * (*get_link_net)(const struct net_device *);   size_t  (*get_linkxstats_size)(const struct net_device *, int);   int (*fill_linkxstats)(struct sk_buff *, const struct net_device *, int *, int); } ;   158     struct Qdisc_ops ;   159     struct qdisc_walker ;   160     struct tcf_walker ;    30     struct qdisc_size_table {   struct callback_head rcu;   struct list_head list;   struct tc_sizespec szopts;   int refcnt;   u16 data[]; } ;    38     struct Qdisc {   int (*enqueue)(struct sk_buff *, struct Qdisc *, struct sk_buff **);   struct sk_buff * (*dequeue)(struct Qdisc *);   unsigned int flags;   u32 limit;   const struct Qdisc_ops *ops;   struct qdisc_size_table *stab;   struct list_head list;   u32 handle;   u32 parent;   void *u32_node;   struct netdev_queue *dev_queue;   struct gnet_stats_rate_est64 rate_est;   struct gnet_stats_basic_cpu *cpu_bstats;   struct gnet_stats_queue *cpu_qstats;   struct sk_buff *gso_skb;   struct sk_buff_head q;   struct gnet_stats_basic_packed bstats;   seqcount_t running;   struct gnet_stats_queue qstats;   unsigned long state;   struct Qdisc *next_sched;   struct sk_buff *skb_bad_txq;   struct callback_head callback_head;   int padded;   
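/*
 * [Editor's note: illustrative sketch, not part of the error trace.]
 * The struct rtnl_link_ops above is what lets `ip link add ... type <kind>`
 * create a virtual device. A minimal sketch modeled on trivial virtual
 * drivers; the "demo" kind and demo_setup() are hypothetical.
 */
#include <net/rtnetlink.h>
#include <linux/etherdevice.h>

static void demo_setup(struct net_device *dev)
{
	ether_setup(dev);	/* plain Ethernet defaults */
}

static struct rtnl_link_ops demo_link_ops = {
	.kind  = "demo",
	.setup = demo_setup,	/* all other callbacks may stay NULL */
};
/* rtnl_link_register(&demo_link_ops) at module init,
 * rtnl_link_unregister(&demo_link_ops) at exit. */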
atomic_t refcnt;   spinlock_t busylock; } ;   126     struct Qdisc_class_ops {   struct netdev_queue * (*select_queue)(struct Qdisc *, struct tcmsg *);   int (*graft)(struct Qdisc *, unsigned long, struct Qdisc *, struct Qdisc **);   struct Qdisc * (*leaf)(struct Qdisc *, unsigned long);   void (*qlen_notify)(struct Qdisc *, unsigned long);   unsigned long int (*get)(struct Qdisc *, u32 );   void (*put)(struct Qdisc *, unsigned long);   int (*change)(struct Qdisc *, u32 , u32 , struct nlattr **, unsigned long *);   int (*delete)(struct Qdisc *, unsigned long);   void (*walk)(struct Qdisc *, struct qdisc_walker *);   struct tcf_proto ** (*tcf_chain)(struct Qdisc *, unsigned long);   bool  (*tcf_cl_offload)(u32 );   unsigned long int (*bind_tcf)(struct Qdisc *, unsigned long, u32 );   void (*unbind_tcf)(struct Qdisc *, unsigned long);   int (*dump)(struct Qdisc *, unsigned long, struct sk_buff *, struct tcmsg *);   int (*dump_stats)(struct Qdisc *, unsigned long, struct gnet_dump *); } ;   158     struct Qdisc_ops {   struct Qdisc_ops *next;   const struct Qdisc_class_ops *cl_ops;   char id[16U];   int priv_size;   int (*enqueue)(struct sk_buff *, struct Qdisc *, struct sk_buff **);   struct sk_buff * (*dequeue)(struct Qdisc *);   struct sk_buff * (*peek)(struct Qdisc *);   int (*init)(struct Qdisc *, struct nlattr *);   void (*reset)(struct Qdisc *);   void (*destroy)(struct Qdisc *);   int (*change)(struct Qdisc *, struct nlattr *);   void (*attach)(struct Qdisc *);   int (*dump)(struct Qdisc *, struct sk_buff *);   int (*dump_stats)(struct Qdisc *, struct gnet_dump *);   struct module *owner; } ;   183     struct tcf_result {   unsigned long class;   u32 classid; } ;   189     struct tcf_proto_ops {   struct list_head head;   char kind[16U];   int (*classify)(struct sk_buff *, const struct tcf_proto *, struct tcf_result *);   int (*init)(struct tcf_proto *);   bool  (*destroy)(struct tcf_proto *, bool );   unsigned long int (*get)(struct tcf_proto *, u32 );   int (*change)(struct net *, struct sk_buff *, struct tcf_proto *, unsigned long, u32 , struct nlattr **, unsigned long *, bool );   int (*delete)(struct tcf_proto *, unsigned long);   void (*walk)(struct tcf_proto *, struct tcf_walker *);   int (*dump)(struct net *, struct tcf_proto *, unsigned long, struct sk_buff *, struct tcmsg *);   struct module *owner; } ;   214     struct tcf_proto {   struct tcf_proto *next;   void *root;   int (*classify)(struct sk_buff *, const struct tcf_proto *, struct tcf_result *);   __be16 protocol;   u32 prio;   u32 classid;   struct Qdisc *q;   void *data;   const struct tcf_proto_ops *ops;   struct callback_head rcu; } ;   806     struct sock_filter {   __u16 code;   __u8 jt;   __u8 jf;   __u32 k; } ;    49     struct bpf_insn {   __u8 code;   unsigned char dst_reg;   unsigned char src_reg;   __s16 off;   __s32 imm; } ;    88     enum bpf_prog_type {   BPF_PROG_TYPE_UNSPEC = 0,   BPF_PROG_TYPE_SOCKET_FILTER = 1,   BPF_PROG_TYPE_KPROBE = 2,   BPF_PROG_TYPE_SCHED_CLS = 3,   BPF_PROG_TYPE_SCHED_ACT = 4,   BPF_PROG_TYPE_TRACEPOINT = 5,   BPF_PROG_TYPE_XDP = 6 } ;   472     struct bpf_prog_aux ;   323     struct sock_fprog_kern {   u16 len;   struct sock_filter *filter; } ;   334     union __anonunion____missing_field_name_505 {   struct sock_filter insns[0U];   struct bpf_insn insnsi[0U]; } ;   334     struct bpf_prog {   u16 pages;   unsigned char jited;   unsigned char gpl_compatible;   unsigned char cb_access;   unsigned char dst_needed;   u32 len;   enum bpf_prog_type type;   struct bpf_prog_aux 
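/*
 * [Editor's note: illustrative userspace sketch, not part of the trace.]
 * struct sock_filter above is the classic-BPF instruction format consumed
 * by SO_ATTACH_FILTER; the kernel side wraps such a program in the
 * struct sock_fprog_kern / struct bpf_prog that follow. This runnable
 * one-instruction filter accepts every packet.
 */
#include <linux/filter.h>
#include <sys/socket.h>
#include <stdio.h>

int main(void)
{
	struct sock_filter insns[] = {
		{ BPF_RET | BPF_K, 0, 0, 0xffffffff },	/* accept all bytes */
	};
	struct sock_fprog prog = { .len = 1, .filter = insns };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0 || setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
				 &prog, sizeof(prog)) < 0)
		perror("SO_ATTACH_FILTER");
	return 0;
}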
*aux;   struct sock_fprog_kern *orig_prog;   unsigned int (*bpf_func)(const struct sk_buff *, const struct bpf_insn *);   union __anonunion____missing_field_name_505 __annonCompField118; } ;   355     struct sk_filter {   atomic_t refcnt;   struct callback_head rcu;   struct bpf_prog *prog; } ;   138     struct pollfd {   int fd;   short events;   short revents; } ;    32     struct poll_table_struct {   void (*_qproc)(struct file *, wait_queue_head_t *, struct poll_table_struct *);   unsigned long _key; } ;   187     struct neigh_table ;   187     struct neigh_parms {   possible_net_t net;   struct net_device *dev;   struct list_head list;   int (*neigh_setup)(struct neighbour *);   void (*neigh_cleanup)(struct neighbour *);   struct neigh_table *tbl;   void *sysctl_table;   int dead;   atomic_t refcnt;   struct callback_head callback_head;   int reachable_time;   int data[13U];   unsigned long data_state[1U]; } ;   110     struct neigh_statistics {   unsigned long allocs;   unsigned long destroys;   unsigned long hash_grows;   unsigned long res_failed;   unsigned long lookups;   unsigned long hits;   unsigned long rcv_probes_mcast;   unsigned long rcv_probes_ucast;   unsigned long periodic_gc_runs;   unsigned long forced_gc_runs;   unsigned long unres_discards;   unsigned long table_fulls; } ;   130     struct neigh_ops ;   130     struct neighbour {   struct neighbour *next;   struct neigh_table *tbl;   struct neigh_parms *parms;   unsigned long confirmed;   unsigned long updated;   rwlock_t lock;   atomic_t refcnt;   struct sk_buff_head arp_queue;   unsigned int arp_queue_len_bytes;   struct timer_list timer;   unsigned long used;   atomic_t probes;   __u8 flags;   __u8 nud_state;   __u8 type;   __u8 dead;   seqlock_t ha_lock;   unsigned char ha[32U];   struct hh_cache hh;   int (*output)(struct neighbour *, struct sk_buff *);   const struct neigh_ops *ops;   struct callback_head rcu;   struct net_device *dev;   u8 primary_key[0U]; } ;   159     struct neigh_ops {   int family;   void (*solicit)(struct neighbour *, struct sk_buff *);   void (*error_report)(struct neighbour *, struct sk_buff *);   int (*output)(struct neighbour *, struct sk_buff *);   int (*connected_output)(struct neighbour *, struct sk_buff *); } ;   167     struct pneigh_entry {   struct pneigh_entry *next;   possible_net_t net;   struct net_device *dev;   u8 flags;   u8 key[0U]; } ;   175     struct neigh_hash_table {   struct neighbour **hash_buckets;   unsigned int hash_shift;   __u32 hash_rnd[4U];   struct callback_head rcu; } ;   188     struct neigh_table {   int family;   int entry_size;   int key_len;   __be16 protocol;   __u32  (*hash)(const void *, const struct net_device *, __u32 *);   bool  (*key_eq)(const struct neighbour *, const void *);   int (*constructor)(struct neighbour *);   int (*pconstructor)(struct pneigh_entry *);   void (*pdestructor)(struct pneigh_entry *);   void (*proxy_redo)(struct sk_buff *);   char *id;   struct neigh_parms parms;   struct list_head parms_list;   int gc_interval;   int gc_thresh1;   int gc_thresh2;   int gc_thresh3;   unsigned long last_flush;   struct delayed_work gc_work;   struct timer_list proxy_timer;   struct sk_buff_head proxy_queue;   atomic_t entries;   rwlock_t lock;   unsigned long last_rand;   struct neigh_statistics *stats;   struct neigh_hash_table *nht;   struct pneigh_entry **phash_buckets; } ;   520     struct lwtunnel_state ;   520     struct dn_route ;   520     union __anonunion____missing_field_name_521 {   struct dst_entry *next;   struct rtable 
*rt_next;   struct rt6_info *rt6_next;   struct dn_route *dn_next; } ;   520     struct dst_entry {   struct callback_head callback_head;   struct dst_entry *child;   struct net_device *dev;   struct dst_ops *ops;   unsigned long _metrics;   unsigned long expires;   struct dst_entry *path;   struct dst_entry *from;   struct xfrm_state *xfrm;   int (*input)(struct sk_buff *);   int (*output)(struct net *, struct sock *, struct sk_buff *);   unsigned short flags;   unsigned short pending_confirm;   short error;   short obsolete;   unsigned short header_len;   unsigned short trailer_len;   __u32 tclassid;   long __pad_to_align_refcnt[2U];   atomic_t __refcnt;   int __use;   unsigned long lastuse;   struct lwtunnel_state *lwtstate;   union __anonunion____missing_field_name_521 __annonCompField119; } ;   110     struct __anonstruct_socket_lock_t_522 {   spinlock_t slock;   int owned;   wait_queue_head_t wq;   struct lockdep_map dep_map; } ;   110     typedef struct __anonstruct_socket_lock_t_522 socket_lock_t;   110     struct proto ;   116     typedef __u32 __portpair;   117     typedef __u64 __addrpair;   118     struct __anonstruct____missing_field_name_524 {   __be32 skc_daddr;   __be32 skc_rcv_saddr; } ;   118     union __anonunion____missing_field_name_523 {   __addrpair skc_addrpair;   struct __anonstruct____missing_field_name_524 __annonCompField120; } ;   118     union __anonunion____missing_field_name_525 {   unsigned int skc_hash;   __u16 skc_u16hashes[2U]; } ;   118     struct __anonstruct____missing_field_name_527 {   __be16 skc_dport;   __u16 skc_num; } ;   118     union __anonunion____missing_field_name_526 {   __portpair skc_portpair;   struct __anonstruct____missing_field_name_527 __annonCompField123; } ;   118     union __anonunion____missing_field_name_528 {   struct hlist_node skc_bind_node;   struct hlist_node skc_portaddr_node; } ;   118     struct inet_timewait_death_row ;   118     union __anonunion____missing_field_name_529 {   unsigned long skc_flags;   struct sock *skc_listener;   struct inet_timewait_death_row *skc_tw_dr; } ;   118     union __anonunion____missing_field_name_530 {   struct hlist_node skc_node;   struct hlist_nulls_node skc_nulls_node; } ;   118     union __anonunion____missing_field_name_531 {   int skc_incoming_cpu;   u32 skc_rcv_wnd;   u32 skc_tw_rcv_nxt; } ;   118     union __anonunion____missing_field_name_532 {   u32 skc_rxhash;   u32 skc_window_clamp;   u32 skc_tw_snd_nxt; } ;   118     struct sock_common {   union __anonunion____missing_field_name_523 __annonCompField121;   union __anonunion____missing_field_name_525 __annonCompField122;   union __anonunion____missing_field_name_526 __annonCompField124;   unsigned short skc_family;   volatile unsigned char skc_state;   unsigned char skc_reuse;   unsigned char skc_reuseport;   unsigned char skc_ipv6only;   unsigned char skc_net_refcnt;   int skc_bound_dev_if;   union __anonunion____missing_field_name_528 __annonCompField125;   struct proto *skc_prot;   possible_net_t skc_net;   struct in6_addr skc_v6_daddr;   struct in6_addr skc_v6_rcv_saddr;   atomic64_t skc_cookie;   union __anonunion____missing_field_name_529 __annonCompField126;   int skc_dontcopy_begin[0U];   union __anonunion____missing_field_name_530 __annonCompField127;   int skc_tx_queue_mapping;   union __anonunion____missing_field_name_531 __annonCompField128;   atomic_t skc_refcnt;   int skc_dontcopy_end[0U];   union __anonunion____missing_field_name_532 __annonCompField129; } ;   230     struct __anonstruct_sk_backlog_533 {   
atomic_t rmem_alloc;   int len;   struct sk_buff *head;   struct sk_buff *tail; } ;   230     union __anonunion____missing_field_name_534 {   struct socket_wq *sk_wq;   struct socket_wq *sk_wq_raw; } ;   230     struct sock_reuseport ;   230     struct sock {   struct sock_common __sk_common;   socket_lock_t sk_lock;   struct sk_buff_head sk_receive_queue;   struct __anonstruct_sk_backlog_533 sk_backlog;   int sk_forward_alloc;   __u32 sk_txhash;   unsigned int sk_napi_id;   unsigned int sk_ll_usec;   atomic_t sk_drops;   int sk_rcvbuf;   struct sk_filter *sk_filter;   union __anonunion____missing_field_name_534 __annonCompField130;   struct xfrm_policy *sk_policy[2U];   struct dst_entry *sk_rx_dst;   struct dst_entry *sk_dst_cache;   atomic_t sk_wmem_alloc;   atomic_t sk_omem_alloc;   int sk_sndbuf;   struct sk_buff_head sk_write_queue;   unsigned char sk_padding;   unsigned char sk_no_check_tx;   unsigned char sk_no_check_rx;   unsigned char sk_userlocks;   unsigned char sk_protocol;   unsigned short sk_type;   int sk_wmem_queued;   gfp_t sk_allocation;   u32 sk_pacing_rate;   u32 sk_max_pacing_rate;   netdev_features_t sk_route_caps;   netdev_features_t sk_route_nocaps;   int sk_gso_type;   unsigned int sk_gso_max_size;   u16 sk_gso_max_segs;   int sk_rcvlowat;   unsigned long sk_lingertime;   struct sk_buff_head sk_error_queue;   struct proto *sk_prot_creator;   rwlock_t sk_callback_lock;   int sk_err;   int sk_err_soft;   u32 sk_ack_backlog;   u32 sk_max_ack_backlog;   __u32 sk_priority;   __u32 sk_mark;   struct pid *sk_peer_pid;   const struct cred *sk_peer_cred;   long sk_rcvtimeo;   long sk_sndtimeo;   struct timer_list sk_timer;   ktime_t sk_stamp;   u16 sk_tsflags;   u8 sk_shutdown;   u32 sk_tskey;   struct socket *sk_socket;   void *sk_user_data;   struct page_frag sk_frag;   struct sk_buff *sk_send_head;   __s32 sk_peek_off;   int sk_write_pending;   void *sk_security;   struct sock_cgroup_data sk_cgrp_data;   struct mem_cgroup *sk_memcg;   void (*sk_state_change)(struct sock *);   void (*sk_data_ready)(struct sock *);   void (*sk_write_space)(struct sock *);   void (*sk_error_report)(struct sock *);   int (*sk_backlog_rcv)(struct sock *, struct sk_buff *);   void (*sk_destruct)(struct sock *);   struct sock_reuseport *sk_reuseport_cb;   struct callback_head sk_rcu; } ;   948     struct request_sock_ops ;   949     struct timewait_sock_ops ;   950     struct inet_hashinfo ;   951     struct raw_hashinfo ;   965     struct udp_table ;   965     union __anonunion_h_545 {   struct inet_hashinfo *hashinfo;   struct udp_table *udp_table;   struct raw_hashinfo *raw_hash; } ;   965     struct proto {   void (*close)(struct sock *, long);   int (*connect)(struct sock *, struct sockaddr *, int);   int (*disconnect)(struct sock *, int);   struct sock * (*accept)(struct sock *, int, int *);   int (*ioctl)(struct sock *, int, unsigned long);   int (*init)(struct sock *);   void (*destroy)(struct sock *);   void (*shutdown)(struct sock *, int);   int (*setsockopt)(struct sock *, int, int, char *, unsigned int);   int (*getsockopt)(struct sock *, int, int, char *, int *);   int (*compat_setsockopt)(struct sock *, int, int, char *, unsigned int);   int (*compat_getsockopt)(struct sock *, int, int, char *, int *);   int (*compat_ioctl)(struct sock *, unsigned int, unsigned long);   int (*sendmsg)(struct sock *, struct msghdr *, size_t );   int (*recvmsg)(struct sock *, struct msghdr *, size_t , int, int, int *);   int (*sendpage)(struct sock *, struct page *, int, size_t , int);   int 
(*bind)(struct sock *, struct sockaddr *, int);   int (*backlog_rcv)(struct sock *, struct sk_buff *);   void (*release_cb)(struct sock *);   int (*hash)(struct sock *);   void (*unhash)(struct sock *);   void (*rehash)(struct sock *);   int (*get_port)(struct sock *, unsigned short);   void (*clear_sk)(struct sock *, int);   unsigned int inuse_idx;   bool  (*stream_memory_free)(const struct sock *);   void (*enter_memory_pressure)(struct sock *);   atomic_long_t *memory_allocated;   struct percpu_counter *sockets_allocated;   int *memory_pressure;   long *sysctl_mem;   int *sysctl_wmem;   int *sysctl_rmem;   int max_header;   bool no_autobind;   struct kmem_cache *slab;   unsigned int obj_size;   int slab_flags;   struct percpu_counter *orphan_count;   struct request_sock_ops *rsk_prot;   struct timewait_sock_ops *twsk_prot;   union __anonunion_h_545 h;   struct module *owner;   char name[32U];   struct list_head node;   int (*diag_destroy)(struct sock *, int); } ;   174     struct request_sock_ops {   int family;   int obj_size;   struct kmem_cache *slab;   char *slab_name;   int (*rtx_syn_ack)(const struct sock *, struct request_sock *);   void (*send_ack)(const struct sock *, struct sk_buff *, struct request_sock *);   void (*send_reset)(const struct sock *, struct sk_buff *);   void (*destructor)(struct request_sock *);   void (*syn_ack_timeout)(const struct request_sock *); } ;    46     struct request_sock {   struct sock_common __req_common;   struct request_sock *dl_next;   u16 mss;   u8 num_retrans;   unsigned char cookie_ts;   unsigned char num_timeout;   u32 ts_recent;   struct timer_list rsk_timer;   const struct request_sock_ops *rsk_ops;   struct sock *sk;   u32 *saved_syn;   u32 secid;   u32 peer_secid; } ;    18     struct fib_rule_hdr {   __u8 family;   __u8 dst_len;   __u8 src_len;   __u8 tos;   __u8 table;   __u8 res1;   __u8 res2;   __u8 action;   __u32 flags; } ;    68     struct fib_rule {   struct list_head list;   int iifindex;   int oifindex;   u32 mark;   u32 mark_mask;   u32 flags;   u32 table;   u8 action;   u8 l3mdev;   u32 target;   __be64 tun_id;   struct fib_rule *ctarget;   struct net *fr_net;   atomic_t refcnt;   u32 pref;   int suppress_ifgroup;   int suppress_prefixlen;   char iifname[16U];   char oifname[16U];   struct callback_head rcu; } ;    35     struct fib_lookup_arg {   void *lookup_ptr;   void *result;   struct fib_rule *rule;   u32 table;   int flags; } ;    43     struct fib_rules_ops {   int family;   struct list_head list;   int rule_size;   int addr_size;   int unresolved_rules;   int nr_goto_rules;   int (*action)(struct fib_rule *, struct flowi *, int, struct fib_lookup_arg *);   bool  (*suppress)(struct fib_rule *, struct fib_lookup_arg *);   int (*match)(struct fib_rule *, struct flowi *, int);   int (*configure)(struct fib_rule *, struct sk_buff *, struct fib_rule_hdr *, struct nlattr **);   int (*delete)(struct fib_rule *);   int (*compare)(struct fib_rule *, struct fib_rule_hdr *, struct nlattr **);   int (*fill)(struct fib_rule *, struct sk_buff *, struct fib_rule_hdr *);   size_t  (*nlmsg_payload)(struct fib_rule *);   void (*flush_cache)(struct fib_rules_ops *);   int nlgroup;   const struct nla_policy *policy;   struct list_head rules_list;   struct module *owner;   struct net *fro_net;   struct callback_head rcu; } ;   140     struct l3mdev_ops {   u32  (*l3mdev_fib_table)(const struct net_device *);   struct sk_buff * (*l3mdev_l3_rcv)(struct net_device *, struct sk_buff *, u16 );   struct rtable * (*l3mdev_get_rtable)(const 
struct net_device *, const struct flowi4 *);   int (*l3mdev_get_saddr)(struct net_device *, struct flowi4 *);   struct dst_entry * (*l3mdev_get_rt6_dst)(const struct net_device *, struct flowi6 *);   int (*l3mdev_get_saddr6)(struct net_device *, const struct sock *, struct flowi6 *); } ;   328     struct timewait_sock_ops {   struct kmem_cache *twsk_slab;   char *twsk_slab_name;   unsigned int twsk_obj_size;   int (*twsk_unique)(struct sock *, struct sock *, void *);   void (*twsk_destructor)(struct sock *); } ;    39     struct inet_timewait_death_row {   atomic_t tw_count;   struct inet_hashinfo *hashinfo;   int sysctl_tw_recycle;   int sysctl_max_tw_buckets; } ;   100     struct ip6_sf_list {   struct ip6_sf_list *sf_next;   struct in6_addr sf_addr;   unsigned long sf_count[2U];   unsigned char sf_gsresp;   unsigned char sf_oldin;   unsigned char sf_crcount; } ;   109     struct ifmcaddr6 {   struct in6_addr mca_addr;   struct inet6_dev *idev;   struct ifmcaddr6 *next;   struct ip6_sf_list *mca_sources;   struct ip6_sf_list *mca_tomb;   unsigned int mca_sfmode;   unsigned char mca_crcount;   unsigned long mca_sfcount[2U];   struct timer_list mca_timer;   unsigned int mca_flags;   int mca_users;   atomic_t mca_refcnt;   spinlock_t mca_lock;   unsigned long mca_cstamp;   unsigned long mca_tstamp; } ;   141     struct ifacaddr6 {   struct in6_addr aca_addr;   struct inet6_dev *aca_idev;   struct rt6_info *aca_rt;   struct ifacaddr6 *aca_next;   int aca_users;   atomic_t aca_refcnt;   unsigned long aca_cstamp;   unsigned long aca_tstamp; } ;   152     struct ipv6_devstat {   struct proc_dir_entry *proc_dir_entry;   struct ipstats_mib *ipv6;   struct icmpv6_mib_device *icmpv6dev;   struct icmpv6msg_mib_device *icmpv6msgdev; } ;   163     struct inet6_dev {   struct net_device *dev;   struct list_head addr_list;   struct ifmcaddr6 *mc_list;   struct ifmcaddr6 *mc_tomb;   spinlock_t mc_lock;   unsigned char mc_qrv;   unsigned char mc_gq_running;   unsigned char mc_ifc_count;   unsigned char mc_dad_count;   unsigned long mc_v1_seen;   unsigned long mc_qi;   unsigned long mc_qri;   unsigned long mc_maxdelay;   struct timer_list mc_gq_timer;   struct timer_list mc_ifc_timer;   struct timer_list mc_dad_timer;   struct ifacaddr6 *ac_list;   rwlock_t lock;   atomic_t refcnt;   __u32 if_flags;   int dead;   u8 rndid[8U];   struct timer_list regen_timer;   struct list_head tempaddr_list;   struct in6_addr token;   struct neigh_parms *nd_parms;   struct ipv6_devconf cnf;   struct ipv6_devstat stats;   struct timer_list rs_timer;   __u8 rs_probes;   __u8 addr_gen_mode;   unsigned long tstamp;   struct callback_head rcu; } ;    47     struct prefix_info ;    98     struct nd_opt_hdr {   __u8 nd_opt_type;   __u8 nd_opt_len; } ;   103     struct ndisc_options {   struct nd_opt_hdr *nd_opt_array[6U];   struct nd_opt_hdr *nd_opts_ri;   struct nd_opt_hdr *nd_opts_ri_end;   struct nd_opt_hdr *nd_useropts;   struct nd_opt_hdr *nd_useropts_end;   struct nd_opt_hdr *nd_802154_opt_array[3U]; } ;   134     struct ndisc_ops {   int (*is_useropt)(u8 );   int (*parse_options)(const struct net_device *, struct nd_opt_hdr *, struct ndisc_options *);   void (*update)(const struct net_device *, struct neighbour *, u32 , u8 , const struct ndisc_options *);   int (*opt_addr_space)(const struct net_device *, u8 , struct neighbour *, u8 *, u8 **);   void (*fill_addr_option)(const struct net_device *, struct sk_buff *, u8 , const u8 *);   void (*prefix_rcv_add_addr)(struct net *, struct net_device *, const struct prefix_info 
*, struct inet6_dev *, struct in6_addr *, int, u32 , bool , bool , __u32 , u32 , bool ); } ;    37     struct ipv4_addr_key {   __be32 addr;   int vif; } ;    23     union __anonunion____missing_field_name_583 {   struct ipv4_addr_key a4;   struct in6_addr a6;   u32 key[4U]; } ;    23     struct inetpeer_addr {   union __anonunion____missing_field_name_583 __annonCompField133;   __u16 family; } ;    34     union __anonunion____missing_field_name_584 {   struct list_head gc_list;   struct callback_head gc_rcu; } ;    34     struct __anonstruct____missing_field_name_586 {   atomic_t rid; } ;    34     union __anonunion____missing_field_name_585 {   struct __anonstruct____missing_field_name_586 __annonCompField135;   struct callback_head rcu;   struct inet_peer *gc_next; } ;    34     struct inet_peer {   struct inet_peer *avl_left;   struct inet_peer *avl_right;   struct inetpeer_addr daddr;   __u32 avl_height;   u32 metrics[16U];   u32 rate_tokens;   unsigned long rate_last;   union __anonunion____missing_field_name_584 __annonCompField134;   union __anonunion____missing_field_name_585 __annonCompField136;   __u32 dtime;   atomic_t refcnt; } ;    65     struct inet_peer_base {   struct inet_peer *root;   seqlock_t lock;   int total; } ;   174     struct fib_table {   struct hlist_node tb_hlist;   u32 tb_id;   int tb_num_default;   struct callback_head rcu;   unsigned long *tb_data;   unsigned long __data[0U]; } ;    48     struct uncached_list ;    49     struct rtable {   struct dst_entry dst;   int rt_genid;   unsigned int rt_flags;   __u16 rt_type;   __u8 rt_is_input;   __u8 rt_uses_gateway;   int rt_iif;   __be32 rt_gateway;   u32 rt_pmtu;   u32 rt_table_id;   struct list_head rt_uncached;   struct uncached_list *rt_uncached_list; } ;   213     struct in_ifaddr ;   583     struct mmu_notifier ;   584     struct mmu_notifier_ops ;   585     struct mmu_notifier_mm {   struct hlist_head list;   spinlock_t lock; } ;    26     struct mmu_notifier_ops {   void (*release)(struct mmu_notifier *, struct mm_struct *);   int (*clear_flush_young)(struct mmu_notifier *, struct mm_struct *, unsigned long, unsigned long);   int (*clear_young)(struct mmu_notifier *, struct mm_struct *, unsigned long, unsigned long);   int (*test_young)(struct mmu_notifier *, struct mm_struct *, unsigned long);   void (*change_pte)(struct mmu_notifier *, struct mm_struct *, unsigned long, pte_t );   void (*invalidate_page)(struct mmu_notifier *, struct mm_struct *, unsigned long);   void (*invalidate_range_start)(struct mmu_notifier *, struct mm_struct *, unsigned long, unsigned long);   void (*invalidate_range_end)(struct mmu_notifier *, struct mm_struct *, unsigned long, unsigned long);   void (*invalidate_range)(struct mmu_notifier *, struct mm_struct *, unsigned long, unsigned long); } ;   180     struct mmu_notifier {   struct hlist_node hlist;   const struct mmu_notifier_ops *ops; } ;    66     struct __anonstruct_global_594 {   __be64 subnet_prefix;   __be64 interface_id; } ;    66     union ib_gid {   u8 raw[16U];   struct __anonstruct_global_594 global; } ;    76     enum ib_gid_type {   IB_GID_TYPE_IB = 0,   IB_GID_TYPE_ROCE = 0,   IB_GID_TYPE_ROCE_UDP_ENCAP = 1,   IB_GID_TYPE_SIZE = 2 } ;    83     struct ib_gid_attr {   enum ib_gid_type gid_type;   struct net_device *ndev; } ;   151     enum rdma_link_layer {   IB_LINK_LAYER_UNSPECIFIED = 0,   IB_LINK_LAYER_INFINIBAND = 1,   IB_LINK_LAYER_ETHERNET = 2 } ;   205     enum ib_atomic_cap {   IB_ATOMIC_NONE = 0,   IB_ATOMIC_HCA = 1,   IB_ATOMIC_GLOB = 2 } ;   223 
    struct __anonstruct_per_transport_caps_595 {   uint32_t rc_odp_caps;   uint32_t uc_odp_caps;   uint32_t ud_odp_caps; } ;   223     struct ib_odp_caps {   uint64_t general_caps;   struct __anonstruct_per_transport_caps_595 per_transport_caps; } ;   268     struct ib_cq_init_attr {   unsigned int cqe;   int comp_vector;   u32 flags; } ;   274     struct ib_device_attr {   u64 fw_ver;   __be64 sys_image_guid;   u64 max_mr_size;   u64 page_size_cap;   u32 vendor_id;   u32 vendor_part_id;   u32 hw_ver;   int max_qp;   int max_qp_wr;   u64 device_cap_flags;   int max_sge;   int max_sge_rd;   int max_cq;   int max_cqe;   int max_mr;   int max_pd;   int max_qp_rd_atom;   int max_ee_rd_atom;   int max_res_rd_atom;   int max_qp_init_rd_atom;   int max_ee_init_rd_atom;   enum ib_atomic_cap atomic_cap;   enum ib_atomic_cap masked_atomic_cap;   int max_ee;   int max_rdd;   int max_mw;   int max_raw_ipv6_qp;   int max_raw_ethy_qp;   int max_mcast_grp;   int max_mcast_qp_attach;   int max_total_mcast_qp_attach;   int max_ah;   int max_fmr;   int max_map_per_fmr;   int max_srq;   int max_srq_wr;   int max_srq_sge;   unsigned int max_fast_reg_page_list_len;   u16 max_pkeys;   u8 local_ca_ack_delay;   int sig_prot_cap;   int sig_guard_cap;   struct ib_odp_caps odp_caps;   uint64_t timestamp_mask;   uint64_t hca_core_clock; } ;   322     enum ib_mtu {   IB_MTU_256 = 1,   IB_MTU_512 = 2,   IB_MTU_1024 = 3,   IB_MTU_2048 = 4,   IB_MTU_4096 = 5 } ;   342     enum ib_port_state {   IB_PORT_NOP = 0,   IB_PORT_DOWN = 1,   IB_PORT_INIT = 2,   IB_PORT_ARMED = 3,   IB_PORT_ACTIVE = 4,   IB_PORT_ACTIVE_DEFER = 5 } ;   405     struct rdma_hw_stats {   unsigned long timestamp;   unsigned long lifespan;   const const char **names;   int num_counters;   u64 value[]; } ;   454     struct ib_port_attr {   u64 subnet_prefix;   enum ib_port_state state;   enum ib_mtu max_mtu;   enum ib_mtu active_mtu;   int gid_tbl_len;   u32 port_cap_flags;   u32 max_msg_sz;   u32 bad_pkey_cntr;   u32 qkey_viol_cntr;   u16 pkey_tbl_len;   u16 lid;   u16 sm_lid;   u8 lmc;   u8 max_vl_num;   u8 sm_sl;   u8 subnet_timeout;   u8 init_type_reply;   u8 active_width;   u8 active_speed;   u8 phys_state;   bool grh_required; } ;   527     struct ib_device_modify {   u64 sys_image_guid;   char node_desc[64U]; } ;   538     struct ib_port_modify {   u32 set_port_cap_mask;   u32 clr_port_cap_mask;   u8 init_type; } ;   544     enum ib_event_type {   IB_EVENT_CQ_ERR = 0,   IB_EVENT_QP_FATAL = 1,   IB_EVENT_QP_REQ_ERR = 2,   IB_EVENT_QP_ACCESS_ERR = 3,   IB_EVENT_COMM_EST = 4,   IB_EVENT_SQ_DRAINED = 5,   IB_EVENT_PATH_MIG = 6,   IB_EVENT_PATH_MIG_ERR = 7,   IB_EVENT_DEVICE_FATAL = 8,   IB_EVENT_PORT_ACTIVE = 9,   IB_EVENT_PORT_ERR = 10,   IB_EVENT_LID_CHANGE = 11,   IB_EVENT_PKEY_CHANGE = 12,   IB_EVENT_SM_CHANGE = 13,   IB_EVENT_SRQ_ERR = 14,   IB_EVENT_SRQ_LIMIT_REACHED = 15,   IB_EVENT_QP_LAST_WQE_REACHED = 16,   IB_EVENT_CLIENT_REREGISTER = 17,   IB_EVENT_GID_CHANGE = 18,   IB_EVENT_WQ_FATAL = 19 } ;   569     struct ib_device ;   569     struct ib_cq ;   569     struct ib_qp ;   569     struct ib_srq ;   569     struct ib_wq ;   569     union __anonunion_element_596 {   struct ib_cq *cq;   struct ib_qp *qp;   struct ib_srq *srq;   struct ib_wq *wq;   u8 port_num; } ;   569     struct ib_event {   struct ib_device *device;   union __anonunion_element_596 element;   enum ib_event_type event; } ;   581     struct ib_event_handler {   struct ib_device *device;   void (*handler)(struct ib_event_handler *, struct ib_event *);   struct list_head list; 
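/*
 * [Editor's note: illustrative sketch, not part of the error trace.]
 * The struct ib_event_handler whose definition closes just below is how
 * upper-layer protocols subscribe to the struct ib_event stream defined
 * above. A minimal sketch with a hypothetical demo_event() callback.
 */
#include <rdma/ib_verbs.h>

static void demo_event(struct ib_event_handler *handler,
		       struct ib_event *event)
{
	pr_info("ib_event %d from %s\n", event->event, event->device->name);
}
/* For a given struct ib_device *dev:
 *	struct ib_event_handler eh;
 *	INIT_IB_EVENT_HANDLER(&eh, dev, demo_event);
 *	ib_register_event_handler(&eh);
 * and ib_unregister_event_handler(&eh) to detach. */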
} ;   587     struct ib_global_route {   union ib_gid dgid;   u32 flow_label;   u8 sgid_index;   u8 hop_limit;   u8 traffic_class; } ;   602     struct ib_grh {   __be32 version_tclass_flow;   __be16 paylen;   u8 next_hdr;   u8 hop_limit;   union ib_gid sgid;   union ib_gid dgid; } ;   669     enum ib_mr_type {   IB_MR_TYPE_MEM_REG = 0,   IB_MR_TYPE_SIGNATURE = 1,   IB_MR_TYPE_SG_GAPS = 2 } ;   758     enum ib_sig_err_type {   IB_SIG_BAD_GUARD = 0,   IB_SIG_BAD_REFTAG = 1,   IB_SIG_BAD_APPTAG = 2 } ;   764     struct ib_sig_err {   enum ib_sig_err_type err_type;   u32 expected;   u32 actual;   u64 sig_err_offset;   u32 key; } ;   779     struct ib_mr_status {   u32 fail_status;   struct ib_sig_err sig_err; } ;   799     struct ib_ah_attr {   struct ib_global_route grh;   u16 dlid;   u8 sl;   u8 src_path_bits;   u8 static_rate;   u8 ah_flags;   u8 port_num;   u8 dmac[6U]; } ;   810     enum ib_wc_status {   IB_WC_SUCCESS = 0,   IB_WC_LOC_LEN_ERR = 1,   IB_WC_LOC_QP_OP_ERR = 2,   IB_WC_LOC_EEC_OP_ERR = 3,   IB_WC_LOC_PROT_ERR = 4,   IB_WC_WR_FLUSH_ERR = 5,   IB_WC_MW_BIND_ERR = 6,   IB_WC_BAD_RESP_ERR = 7,   IB_WC_LOC_ACCESS_ERR = 8,   IB_WC_REM_INV_REQ_ERR = 9,   IB_WC_REM_ACCESS_ERR = 10,   IB_WC_REM_OP_ERR = 11,   IB_WC_RETRY_EXC_ERR = 12,   IB_WC_RNR_RETRY_EXC_ERR = 13,   IB_WC_LOC_RDD_VIOL_ERR = 14,   IB_WC_REM_INV_RD_REQ_ERR = 15,   IB_WC_REM_ABORT_ERR = 16,   IB_WC_INV_EECN_ERR = 17,   IB_WC_INV_EEC_STATE_ERR = 18,   IB_WC_FATAL_ERR = 19,   IB_WC_RESP_TIMEOUT_ERR = 20,   IB_WC_GENERAL_ERR = 21 } ;   837     enum ib_wc_opcode {   IB_WC_SEND = 0,   IB_WC_RDMA_WRITE = 1,   IB_WC_RDMA_READ = 2,   IB_WC_COMP_SWAP = 3,   IB_WC_FETCH_ADD = 4,   IB_WC_LSO = 5,   IB_WC_LOCAL_INV = 6,   IB_WC_REG_MR = 7,   IB_WC_MASKED_COMP_SWAP = 8,   IB_WC_MASKED_FETCH_ADD = 9,   IB_WC_RECV = 128,   IB_WC_RECV_RDMA_WITH_IMM = 129 } ;   862     struct ib_cqe ;   862     union __anonunion____missing_field_name_599 {   u64 wr_id;   struct ib_cqe *wr_cqe; } ;   862     union __anonunion_ex_600 {   __be32 imm_data;   u32 invalidate_rkey; } ;   862     struct ib_wc {   union __anonunion____missing_field_name_599 __annonCompField139;   enum ib_wc_status status;   enum ib_wc_opcode opcode;   u32 vendor_err;   u32 byte_len;   struct ib_qp *qp;   union __anonunion_ex_600 ex;   u32 src_qp;   int wc_flags;   u16 pkey_index;   u16 slid;   u8 sl;   u8 dlid_path_bits;   u8 port_num;   u8 smac[6U];   u16 vlan_id;   u8 network_hdr_type; } ;   892     enum ib_cq_notify_flags {   IB_CQ_SOLICITED = 1,   IB_CQ_NEXT_COMP = 2,   IB_CQ_SOLICITED_MASK = 3,   IB_CQ_REPORT_MISSED_EVENTS = 4 } ;   899     enum ib_srq_type {   IB_SRQT_BASIC = 0,   IB_SRQT_XRC = 1 } ;   904     enum ib_srq_attr_mask {   IB_SRQ_MAX_WR = 1,   IB_SRQ_LIMIT = 2 } ;   909     struct ib_srq_attr {   u32 max_wr;   u32 max_sge;   u32 srq_limit; } ;   915     struct ib_xrcd ;   915     struct __anonstruct_xrc_602 {   struct ib_xrcd *xrcd;   struct ib_cq *cq; } ;   915     union __anonunion_ext_601 {   struct __anonstruct_xrc_602 xrc; } ;   915     struct ib_srq_init_attr {   void (*event_handler)(struct ib_event *, void *);   void *srq_context;   struct ib_srq_attr attr;   enum ib_srq_type srq_type;   union __anonunion_ext_601 ext; } ;   929     struct ib_qp_cap {   u32 max_send_wr;   u32 max_recv_wr;   u32 max_send_sge;   u32 max_recv_sge;   u32 max_inline_data;   u32 max_rdma_ctxs; } ;   944     enum ib_sig_type {   IB_SIGNAL_ALL_WR = 0,   IB_SIGNAL_REQ_WR = 1 } ;   949     enum ib_qp_type {   IB_QPT_SMI = 0,   IB_QPT_GSI = 1,   IB_QPT_RC = 2,   IB_QPT_UC = 3,   
IB_QPT_UD = 4,   IB_QPT_RAW_IPV6 = 5,   IB_QPT_RAW_ETHERTYPE = 6,   IB_QPT_RAW_PACKET = 8,   IB_QPT_XRC_INI = 9,   IB_QPT_XRC_TGT = 10,   IB_QPT_MAX = 11,   IB_QPT_RESERVED1 = 4096,   IB_QPT_RESERVED2 = 4097,   IB_QPT_RESERVED3 = 4098,   IB_QPT_RESERVED4 = 4099,   IB_QPT_RESERVED5 = 4100,   IB_QPT_RESERVED6 = 4101,   IB_QPT_RESERVED7 = 4102,   IB_QPT_RESERVED8 = 4103,   IB_QPT_RESERVED9 = 4104,   IB_QPT_RESERVED10 = 4105 } ;   973     enum ib_qp_create_flags {   IB_QP_CREATE_IPOIB_UD_LSO = 1,   IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = 2,   IB_QP_CREATE_CROSS_CHANNEL = 4,   IB_QP_CREATE_MANAGED_SEND = 8,   IB_QP_CREATE_MANAGED_RECV = 16,   IB_QP_CREATE_NETIF_QP = 32,   IB_QP_CREATE_SIGNATURE_EN = 64,   IB_QP_CREATE_USE_GFP_NOIO = 128,   IB_QP_CREATE_SCATTER_FCS = 256,   IB_QP_CREATE_RESERVED_START = 67108864,   IB_QP_CREATE_RESERVED_END = -2147483648 } ;   987     struct ib_rwq_ind_table ;   987     struct ib_qp_init_attr {   void (*event_handler)(struct ib_event *, void *);   void *qp_context;   struct ib_cq *send_cq;   struct ib_cq *recv_cq;   struct ib_srq *srq;   struct ib_xrcd *xrcd;   struct ib_qp_cap cap;   enum ib_sig_type sq_sig_type;   enum ib_qp_type qp_type;   enum ib_qp_create_flags create_flags;   u8 port_num;   struct ib_rwq_ind_table *rwq_ind_tbl; } ;  1092     enum ib_qp_state {   IB_QPS_RESET = 0,   IB_QPS_INIT = 1,   IB_QPS_RTR = 2,   IB_QPS_RTS = 3,   IB_QPS_SQD = 4,   IB_QPS_SQE = 5,   IB_QPS_ERR = 6 } ;  1102     enum ib_mig_state {   IB_MIG_MIGRATED = 0,   IB_MIG_REARM = 1,   IB_MIG_ARMED = 2 } ;  1108     enum ib_mw_type {   IB_MW_TYPE_1 = 1,   IB_MW_TYPE_2 = 2 } ;  1113     struct ib_qp_attr {   enum ib_qp_state qp_state;   enum ib_qp_state cur_qp_state;   enum ib_mtu path_mtu;   enum ib_mig_state path_mig_state;   u32 qkey;   u32 rq_psn;   u32 sq_psn;   u32 dest_qp_num;   int qp_access_flags;   struct ib_qp_cap cap;   struct ib_ah_attr ah_attr;   struct ib_ah_attr alt_ah_attr;   u16 pkey_index;   u16 alt_pkey_index;   u8 en_sqd_async_notify;   u8 sq_draining;   u8 max_rd_atomic;   u8 max_dest_rd_atomic;   u8 min_rnr_timer;   u8 port_num;   u8 timeout;   u8 retry_cnt;   u8 rnr_retry;   u8 alt_port_num;   u8 alt_timeout; } ;  1141     enum ib_wr_opcode {   IB_WR_RDMA_WRITE = 0,   IB_WR_RDMA_WRITE_WITH_IMM = 1,   IB_WR_SEND = 2,   IB_WR_SEND_WITH_IMM = 3,   IB_WR_RDMA_READ = 4,   IB_WR_ATOMIC_CMP_AND_SWP = 5,   IB_WR_ATOMIC_FETCH_AND_ADD = 6,   IB_WR_LSO = 7,   IB_WR_SEND_WITH_INV = 8,   IB_WR_RDMA_READ_WITH_INV = 9,   IB_WR_LOCAL_INV = 10,   IB_WR_REG_MR = 11,   IB_WR_MASKED_ATOMIC_CMP_AND_SWP = 12,   IB_WR_MASKED_ATOMIC_FETCH_AND_ADD = 13,   IB_WR_REG_SIG_MR = 14,   IB_WR_RESERVED1 = 240,   IB_WR_RESERVED2 = 241,   IB_WR_RESERVED3 = 242,   IB_WR_RESERVED4 = 243,   IB_WR_RESERVED5 = 244,   IB_WR_RESERVED6 = 245,   IB_WR_RESERVED7 = 246,   IB_WR_RESERVED8 = 247,   IB_WR_RESERVED9 = 248,   IB_WR_RESERVED10 = 249 } ;  1179     struct ib_sge {   u64 addr;   u32 length;   u32 lkey; } ;  1190     struct ib_cqe {   void (*done)(struct ib_cq *, struct ib_wc *); } ;  1194     union __anonunion____missing_field_name_603 {   u64 wr_id;   struct ib_cqe *wr_cqe; } ;  1194     union __anonunion_ex_604 {   __be32 imm_data;   u32 invalidate_rkey; } ;  1194     struct ib_send_wr {   struct ib_send_wr *next;   union __anonunion____missing_field_name_603 __annonCompField140;   struct ib_sge *sg_list;   int num_sge;   enum ib_wr_opcode opcode;   int send_flags;   union __anonunion_ex_604 ex; } ;  1237     struct ib_ah ;  1254     struct ib_mr ;  1279     union 
__anonunion____missing_field_name_605 {   u64 wr_id;   struct ib_cqe *wr_cqe; } ;  1279     struct ib_recv_wr {   struct ib_recv_wr *next;   union __anonunion____missing_field_name_605 __annonCompField141;   struct ib_sge *sg_list;   int num_sge; } ;  1305     struct ib_fmr_attr {   int max_pages;   int max_maps;   u8 page_shift; } ;  1315     struct ib_umem ;  1316     struct ib_ucontext {   struct ib_device *device;   struct list_head pd_list;   struct list_head mr_list;   struct list_head mw_list;   struct list_head cq_list;   struct list_head qp_list;   struct list_head srq_list;   struct list_head ah_list;   struct list_head xrcd_list;   struct list_head rule_list;   struct list_head wq_list;   struct list_head rwq_ind_tbl_list;   int closing;   struct pid *tgid;   struct rb_root umem_tree;   struct rw_semaphore umem_rwsem;   void (*invalidate_range)(struct ib_umem *, unsigned long, unsigned long);   struct mmu_notifier mn;   atomic_t notifier_count;   struct list_head no_private_counters;   int odp_mrs_count; } ;  1350     struct ib_uobject {   u64 user_handle;   struct ib_ucontext *context;   void *object;   struct list_head list;   int id;   struct kref ref;   struct rw_semaphore mutex;   struct callback_head rcu;   int live; } ;  1363     struct ib_udata {   const void *inbuf;   void *outbuf;   size_t inlen;   size_t outlen; } ;  1370     struct ib_pd {   u32 local_dma_lkey;   struct ib_device *device;   struct ib_uobject *uobject;   atomic_t usecnt;   struct ib_mr *local_mr; } ;  1378     struct ib_xrcd {   struct ib_device *device;   atomic_t usecnt;   struct inode *inode;   struct mutex tgt_qp_mutex;   struct list_head tgt_qp_list; } ;  1387     struct ib_ah {   struct ib_device *device;   struct ib_pd *pd;   struct ib_uobject *uobject; } ;  1395     enum ib_poll_context {   IB_POLL_DIRECT = 0,   IB_POLL_SOFTIRQ = 1,   IB_POLL_WORKQUEUE = 2 } ;  1401     union __anonunion____missing_field_name_606 {   struct irq_poll iop;   struct work_struct work; } ;  1401     struct ib_cq {   struct ib_device *device;   struct ib_uobject *uobject;   void (*comp_handler)(struct ib_cq *, void *);   void (*event_handler)(struct ib_event *, void *);   void *cq_context;   int cqe;   atomic_t usecnt;   enum ib_poll_context poll_ctx;   struct ib_wc *wc;   union __anonunion____missing_field_name_606 __annonCompField142; } ;  1417     struct __anonstruct_xrc_608 {   struct ib_xrcd *xrcd;   struct ib_cq *cq;   u32 srq_num; } ;  1417     union __anonunion_ext_607 {   struct __anonstruct_xrc_608 xrc; } ;  1417     struct ib_srq {   struct ib_device *device;   struct ib_pd *pd;   struct ib_uobject *uobject;   void (*event_handler)(struct ib_event *, void *);   void *srq_context;   enum ib_srq_type srq_type;   atomic_t usecnt;   union __anonunion_ext_607 ext; } ;  1435     enum ib_wq_type {   IB_WQT_RQ = 0 } ;  1439     enum ib_wq_state {   IB_WQS_RESET = 0,   IB_WQS_RDY = 1,   IB_WQS_ERR = 2 } ;  1445     struct ib_wq {   struct ib_device *device;   struct ib_uobject *uobject;   void *wq_context;   void (*event_handler)(struct ib_event *, void *);   struct ib_pd *pd;   struct ib_cq *cq;   u32 wq_num;   enum ib_wq_state state;   enum ib_wq_type wq_type;   atomic_t usecnt; } ;  1458     struct ib_wq_init_attr {   void *wq_context;   enum ib_wq_type wq_type;   u32 max_wr;   u32 max_sge;   struct ib_cq *cq;   void (*event_handler)(struct ib_event *, void *); } ;  1472     struct ib_wq_attr {   enum ib_wq_state wq_state;   enum ib_wq_state curr_wq_state; } ;  1477     struct ib_rwq_ind_table {   struct 
ib_device *device;   struct ib_uobject *uobject;   atomic_t usecnt;   u32 ind_tbl_num;   u32 log_ind_tbl_size;   struct ib_wq **ind_tbl; } ;  1486     struct ib_rwq_ind_table_init_attr {   u32 log_ind_tbl_size;   struct ib_wq **ind_tbl; } ;  1492     struct ib_qp {   struct ib_device *device;   struct ib_pd *pd;   struct ib_cq *send_cq;   struct ib_cq *recv_cq;   spinlock_t mr_lock;   int mrs_used;   struct list_head rdma_mrs;   struct list_head sig_mrs;   struct ib_srq *srq;   struct ib_xrcd *xrcd;   struct list_head xrcd_list;   atomic_t usecnt;   struct list_head open_list;   struct ib_qp *real_qp;   struct ib_uobject *uobject;   void (*event_handler)(struct ib_event *, void *);   void *qp_context;   u32 qp_num;   u32 max_write_sge;   u32 max_read_sge;   enum ib_qp_type qp_type;   struct ib_rwq_ind_table *rwq_ind_tbl; } ;  1523     union __anonunion____missing_field_name_609 {   struct ib_uobject *uobject;   struct list_head qp_entry; } ;  1523     struct ib_mr {   struct ib_device *device;   struct ib_pd *pd;   u32 lkey;   u32 rkey;   u64 iova;   u32 length;   unsigned int page_size;   bool need_inval;   union __anonunion____missing_field_name_609 __annonCompField143; } ;  1538     struct ib_mw {   struct ib_device *device;   struct ib_pd *pd;   struct ib_uobject *uobject;   u32 rkey;   enum ib_mw_type type; } ;  1546     struct ib_fmr {   struct ib_device *device;   struct ib_pd *pd;   struct list_head list;   u32 lkey;   u32 rkey; } ;  1554     enum ib_flow_attr_type {   IB_FLOW_ATTR_NORMAL = 0,   IB_FLOW_ATTR_ALL_DEFAULT = 1,   IB_FLOW_ATTR_MC_DEFAULT = 2,   IB_FLOW_ATTR_SNIFFER = 3 } ;  1675     struct ib_flow_attr {   enum ib_flow_attr_type type;   u16 size;   u16 priority;   u32 flags;   u8 num_of_specs;   u8 port; } ;  1684     struct ib_flow {   struct ib_qp *qp;   struct ib_uobject *uobject; } ;  1693     struct ib_mad_hdr ;  1707     struct ib_pkey_cache ;  1707     struct ib_gid_table ;  1707     struct ib_cache {   rwlock_t lock;   struct ib_event_handler event_handler;   struct ib_pkey_cache **pkey_cache;   struct ib_gid_table **gid_cache;   u8 *lmc_cache; } ;  1719     struct ib_dma_mapping_ops {   int (*mapping_error)(struct ib_device *, u64 );   u64  (*map_single)(struct ib_device *, void *, size_t , enum dma_data_direction );   void (*unmap_single)(struct ib_device *, u64 , size_t , enum dma_data_direction );   u64  (*map_page)(struct ib_device *, struct page *, unsigned long, size_t , enum dma_data_direction );   void (*unmap_page)(struct ib_device *, u64 , size_t , enum dma_data_direction );   int (*map_sg)(struct ib_device *, struct scatterlist *, int, enum dma_data_direction );   void (*unmap_sg)(struct ib_device *, struct scatterlist *, int, enum dma_data_direction );   void (*sync_single_for_cpu)(struct ib_device *, u64 , size_t , enum dma_data_direction );   void (*sync_single_for_device)(struct ib_device *, u64 , size_t , enum dma_data_direction );   void * (*alloc_coherent)(struct ib_device *, size_t , u64 *, gfp_t );   void (*free_coherent)(struct ib_device *, size_t , void *, u64 ); } ;  1756     struct iw_cm_verbs ;  1757     struct ib_port_immutable {   int pkey_tbl_len;   int gid_tbl_len;   u32 core_cap_flags;   u32 max_mad_size; } ;  1767     enum ldv_40285 {   IB_DEV_UNINITIALIZED = 0,   IB_DEV_REGISTERED = 1,   IB_DEV_UNREGISTERED = 2 } ;  1773     struct ib_device {   struct device *dma_device;   char name[64U];   struct list_head event_handler_list;   spinlock_t event_handler_lock;   spinlock_t client_data_lock;   struct list_head core_list;   struct 
list_head client_data_list;   struct ib_cache cache;   struct ib_port_immutable *port_immutable;   int num_comp_vectors;   struct iw_cm_verbs *iwcm;   struct rdma_hw_stats * (*alloc_hw_stats)(struct ib_device *, u8 );   int (*get_hw_stats)(struct ib_device *, struct rdma_hw_stats *, u8 , int);   int (*query_device)(struct ib_device *, struct ib_device_attr *, struct ib_udata *);   int (*query_port)(struct ib_device *, u8 , struct ib_port_attr *);   enum rdma_link_layer  (*get_link_layer)(struct ib_device *, u8 );   struct net_device * (*get_netdev)(struct ib_device *, u8 );   int (*query_gid)(struct ib_device *, u8 , int, union ib_gid *);   int (*add_gid)(struct ib_device *, u8 , unsigned int, const union ib_gid *, const struct ib_gid_attr *, void **);   int (*del_gid)(struct ib_device *, u8 , unsigned int, void **);   int (*query_pkey)(struct ib_device *, u8 , u16 , u16 *);   int (*modify_device)(struct ib_device *, int, struct ib_device_modify *);   int (*modify_port)(struct ib_device *, u8 , int, struct ib_port_modify *);   struct ib_ucontext * (*alloc_ucontext)(struct ib_device *, struct ib_udata *);   int (*dealloc_ucontext)(struct ib_ucontext *);   int (*mmap)(struct ib_ucontext *, struct vm_area_struct *);   struct ib_pd * (*alloc_pd)(struct ib_device *, struct ib_ucontext *, struct ib_udata *);   int (*dealloc_pd)(struct ib_pd *);   struct ib_ah * (*create_ah)(struct ib_pd *, struct ib_ah_attr *);   int (*modify_ah)(struct ib_ah *, struct ib_ah_attr *);   int (*query_ah)(struct ib_ah *, struct ib_ah_attr *);   int (*destroy_ah)(struct ib_ah *);   struct ib_srq * (*create_srq)(struct ib_pd *, struct ib_srq_init_attr *, struct ib_udata *);   int (*modify_srq)(struct ib_srq *, struct ib_srq_attr *, enum ib_srq_attr_mask , struct ib_udata *);   int (*query_srq)(struct ib_srq *, struct ib_srq_attr *);   int (*destroy_srq)(struct ib_srq *);   int (*post_srq_recv)(struct ib_srq *, struct ib_recv_wr *, struct ib_recv_wr **);   struct ib_qp * (*create_qp)(struct ib_pd *, struct ib_qp_init_attr *, struct ib_udata *);   int (*modify_qp)(struct ib_qp *, struct ib_qp_attr *, int, struct ib_udata *);   int (*query_qp)(struct ib_qp *, struct ib_qp_attr *, int, struct ib_qp_init_attr *);   int (*destroy_qp)(struct ib_qp *);   int (*post_send)(struct ib_qp *, struct ib_send_wr *, struct ib_send_wr **);   int (*post_recv)(struct ib_qp *, struct ib_recv_wr *, struct ib_recv_wr **);   struct ib_cq * (*create_cq)(struct ib_device *, const struct ib_cq_init_attr *, struct ib_ucontext *, struct ib_udata *);   int (*modify_cq)(struct ib_cq *, u16 , u16 );   int (*destroy_cq)(struct ib_cq *);   int (*resize_cq)(struct ib_cq *, int, struct ib_udata *);   int (*poll_cq)(struct ib_cq *, int, struct ib_wc *);   int (*peek_cq)(struct ib_cq *, int);   int (*req_notify_cq)(struct ib_cq *, enum ib_cq_notify_flags );   int (*req_ncomp_notif)(struct ib_cq *, int);   struct ib_mr * (*get_dma_mr)(struct ib_pd *, int);   struct ib_mr * (*reg_user_mr)(struct ib_pd *, u64 , u64 , u64 , int, struct ib_udata *);   int (*rereg_user_mr)(struct ib_mr *, int, u64 , u64 , u64 , int, struct ib_pd *, struct ib_udata *);   int (*dereg_mr)(struct ib_mr *);   struct ib_mr * (*alloc_mr)(struct ib_pd *, enum ib_mr_type , u32 );   int (*map_mr_sg)(struct ib_mr *, struct scatterlist *, int, unsigned int *);   struct ib_mw * (*alloc_mw)(struct ib_pd *, enum ib_mw_type , struct ib_udata *);   int (*dealloc_mw)(struct ib_mw *);   struct ib_fmr * (*alloc_fmr)(struct ib_pd *, int, struct ib_fmr_attr *);   int (*map_phys_fmr)(struct ib_fmr *, 
u64 *, int, u64 );   int (*unmap_fmr)(struct list_head *);   int (*dealloc_fmr)(struct ib_fmr *);   int (*attach_mcast)(struct ib_qp *, union ib_gid *, u16 );   int (*detach_mcast)(struct ib_qp *, union ib_gid *, u16 );   int (*process_mad)(struct ib_device *, int, u8 , const struct ib_wc *, const struct ib_grh *, const struct ib_mad_hdr *, size_t , struct ib_mad_hdr *, size_t *, u16 *);   struct ib_xrcd * (*alloc_xrcd)(struct ib_device *, struct ib_ucontext *, struct ib_udata *);   int (*dealloc_xrcd)(struct ib_xrcd *);   struct ib_flow * (*create_flow)(struct ib_qp *, struct ib_flow_attr *, int);   int (*destroy_flow)(struct ib_flow *);   int (*check_mr_status)(struct ib_mr *, u32 , struct ib_mr_status *);   void (*disassociate_ucontext)(struct ib_ucontext *);   void (*drain_rq)(struct ib_qp *);   void (*drain_sq)(struct ib_qp *);   int (*set_vf_link_state)(struct ib_device *, int, u8 , int);   int (*get_vf_config)(struct ib_device *, int, u8 , struct ifla_vf_info *);   int (*get_vf_stats)(struct ib_device *, int, u8 , struct ifla_vf_stats *);   int (*set_vf_guid)(struct ib_device *, int, u8 , u64 , int);   struct ib_wq * (*create_wq)(struct ib_pd *, struct ib_wq_init_attr *, struct ib_udata *);   int (*destroy_wq)(struct ib_wq *);   int (*modify_wq)(struct ib_wq *, struct ib_wq_attr *, u32 , struct ib_udata *);   struct ib_rwq_ind_table * (*create_rwq_ind_table)(struct ib_device *, struct ib_rwq_ind_table_init_attr *, struct ib_udata *);   int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *);   struct ib_dma_mapping_ops *dma_ops;   struct module *owner;   struct device dev;   struct kobject *ports_parent;   struct list_head port_list;   enum ldv_40285 reg_state;   int uverbs_abi_ver;   u64 uverbs_cmd_mask;   u64 uverbs_ex_cmd_mask;   char node_desc[64U];   __be64 node_guid;   u32 local_dma_lkey;   unsigned char is_switch;   u8 node_type;   u8 phys_port_cnt;   struct ib_device_attr attrs;   struct attribute_group *hw_stats_ag;   struct rdma_hw_stats *hw_stats;   int (*get_port_immutable)(struct ib_device *, u8 , struct ib_port_immutable *);   void (*get_dev_fw_str)(struct ib_device *, char *, size_t ); } ;   249     struct ib_mad_hdr {   u8 base_version;   u8 mgmt_class;   u8 class_version;   u8 method;   __be16 status;   __be16 class_specific;   __be64 tid;   __be16 attr_id;   __be16 resv;   __be32 attr_mod; } ;   148     struct ib_umem_odp ;   149     struct ib_umem {   struct ib_ucontext *context;   size_t length;   unsigned long address;   int page_size;   int writable;   int hugetlb;   struct work_struct work;   struct pid *pid;   struct mm_struct *mm;   unsigned long diff;   struct ib_umem_odp *odp_data;   struct sg_table sg_head;   int nmap;   int npages; } ;   165     struct ipv4_devconf {   void *sysctl;   int data[31U];   unsigned long state[1U]; } ;    20     struct ip_mc_list ;    20     struct in_device {   struct net_device *dev;   atomic_t refcnt;   int dead;   struct in_ifaddr *ifa_list;   struct ip_mc_list *mc_list;   struct ip_mc_list **mc_hash;   int mc_count;   spinlock_t mc_tomb_lock;   struct ip_mc_list *mc_tomb;   unsigned long mr_v1_seen;   unsigned long mr_v2_seen;   unsigned long mr_maxdelay;   unsigned char mr_qrv;   unsigned char mr_gq_running;   unsigned char mr_ifc_count;   struct timer_list mr_gq_timer;   struct timer_list mr_ifc_timer;   struct neigh_parms *arp_parms;   struct ipv4_devconf cnf;   struct callback_head callback_head; } ;    71     struct in_ifaddr {   struct hlist_node hash;   struct in_ifaddr *ifa_next;   struct in_device *ifa_dev;   
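/*
 * [Editor's note: illustrative sketch, not part of the error trace.]
 * struct in_device above anchors a device's IPv4 state; the struct
 * in_ifaddr continuing below is one address on its ifa_list. A minimal
 * sketch of walking that list, assuming the for_ifa()/endfor_ifa()
 * helpers from <linux/inetdevice.h> of this kernel generation.
 */
#include <linux/inetdevice.h>

static void demo_dump_addrs(struct net_device *dev)
{
	struct in_device *in_dev = in_dev_get(dev);	/* takes a reference */

	if (!in_dev)
		return;
	for_ifa(in_dev) {
		pr_info("%s: %pI4/%u\n", ifa->ifa_label,
			&ifa->ifa_address, ifa->ifa_prefixlen);
	} endfor_ifa(in_dev);
	in_dev_put(in_dev);
}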
struct callback_head callback_head;   __be32 ifa_local;   __be32 ifa_address;   __be32 ifa_mask;   __be32 ifa_broadcast;   unsigned char ifa_scope;   unsigned char ifa_prefixlen;   __u32 ifa_flags;   char ifa_label[16U];   __u32 ifa_valid_lft;   __u32 ifa_preferred_lft;   unsigned long ifa_cstamp;   unsigned long ifa_tstamp; } ;    48     struct rxe_dev ;   128     struct rxe_qp ;   128     struct rxe_send_wqe ;   128     struct rxe_pkt_info {   struct rxe_dev *rxe;   struct rxe_qp *qp;   struct rxe_send_wqe *wqe;   u8 *hdr;   u32 mask;   u32 psn;   u16 pkey_index;   u16 paylen;   u8 port_num;   u8 opcode;   u8 offset; } ;   149     struct __anonstruct_global_641 {   __be64 subnet_prefix;   __be64 interface_id; } ;   149     union rxe_gid {   __u8 raw[16U];   struct __anonstruct_global_641 global; } ;    45     struct rxe_global_route {   union rxe_gid dgid;   __u32 flow_label;   __u8 sgid_index;   __u8 hop_limit;   __u8 traffic_class; } ;    53     union __anonunion_sgid_addr_642 {   struct sockaddr _sockaddr;   struct sockaddr_in _sockaddr_in;   struct sockaddr_in6 _sockaddr_in6; } ;    53     union __anonunion_dgid_addr_643 {   struct sockaddr _sockaddr;   struct sockaddr_in _sockaddr_in;   struct sockaddr_in6 _sockaddr_in6; } ;    53     struct rxe_av {   __u8 port_num;   __u8 network_type;   struct rxe_global_route grh;   union __anonunion_sgid_addr_642 sgid_addr;   union __anonunion_dgid_addr_643 dgid_addr; } ;    64     union __anonunion_ex_644 {   __be32 imm_data;   __u32 invalidate_rkey; } ;    64     struct __anonstruct_rdma_646 {   __u64 remote_addr;   __u32 rkey; } ;    64     struct __anonstruct_atomic_647 {   __u64 remote_addr;   __u64 compare_add;   __u64 swap;   __u32 rkey; } ;    64     struct __anonstruct_ud_648 {   __u32 remote_qpn;   __u32 remote_qkey;   __u16 pkey_index; } ;    64     struct __anonstruct_reg_649 {   struct ib_mr *mr;   __u32 key;   int access; } ;    64     union __anonunion_wr_645 {   struct __anonstruct_rdma_646 rdma;   struct __anonstruct_atomic_647 atomic;   struct __anonstruct_ud_648 ud;   struct __anonstruct_reg_649 reg; } ;    64     struct rxe_send_wr {   __u64 wr_id;   __u32 num_sge;   __u32 opcode;   __u32 send_flags;   union __anonunion_ex_644 ex;   union __anonunion_wr_645 wr; } ;    97     struct rxe_sge {   __u64 addr;   __u32 length;   __u32 lkey; } ;   109     union __anonunion____missing_field_name_650 {   __u8 inline_data[0U];   struct rxe_sge sge[0U]; } ;   109     struct rxe_dma_info {   __u32 length;   __u32 resid;   __u32 cur_sge;   __u32 num_sge;   __u32 sge_offset;   union __anonunion____missing_field_name_650 __annonCompField154; } ;   121     struct rxe_send_wqe {   struct rxe_send_wr wr;   struct rxe_av av;   __u32 status;   __u32 state;   __u64 iova;   __u32 mask;   __u32 first_psn;   __u32 last_psn;   __u32 ack_length;   __u32 ssn;   __u32 has_rd_atomic;   struct rxe_dma_info dma; } ;   136     struct rxe_recv_wqe {   __u64 wr_id;   __u32 num_sge;   __u32 padding;   struct rxe_dma_info dma; } ;   143     enum rxe_pool_flags {   RXE_POOL_ATOMIC = 1,   RXE_POOL_INDEX = 2,   RXE_POOL_KEY = 4 } ;   149     enum rxe_elem_type {   RXE_TYPE_UC = 0,   RXE_TYPE_PD = 1,   RXE_TYPE_AH = 2,   RXE_TYPE_SRQ = 3,   RXE_TYPE_QP = 4,   RXE_TYPE_CQ = 5,   RXE_TYPE_MR = 6,   RXE_TYPE_MW = 7,   RXE_TYPE_MC_GRP = 8,   RXE_TYPE_MC_ELEM = 9,   RXE_NUM_TYPES = 10 } ;    73     enum rxe_pool_state {   rxe_pool_invalid = 0,   rxe_pool_valid = 1 } ;    78     struct rxe_pool ;    78     struct rxe_pool_entry {   struct rxe_pool *pool;   struct kref 
ref_cnt;   struct list_head list;   struct rb_node node;   u32 index; } ;    88     struct rxe_pool {   struct rxe_dev *rxe;   spinlock_t pool_lock;   size_t elem_size;   struct kref ref_cnt;   void (*cleanup)(void *);   enum rxe_pool_state state;   enum rxe_pool_flags flags;   enum rxe_elem_type type;   unsigned int max_elem;   atomic_t num_elem;   struct rb_root tree;   unsigned long *table;   size_t table_size;   u32 max_index;   u32 min_index;   u32 last;   size_t key_offset;   size_t key_size; } ;   162     struct rxe_task {   void *obj;   struct tasklet_struct tasklet;   int state;   spinlock_t state_lock;   void *arg;   int (*func)(void *);   int ret;   char name[16U]; } ;    65     struct rxe_pd {   struct rxe_pool_entry pelem;   struct ib_pd ibpd; } ;    84     struct rxe_queue ;    84     struct rxe_cq {   struct rxe_pool_entry pelem;   struct ib_cq ibcq;   struct rxe_queue *queue;   spinlock_t cq_lock;   u8 notify;   int is_user;   struct tasklet_struct comp_task; } ;   102     struct rxe_sq {   int max_wr;   int max_sge;   int max_inline;   spinlock_t sq_lock;   struct rxe_queue *queue; } ;   110     struct rxe_rq {   int max_wr;   int max_sge;   spinlock_t producer_lock;   spinlock_t consumer_lock;   struct rxe_queue *queue; } ;   118     struct rxe_srq {   struct rxe_pool_entry pelem;   struct ib_srq ibsrq;   struct rxe_pd *pd;   struct rxe_rq rq;   u32 srq_num;   int limit;   int error; } ;   129     enum rxe_qp_state {   QP_STATE_RESET = 0,   QP_STATE_INIT = 1,   QP_STATE_READY = 2,   QP_STATE_DRAIN = 3,   QP_STATE_DRAINED = 4,   QP_STATE_ERROR = 5 } ;   140     struct rxe_req_info {   enum rxe_qp_state state;   int wqe_index;   u32 psn;   int opcode;   atomic_t rd_atomic;   int wait_fence;   int need_rd_atomic;   int wait_psn;   int need_retry;   int noack_pkts;   struct rxe_task task; } ;   154     struct rxe_comp_info {   u32 psn;   int opcode;   int timeout;   int timeout_retry;   u32 retry_cnt;   u32 rnr_retry;   struct rxe_task task; } ;   164     enum rdatm_res_state {   rdatm_res_state_next = 0,   rdatm_res_state_new = 1,   rdatm_res_state_replay = 2 } ;   170     struct __anonstruct_atomic_653 {   struct sk_buff *skb; } ;   170     struct rxe_mem ;   170     struct __anonstruct_read_654 {   struct rxe_mem *mr;   u64 va_org;   u32 rkey;   u32 length;   u64 va;   u32 resid; } ;   170     union __anonunion____missing_field_name_652 {   struct __anonstruct_atomic_653 atomic;   struct __anonstruct_read_654 read; } ;   170     struct resp_res {   int type;   u32 first_psn;   u32 last_psn;   u32 cur_psn;   enum rdatm_res_state state;   union __anonunion____missing_field_name_652 __annonCompField156; } ;   192     struct __anonstruct_srq_wqe_655 {   struct rxe_recv_wqe wqe;   struct ib_sge sge[32U]; } ;   192     struct rxe_resp_info {   enum rxe_qp_state state;   u32 msn;   u32 psn;   int opcode;   int drop_msg;   int goto_error;   int sent_psn_nak;   enum ib_wc_status status;   u8 aeth_syndrome;   struct rxe_recv_wqe *wqe;   u64 va;   struct rxe_mem *mr;   u32 resid;   u32 rkey;   u64 atomic_orig;   struct __anonstruct_srq_wqe_655 srq_wqe;   struct resp_res *resources;   unsigned int res_head;   unsigned int res_tail;   struct resp_res *res;   struct rxe_task task; } ;   229     struct rxe_qp {   struct rxe_pool_entry pelem;   struct ib_qp ibqp;   struct ib_qp_attr attr;   unsigned int valid;   unsigned int mtu;   int is_user;   struct rxe_pd *pd;   struct rxe_srq *srq;   struct rxe_cq *scq;   struct rxe_cq *rcq;   enum ib_sig_type sq_sig_type;   struct rxe_sq sq;   
struct rxe_rq rq;   struct socket *sk;   struct rxe_av pri_av;   struct rxe_av alt_av;   struct list_head grp_list;   spinlock_t grp_lock;   struct sk_buff_head req_pkts;   struct sk_buff_head resp_pkts;   struct sk_buff_head send_pkts;   struct rxe_req_info req;   struct rxe_comp_info comp;   struct rxe_resp_info resp;   atomic_t ssn;   atomic_t skb_out;   int need_req_skb;   struct timer_list retrans_timer;   u64 qp_timeout_jiffies;   struct timer_list rnr_nak_timer;   spinlock_t state_lock; } ;   282     enum rxe_mem_state {   RXE_MEM_STATE_ZOMBIE = 0,   RXE_MEM_STATE_INVALID = 1,   RXE_MEM_STATE_FREE = 2,   RXE_MEM_STATE_VALID = 3 } ;   289     enum rxe_mem_type {   RXE_MEM_TYPE_NONE = 0,   RXE_MEM_TYPE_DMA = 1,   RXE_MEM_TYPE_MR = 2,   RXE_MEM_TYPE_FMR = 3,   RXE_MEM_TYPE_MW = 4 } ;   297     struct rxe_phys_buf {   u64 addr;   u64 size; } ;   304     struct rxe_map {   struct rxe_phys_buf buf[256U]; } ;   308     union __anonunion____missing_field_name_656 {   struct ib_mr ibmr;   struct ib_mw ibmw; } ;   308     struct rxe_mem {   struct rxe_pool_entry pelem;   union __anonunion____missing_field_name_656 __annonCompField157;   struct rxe_pd *pd;   struct ib_umem *umem;   u32 lkey;   u32 rkey;   enum rxe_mem_state state;   enum rxe_mem_type type;   u64 va;   u64 iova;   size_t length;   u32 offset;   int access;   int page_shift;   int page_mask;   int map_shift;   int map_mask;   u32 num_buf;   u32 nbuf;   u32 max_buf;   u32 num_map;   struct rxe_map **map; } ;   362     struct rxe_port {   struct ib_port_attr attr;   u16 *pkey_tbl;   __be64 port_guid;   __be64 subnet_prefix;   spinlock_t port_lock;   unsigned int mtu_cap;   u32 qp_smi_index;   u32 qp_gsi_index; } ;   374     struct rxe_ifc_ops {   void (*release)(struct rxe_dev *);   __be64  (*node_guid)(struct rxe_dev *);   __be64  (*port_guid)(struct rxe_dev *);   struct device * (*dma_device)(struct rxe_dev *);   int (*mcast_add)(struct rxe_dev *, union ib_gid *);   int (*mcast_delete)(struct rxe_dev *, union ib_gid *);   int (*prepare)(struct rxe_dev *, struct rxe_pkt_info *, struct sk_buff *, u32 *);   int (*send)(struct rxe_dev *, struct rxe_pkt_info *, struct sk_buff *);   int (*loopback)(struct sk_buff *);   struct sk_buff * (*init_packet)(struct rxe_dev *, struct rxe_av *, int, struct rxe_pkt_info *);   char * (*parent_name)(struct rxe_dev *, unsigned int);   enum rdma_link_layer  (*link_layer)(struct rxe_dev *, unsigned int); } ;   393     struct rxe_dev {   struct ib_device ib_dev;   struct ib_device_attr attr;   int max_ucontext;   int max_inline_data;   struct kref ref_cnt;   struct mutex usdev_lock;   struct rxe_ifc_ops *ifc_ops;   struct net_device *ndev;   int xmit_errors;   struct rxe_pool uc_pool;   struct rxe_pool pd_pool;   struct rxe_pool ah_pool;   struct rxe_pool srq_pool;   struct rxe_pool qp_pool;   struct rxe_pool cq_pool;   struct rxe_pool mr_pool;   struct rxe_pool mw_pool;   struct rxe_pool mc_grp_pool;   struct rxe_pool mc_elem_pool;   spinlock_t pending_lock;   struct list_head pending_mmaps;   spinlock_t mmap_offset_lock;   int mmap_offset;   struct rxe_port port;   struct list_head list; } ;   279     struct _ddebug {   const char *modname;   const char *function;   const char *filename;   const char *format;   unsigned int lineno;   unsigned char flags; } ;   405     union __anonunion_ex_612 {   __u32 imm_data;   __u32 invalidate_rkey; } ;   405     struct ib_uverbs_wc {   __u64 wr_id;   __u32 status;   __u32 opcode;   __u32 vendor_err;   __u32 byte_len;   union __anonunion_ex_612 ex;   __u32 
qp_num;   __u32 src_qp;   __u32 wc_flags;   __u16 pkey_index;   __u16 slid;   __u8 sl;   __u8 dlid_path_bits;   __u8 port_num;   __u8 reserved; } ;    82     enum rxe_hdr_mask {   RXE_LRH_MASK = 1,   RXE_GRH_MASK = 2,   RXE_BTH_MASK = 4,   RXE_IMMDT_MASK = 1024,   RXE_RETH_MASK = 8,   RXE_AETH_MASK = 16,   RXE_ATMETH_MASK = 32,   RXE_ATMACK_MASK = 64,   RXE_IETH_MASK = 128,   RXE_RDETH_MASK = 256,   RXE_DETH_MASK = 512,   RXE_PAYLOAD_MASK = 2048,   RXE_REQ_MASK = 4096,   RXE_ACK_MASK = 8192,   RXE_SEND_MASK = 16384,   RXE_WRITE_MASK = 32768,   RXE_READ_MASK = 65536,   RXE_ATOMIC_MASK = 131072,   RXE_RWR_MASK = 262144,   RXE_COMP_MASK = 524288,   RXE_START_MASK = 1048576,   RXE_MIDDLE_MASK = 2097152,   RXE_END_MASK = 4194304,   RXE_LOOPBACK_MASK = 16777216,   RXE_READ_OR_ATOMIC = 196608,   RXE_WRITE_OR_SEND = 49152 } ;   111     struct rxe_opcode_info {   char *name;   enum rxe_hdr_mask mask;   int length;   int offset[12U]; } ;    54     struct rxe_bth {   u8 opcode;   u8 flags;   __be16 pkey;   __be32 qpn;   __be32 apsn; } ;   743     struct rxe_aeth {   __be32 smsn; } ;   823     struct rxe_atmack {   __be64 orig; } ;   103     struct mminfo {   __u64 offset;   __u32 size;   __u32 pad; } ;    77     union __anonunion____missing_field_name_651 {   struct ib_wc ibwc;   struct ib_uverbs_wc uibwc; } ;    77     struct rxe_cqe {   union __anonunion____missing_field_name_651 __annonCompField155; } ;    82     struct rxe_mmap_info {   struct list_head pending_mmaps;   struct ib_ucontext *context;   struct kref ref;   void *obj;   struct mminfo info; } ;   101     enum copy_direction {   to_mem_obj = 0,   from_mem_obj = 1 } ;   285     struct rxe_queue_buf {   __u32 log2_elem_size;   __u32 index_mask;   __u32 pad_1[30U];   __u32 producer_index;   __u32 pad_2[31U];   __u32 consumer_index;   __u32 pad_3[31U];   __u8 data[0U]; } ;    68     struct rxe_queue {   struct rxe_dev *rxe;   struct rxe_queue_buf *buf;   struct rxe_mmap_info *ip;   size_t buf_size;   size_t elem_size;   unsigned int log2_elem_size;   unsigned int index_mask; } ;   177     enum comp_state {   COMPST_GET_ACK = 0,   COMPST_GET_WQE = 1,   COMPST_COMP_WQE = 2,   COMPST_COMP_ACK = 3,   COMPST_CHECK_PSN = 4,   COMPST_CHECK_ACK = 5,   COMPST_READ = 6,   COMPST_ATOMIC = 7,   COMPST_WRITE_SEND = 8,   COMPST_UPDATE_COMP = 9,   COMPST_ERROR_RETRY = 10,   COMPST_RNR_RETRY = 11,   COMPST_ERROR = 12,   COMPST_EXIT = 13,   COMPST_DONE = 14 } ;    26     union __anonunion___u_42 {   int __val;   char __c[1U]; } ;    52     enum rxe_wr_mask {   WR_INLINE_MASK = 1,   WR_ATOMIC_MASK = 2,   WR_SEND_MASK = 4,   WR_READ_MASK = 8,   WR_WRITE_MASK = 16,   WR_LOCAL_MASK = 32,   WR_REG_MASK = 64,   WR_READ_OR_WRITE_MASK = 24,   WR_READ_WRITE_OR_SEND_MASK = 28,   WR_WRITE_OR_SEND_MASK = 20,   WR_ATOMIC_OR_READ_MASK = 10 } ;    66     struct rxe_wr_opcode_info {   char *name;   enum rxe_wr_mask mask[8U]; } ;   479     struct rxe_deth {   __be32 qkey;   __be32 sqp; } ;   542     struct rxe_reth {   __be64 va;   __be32 rkey;   __be32 len; } ;   629     struct rxe_atmeth {   __be64 va;   __be32 rkey;   __be64 swap_add;   __be64 comp; } ;   856     struct rxe_immdt {   __be32 imm; } ;   889     struct rxe_ieth {   __be32 rkey; } ;    94     enum wqe_state {   wqe_state_posted = 0,   wqe_state_processing = 1,   wqe_state_pending = 2,   wqe_state_done = 3,   wqe_state_error = 4 } ;    39     typedef __u16 __sum16;    43     struct ratelimit_state {   raw_spinlock_t lock;   int interval;   int burst;   int printed;   int missed;   unsigned long begin;   
unsigned long flags; } ;   106     struct ipv6hdr {   unsigned char priority;   unsigned char version;   __u8 flow_lbl[3U];   __be16 payload_len;   __u8 nexthdr;   __u8 hop_limit;   struct in6_addr saddr;   struct in6_addr daddr; } ;  1047     struct iphdr {   unsigned char ihl;   unsigned char version;   __u8 tos;   __be16 tot_len;   __be16 id;   __be16 frag_off;   __u8 ttl;   __u8 protocol;   __sum16 check;   __be32 saddr;   __be32 daddr; } ;   611     struct __anonstruct____missing_field_name_597 {   u8 reserved[20U];   struct iphdr roce4grh; } ;   611     union rdma_network_hdr {   struct ib_grh ibgrh;   struct __anonstruct____missing_field_name_597 __annonCompField138; } ;   126     enum lookup_type {   lookup_local = 0,   lookup_remote = 1 } ;   177     enum resp_states {   RESPST_NONE = 0,   RESPST_GET_REQ = 1,   RESPST_CHK_PSN = 2,   RESPST_CHK_OP_SEQ = 3,   RESPST_CHK_OP_VALID = 4,   RESPST_CHK_RESOURCE = 5,   RESPST_CHK_LENGTH = 6,   RESPST_CHK_RKEY = 7,   RESPST_EXECUTE = 8,   RESPST_READ_REPLY = 9,   RESPST_COMPLETE = 10,   RESPST_ACKNOWLEDGE = 11,   RESPST_CLEANUP = 12,   RESPST_DUPLICATE_REQUEST = 13,   RESPST_ERR_MALFORMED_WQE = 14,   RESPST_ERR_UNSUPPORTED_OPCODE = 15,   RESPST_ERR_MISALIGNED_ATOMIC = 16,   RESPST_ERR_PSN_OUT_OF_SEQ = 17,   RESPST_ERR_MISSING_OPCODE_FIRST = 18,   RESPST_ERR_MISSING_OPCODE_LAST_C = 19,   RESPST_ERR_MISSING_OPCODE_LAST_D1E = 20,   RESPST_ERR_TOO_MANY_RDMA_ATM_REQ = 21,   RESPST_ERR_RNR = 22,   RESPST_ERR_RKEY_VIOLATION = 23,   RESPST_ERR_LENGTH = 24,   RESPST_ERR_CQ_OVERFLOW = 25,   RESPST_ERROR = 26,   RESPST_RESET = 27,   RESPST_DONE = 28,   RESPST_EXIT = 29 } ;   343     struct rxe_mc_grp {   struct rxe_pool_entry pelem;   spinlock_t mcg_lock;   struct rxe_dev *rxe;   struct list_head qp_list;   union ib_gid mgid;   int num_qp;   u32 qkey;   u16 pkey; } ;   354     struct rxe_mc_elem {   struct rxe_pool_entry pelem;   struct list_head qp_list;   struct list_head grp_list;   struct rxe_qp *qp;   struct rxe_mc_grp *grp; } ;    26     union __anonunion___u_42___0 {   int __val;   char __c[1U]; } ;    38     union __anonunion___u_44___0 {   int __val;   char __c[1U]; } ;   163     struct rxe_type_info {   char *name;   size_t size;   void (*cleanup)(void *);   enum rxe_pool_flags flags;   u32 max_index;   u32 min_index;   size_t key_offset;   size_t key_size;   struct kmem_cache *cache; } ;   685     struct cpuinfo_x86 ;    80     struct cpuinfo_x86 {   __u8 x86;   __u8 x86_vendor;   __u8 x86_model;   __u8 x86_mask;   int x86_tlbsize;   __u8 x86_virt_bits;   __u8 x86_phys_bits;   __u8 x86_coreid_bits;   __u32 extended_cpuid_level;   int cpuid_level;   __u32 x86_capability[19U];   char x86_vendor_id[16U];   char x86_model_id[64U];   int x86_cache_size;   int x86_cache_alignment;   int x86_cache_max_rmid;   int x86_cache_occ_scale;   int x86_power;   unsigned long loops_per_jiffy;   u16 x86_max_cores;   u16 apicid;   u16 initial_apicid;   u16 x86_clflush_size;   u16 booted_cores;   u16 phys_proc_id;   u16 logical_proc_id;   u16 cpu_core_id;   u16 cpu_index;   u32 microcode; } ;  3319     typedef int pao_T_____33;  3319     typedef int pao_T_____34;  3319     typedef int pao_T_____35;  3319     typedef int pao_T_____36;  3330     typedef int pao_T_____37;  3330     typedef int pao_T_____38;  3330     typedef int pao_T_____39;  3330     typedef int pao_T_____40;  1210     struct ib_rdma_wr {   struct ib_send_wr wr;   u64 remote_addr;   u32 rkey; } ;  1222     struct ib_atomic_wr {   struct ib_send_wr wr;   u64 remote_addr;   u64 compare_add;   u64 
swap;   u64 compare_add_mask;   u64 swap_mask;   u32 rkey; } ;  1237     struct ib_ud_wr {   struct ib_send_wr wr;   struct ib_ah *ah;   void *header;   int hlen;   int mss;   u32 remote_qpn;   u32 remote_qkey;   u16 pkey_index;   u8 port_num; } ;  1254     struct ib_reg_wr {   struct ib_send_wr wr;   struct ib_mr *mr;   u32 key;   int access; } ;    60     struct rxe_ucontext {   struct rxe_pool_entry pelem;   struct ib_ucontext ibuc; } ;    70     struct rxe_ah {   struct rxe_pool_entry pelem;   struct ib_ah ibah;   struct rxe_pd *pd;   struct rxe_av av; } ;   122     enum rdma_network_type {   RDMA_NETWORK_IB = 0,   RDMA_NETWORK_ROCE_V1 = 0,   RDMA_NETWORK_IPV4 = 1,   RDMA_NETWORK_IPV6 = 2 } ;    27     union __anonunion___u_9___0 {   struct list_head *__val;   char __c[1U]; } ;    38     union __anonunion___u_44___1 {   int __val;   char __c[1U]; } ;    66     enum sock_shutdown_cmd {   SHUT_RD = 0,   SHUT_WR = 1,   SHUT_RDWR = 2 } ;  1064     enum ib_qp_attr_mask {   IB_QP_STATE = 1,   IB_QP_CUR_STATE = 2,   IB_QP_EN_SQD_ASYNC_NOTIFY = 4,   IB_QP_ACCESS_FLAGS = 8,   IB_QP_PKEY_INDEX = 16,   IB_QP_PORT = 32,   IB_QP_QKEY = 64,   IB_QP_AV = 128,   IB_QP_PATH_MTU = 256,   IB_QP_TIMEOUT = 512,   IB_QP_RETRY_CNT = 1024,   IB_QP_RNR_RETRY = 2048,   IB_QP_RQ_PSN = 4096,   IB_QP_MAX_QP_RD_ATOMIC = 8192,   IB_QP_ALT_PATH = 16384,   IB_QP_MIN_RNR_TIMER = 32768,   IB_QP_SQ_PSN = 65536,   IB_QP_MAX_DEST_RD_ATOMIC = 131072,   IB_QP_PATH_MIG_STATE = 262144,   IB_QP_CAP = 524288,   IB_QP_DEST_QPN = 1048576,   IB_QP_RESERVED1 = 2097152,   IB_QP_RESERVED2 = 4194304,   IB_QP_RESERVED3 = 8388608,   IB_QP_RESERVED4 = 16777216 } ;    27     union __anonunion___u_9___1 {   struct list_head *__val;   char __c[1U]; } ;   189     union __anonunion___u_13 {   struct list_head *__val;   char __c[1U]; } ;    38     union __anonunion___u_44___2 {   int __val;   char __c[1U]; } ;   424     struct udphdr {   __be16 source;   __be16 dest;   __be16 len;   __sum16 check; } ;    27     union __anonunion___u_9___2 {   struct list_head *__val;   char __c[1U]; } ;   189     union __anonunion___u_13___0 {   struct list_head *__val;   char __c[1U]; } ;   189     union __anonunion___u_13___1 {   struct list_head *__val;   char __c[1U]; } ;   310     struct skb_frag_struct ;   310     typedef struct skb_frag_struct skb_frag_t;   311     struct __anonstruct_page_395 {   struct page *p; } ;   311     struct skb_frag_struct {   struct __anonstruct_page_395 page;   __u32 page_offset;   __u32 size; } ;   344     struct skb_shared_hwtstamps {   ktime_t hwtstamp; } ;   410     struct skb_shared_info {   unsigned char nr_frags;   __u8 tx_flags;   unsigned short gso_size;   unsigned short gso_segs;   unsigned short gso_type;   struct sk_buff *frag_list;   struct skb_shared_hwtstamps hwtstamps;   u32 tskey;   __be32 ip6_frag_id;   atomic_t dataref;   void *destructor_arg;   skb_frag_t frags[17U]; } ;  2281     struct netdev_notifier_info {   struct net_device *dev; } ;   201     struct ip_options {   __be32 faddr;   __be32 nexthop;   unsigned char optlen;   unsigned char srr;   unsigned char rr;   unsigned char ts;   unsigned char is_strictroute;   unsigned char srr_is_hit;   unsigned char is_changed;   unsigned char rr_needaddr;   unsigned char ts_needtime;   unsigned char ts_needaddr;   unsigned char router_alert;   unsigned char cipso;   unsigned char __pad2;   unsigned char __data[0U]; } ;   239     struct net_generic {   unsigned int len;   struct callback_head rcu;   void *ptr[0U]; } ;   345     struct lwtunnel_state {   __u16 
type;   __u16 flags;   atomic_t refcnt;   int (*orig_output)(struct net *, struct sock *, struct sk_buff *);   int (*orig_input)(struct sk_buff *);   int len;   __u8 data[0U]; } ;    57     struct fib6_node {   struct fib6_node *parent;   struct fib6_node *left;   struct fib6_node *right;   struct fib6_node *subtree;   struct rt6_info *leaf;   __u16 fn_bit;   __u16 fn_flags;   int fn_sernum;   struct rt6_info *rr_ptr; } ;    83     struct rt6key {   struct in6_addr addr;   int plen; } ;    93     struct rt6_info {   struct dst_entry dst;   struct fib6_table *rt6i_table;   struct fib6_node *rt6i_node;   struct in6_addr rt6i_gateway;   struct list_head rt6i_siblings;   unsigned int rt6i_nsiblings;   atomic_t rt6i_ref;   struct rt6key rt6i_dst;   u32 rt6i_flags;   struct rt6key rt6i_src;   struct rt6key rt6i_prefsrc;   struct list_head rt6i_uncached;   struct uncached_list *rt6i_uncached_list;   struct inet6_dev *rt6i_idev;   struct rt6_info **rt6i_pcpu;   u32 rt6i_metric;   u32 rt6i_pmtu;   unsigned short rt6i_nfheader_len;   u8 rt6i_protocol; } ;   207     struct rt6_statistics {   __u32 fib_nodes;   __u32 fib_route_nodes;   __u32 fib_rt_alloc;   __u32 fib_rt_entries;   __u32 fib_rt_cache;   __u32 fib_discarded_routes; } ;   216     struct fib6_table {   struct hlist_node tb6_hlist;   u32 tb6_id;   rwlock_t tb6_lock;   struct fib6_node tb6_root;   struct inet_peer_base tb6_peers; } ;    20     struct prefix_info {   __u8 type;   __u8 length;   __u8 prefix_len;   unsigned char reserved;   unsigned char autoconf;   unsigned char onlink;   __be32 valid;   __be32 prefered;   __be32 reserved2;   struct in6_addr prefix; } ;   196     struct ipv6_stub {   int (*ipv6_sock_mc_join)(struct sock *, int, const struct in6_addr *);   int (*ipv6_sock_mc_drop)(struct sock *, int, const struct in6_addr *);   int (*ipv6_dst_lookup)(struct net *, struct sock *, struct dst_entry **, struct flowi6 *);   void (*udpv6_encap_enable)();   void (*ndisc_send_na)(struct net_device *, const struct in6_addr *, const struct in6_addr *, bool , bool , bool , bool );   struct neigh_table *nd_tbl; } ;   430     struct inet_skb_parm {   int iif;   struct ip_options opt;   unsigned char flags;   u16 frag_max_size; } ;    52     struct udp_hslot {   struct hlist_head head;   int count;   spinlock_t lock; } ;    66     struct udp_table {   struct udp_hslot *hash;   struct udp_hslot *hash2;   unsigned int mask;   unsigned int log; } ;   353     union __anonunion____missing_field_name_613 {   struct in_addr local_ip;   struct in6_addr local_ip6; } ;   353     union __anonunion____missing_field_name_614 {   struct in_addr peer_ip;   struct in6_addr peer_ip6; } ;   353     struct udp_port_cfg {   u8 family;   union __anonunion____missing_field_name_613 __annonCompField138;   union __anonunion____missing_field_name_614 __annonCompField139;   __be16 local_udp_port;   __be16 peer_udp_port;   unsigned char use_udp_checksums;   unsigned char use_udp6_tx_checksums;   unsigned char use_udp6_rx_checksums;   unsigned char ipv6_v6only; } ;    71     struct udp_tunnel_sock_cfg {   void *sk_user_data;   __u8 encap_type;   int (*encap_rcv)(struct sock *, struct sk_buff *);   void (*encap_destroy)(struct sock *);   struct sk_buff ** (*gro_receive)(struct sock *, struct sk_buff **, struct sk_buff *);   int (*gro_complete)(struct sock *, struct sk_buff *, int); } ;    91     struct udp_tunnel_info {   unsigned short type;   sa_family_t sa_family;   __be16 port; } ;   165     struct rxe_recv_sockets {   struct socket *sk4;   struct socket *sk6; } ;  
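
The udp_port_cfg and udp_tunnel_sock_cfg structures dumped just above, together with struct rxe_recv_sockets, are the pieces rxe uses to receive RoCEv2 frames over UDP port 4791 (the IANA-assigned RoCEv2 port). The following kernel-context sketch shows how such a socket is typically created with the udp_tunnel helpers declared here; my_create_roce_sock and my_encap_recv are hypothetical names, and the reduced error handling is an assumption, not the driver's actual code.

#include <linux/err.h>
#include <net/udp_tunnel.h>

/* Hypothetical receive handler; the signature matches the encap_rcv
 * member of struct udp_tunnel_sock_cfg dumped above. */
static int my_encap_recv(struct sock *sk, struct sk_buff *skb);

static struct socket *my_create_roce_sock(struct net *net)
{
        struct udp_port_cfg udp_cfg = {
                .family         = AF_INET,
                .local_udp_port = htons(4791),  /* IANA RoCEv2 port */
        };
        struct udp_tunnel_sock_cfg tnl_cfg = {
                .encap_type = 1,                /* nonzero: hand frames to encap_rcv */
                .encap_rcv  = my_encap_recv,
        };
        struct socket *sock;
        int err;

        err = udp_sock_create(net, &udp_cfg, &sock);  /* create + bind */
        if (err < 0)
                return ERR_PTR(err);

        setup_udp_tunnel_sock(net, sock, &tnl_cfg);   /* install handler */
        return sock;
}
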
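
struct rxe_queue_buf earlier in this dump keeps free-standing producer_index and consumer_index fields plus an index_mask, i.e. a power-of-two ring buffer shared between kernel and user space, and the trace declares helpers such as queue_empty, advance_producer and advance_consumer around it. Below is a self-contained user-space model of that indexing scheme; the helper bodies are assumptions consistent with those declarations (single producer, single consumer, one slot left unused to distinguish full from empty).

/* Compile with: cc -o ring ring.c */
#include <stdio.h>

#define Q_LOG2_ELEMS 3
#define Q_ELEMS      (1u << Q_LOG2_ELEMS)

struct queue {
        unsigned int producer_index;
        unsigned int consumer_index;
        unsigned int index_mask;      /* Q_ELEMS - 1 */
        int          data[Q_ELEMS];
};

static int queue_empty(const struct queue *q)
{
        return q->producer_index == q->consumer_index;
}

static int queue_full(const struct queue *q)
{
        return ((q->producer_index + 1) & q->index_mask) == q->consumer_index;
}

static void advance_producer(struct queue *q)
{
        q->producer_index = (q->producer_index + 1) & q->index_mask;
}

static void advance_consumer(struct queue *q)
{
        q->consumer_index = (q->consumer_index + 1) & q->index_mask;
}

int main(void)
{
        struct queue q = { .index_mask = Q_ELEMS - 1 };

        while (!queue_full(&q)) {          /* one slot stays unused */
                q.data[q.producer_index] = (int)q.producer_index;
                advance_producer(&q);
        }
        while (!queue_empty(&q)) {
                printf("%d\n", q.data[q.consumer_index]);
                advance_consumer(&q);
        }
        return 0;
}
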
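
psn_compare, declared later in this trace, orders 24-bit packet sequence numbers that wrap modulo 2^24. A standalone model follows; the shift-by-8 serial-number trick in the body is an assumption, chosen because it is the standard way to make the sign of a 32-bit result encode circular order and it matches the int return type of the declaration.

/* Compile with: cc -o psn psn.c */
#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;
typedef int32_t  s32;

static int psn_compare(u32 psn_a, u32 psn_b)
{
        /* Shift the 24-bit difference into the top bits so the sign of
         * the 32-bit result encodes order, even across the 2^24 wrap. */
        s32 diff = (s32)((psn_a - psn_b) << 8);

        return diff;
}

int main(void)
{
        printf("%d\n", psn_compare(5, 3) > 0);               /* 1: 5 after 3 */
        printf("%d\n", psn_compare(0x000001, 0xFFFFFF) > 0); /* 1: wrapped  */
        printf("%d\n", psn_compare(0xFFFFFF, 0x000001) < 0); /* 1           */
        return 0;
}
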
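
struct rxe_pool above carries a bitmap (table, table_size), an index window (min_index, max_index) and a last cursor, and the trace later declares alloc_index alongside find_next_zero_bit / find_first_zero_bit. The standalone model below sketches that allocation strategy; the scan-from-cursor-then-wrap behaviour is an assumption consistent with those declarations, and the byte-per-bit table is for clarity only.

/* Compile with: cc -o pool pool.c */
#include <stdio.h>

#define MIN_INDEX  16u
#define MAX_INDEX  31u
#define NBITS      (MAX_INDEX - MIN_INDEX + 1)

static unsigned char table[NBITS];      /* one byte per "bit" */
static unsigned int last = MIN_INDEX;   /* allocation cursor  */

static unsigned int find_next_zero_bit(const unsigned char *tbl,
                                       unsigned int size, unsigned int off)
{
        while (off < size && tbl[off])
                off++;
        return off;                     /* == size when none found */
}

static unsigned int alloc_index(void)
{
        unsigned int range = MAX_INDEX - MIN_INDEX + 1;
        unsigned int idx = find_next_zero_bit(table, range, last - MIN_INDEX);

        if (idx >= range)               /* wrap to the window start */
                idx = find_next_zero_bit(table, range, 0);

        table[idx] = 1;                 /* mark allocated */
        last = idx + MIN_INDEX;
        return idx + MIN_INDEX;
}

int main(void)
{
        for (int i = 0; i < 4; i++)
                printf("allocated index %u\n", alloc_index());
        return 0;
}
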
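
enum rxe_hdr_mask and struct rxe_opcode_info above drive receive-side dispatch: the BTH opcode indexes a 256-entry table (rxe_opcode, declared later in the trace) and the mask bits decide whether a packet goes to the responder (RXE_REQ_MASK set) or to the completer, matching the rxe_resp_queue_pkt / rxe_comp_queue_pkt declarations further down. A reduced standalone sketch; the two table entries are illustrative (the real table carries many more mask bits per opcode).

/* Compile with: cc -o dispatch dispatch.c */
#include <stdio.h>

enum rxe_hdr_mask {
        RXE_REQ_MASK = 4096,    /* carries a request  -> responder */
        RXE_ACK_MASK = 8192,    /* acknowledges one   -> completer */
};

struct rxe_opcode_info {
        const char  *name;
        unsigned int mask;
};

static const struct rxe_opcode_info rxe_opcode[256] = {
        [0x04] = { "RC SEND_ONLY",   RXE_REQ_MASK },
        [0x11] = { "RC ACKNOWLEDGE", RXE_ACK_MASK },
};

static void dispatch(unsigned char bth_opcode)
{
        const struct rxe_opcode_info *op = &rxe_opcode[bth_opcode];

        if (op->mask & RXE_REQ_MASK)
                printf("%s -> responder queue\n", op->name);
        else if (op->mask & RXE_ACK_MASK)
                printf("%s -> completer queue\n", op->name);
        else
                printf("opcode 0x%02x: drop\n", bth_opcode);
}

int main(void)
{
        dispatch(0x04);
        dispatch(0x11);
        dispatch(0xff);
        return 0;
}
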
   1     void * __builtin_memcpy(void *, const void *, unsigned long);     1     long int __builtin_expect(long exp, long c);   243     void __write_once_size(volatile void *p, void *res, int size);   275     void __pr_err(const char *, ...);   278     void __pr_info(const char *, ...);    26     void * ldv_undef_ptr();    25     void INIT_LIST_HEAD(struct list_head *list);    71     void warn_slowpath_null(const char *, const int);    36     void atomic_set(atomic_t *v, int i);    78     bool  atomic_sub_and_test(int i, atomic_t *v);    93     void __raw_spin_lock_init(raw_spinlock_t *, const char *, struct lock_class_key *);   289     raw_spinlock_t * spinlock_check(spinlock_t *lock);   119     void __mutex_init(struct mutex *, const char *, struct lock_class_key *);    31     void kref_init(struct kref *kref);    67     int kref_sub(struct kref *kref, unsigned int count, void (*release)(struct kref *));    96     int kref_put(struct kref *kref, void (*release)(struct kref *));   154     void kfree(const void *);   322     void * ldv_kmem_cache_alloc_20(struct kmem_cache *ldv_func_arg1, gfp_t flags);   326     void * ldv_kmem_cache_alloc_40(struct kmem_cache *ldv_func_arg1, gfp_t flags);   579     void * kcalloc(size_t n, size_t size, gfp_t flags);    18     void ldv_check_alloc_flags(gfp_t flags);   971     struct sk_buff * ldv_skb_clone_30(struct sk_buff *ldv_func_arg1, gfp_t flags);   975     struct sk_buff * ldv_skb_clone_39(struct sk_buff *ldv_func_arg1, gfp_t flags);   980     struct sk_buff * ldv_skb_copy_32(const struct sk_buff *ldv_func_arg1, gfp_t flags);   993     int ldv_pskb_expand_head_27(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);   997     int ldv_pskb_expand_head_28(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  1001     int ldv_pskb_expand_head_36(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  1005     int ldv_pskb_expand_head_37(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  1009     int ldv_pskb_expand_head_38(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  2395     struct sk_buff * ldv___netdev_alloc_skb_33(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);  2399     struct sk_buff * ldv___netdev_alloc_skb_34(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);  2403     struct sk_buff * ldv___netdev_alloc_skb_35(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);   331     int ib_mtu_enum_to_int(enum ib_mtu mtu);  2087     void ib_dealloc_device(struct ib_device *);    50     int rxe_net_init();    51     void rxe_net_exit();    37     enum ib_mtu  rxe_mtu_int_to_enum(int mtu);    54     enum ib_mtu  eth_mtu_int_to_enum(int mtu);   114     int rxe_cache_init();   117     void rxe_cache_exit();   123     int rxe_pool_init(struct rxe_dev *rxe, struct rxe_pool *pool, enum rxe_elem_type type, unsigned int max_elem);   127     int rxe_pool_cleanup(struct rxe_pool *pool);   475     int rxe_register_device(struct rxe_dev *rxe);   476     int rxe_unregister_device(struct rxe_dev *rxe);    62     int rxe_set_mtu(struct rxe_dev *rxe, unsigned int ndev_mtu);    64     int rxe_add(struct rxe_dev *rxe, unsigned int mtu);    65     void rxe_remove(struct rxe_dev *rxe);    66     void rxe_remove_all();    70     void rxe_dev_put(struct rxe_dev *rxe);   226     void rxe_release(struct kref *kref);    44     void 
rxe_cleanup_ports(struct rxe_dev *rxe);    54     void rxe_cleanup(struct rxe_dev *rxe);    83     const char __kstrtab_rxe_dev_put[12U] = { 'r', 'x', 'e', '_', 'd', 'e', 'v', '_', 'p', 'u', 't', '\x0' };    83     const struct kernel_symbol __ksymtab_rxe_dev_put;    83     const struct kernel_symbol __ksymtab_rxe_dev_put = { (unsigned long)(&rxe_dev_put), (const char *)(&__kstrtab_rxe_dev_put) };    86     int rxe_init_device_param(struct rxe_dev *rxe);   135     int rxe_init_port_param(struct rxe_port *port);   166     int rxe_init_ports(struct rxe_dev *rxe);   190     int rxe_init_pools(struct rxe_dev *rxe);   269     int rxe_init(struct rxe_dev *rxe);   315     const char __kstrtab_rxe_set_mtu[12U] = { 'r', 'x', 'e', '_', 's', 'e', 't', '_', 'm', 't', 'u', '\x0' };   315     const struct kernel_symbol __ksymtab_rxe_set_mtu;   315     const struct kernel_symbol __ksymtab_rxe_set_mtu = { (unsigned long)(&rxe_set_mtu), (const char *)(&__kstrtab_rxe_set_mtu) };   344     const char __kstrtab_rxe_add[8U] = { 'r', 'x', 'e', '_', 'a', 'd', 'd', '\x0' };   344     const struct kernel_symbol __ksymtab_rxe_add;   344     const struct kernel_symbol __ksymtab_rxe_add = { (unsigned long)(&rxe_add), (const char *)(&__kstrtab_rxe_add) };   353     const char __kstrtab_rxe_remove[11U] = { 'r', 'x', 'e', '_', 'r', 'e', 'm', 'o', 'v', 'e', '\x0' };   353     const struct kernel_symbol __ksymtab_rxe_remove;   353     const struct kernel_symbol __ksymtab_rxe_remove = { (unsigned long)(&rxe_remove), (const char *)(&__kstrtab_rxe_remove) };   355     int rxe_module_init();   377     void rxe_module_exit();   405     void ldv_check_final_state();   414     void ldv_initialize();   417     void ldv_handler_precall();   420     int nondet_int();   423     int LDV_IN_INTERRUPT = 0;   426     void ldv_main0_sequence_infinite_withcheck_stateful();     7     __u32  __arch_swab32(__u32 val);    14     __u64  __arch_swab64(__u64 val);    55     __u32  __fswab32(__u32 val);    64     __u64  __fswab64(__u64 val);   276     void __pr_warn(const char *, ...);    55     void __dynamic_pr_debug(struct _ddebug *, const char *, ...);     8     void ldv_spin_lock();     9     void ldv_spin_unlock();    56     void * __memset(void *, int, size_t );    89     void atomic_inc(atomic_t *v);    45     void _raw_spin_unlock_irqrestore(raw_spinlock_t *, unsigned long);   376     void ldv_spin_unlock_irqrestore_52(spinlock_t *lock, unsigned long flags);   376     void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags);    78     extern volatile unsigned long jiffies;   365     unsigned long int __usecs_to_jiffies(const unsigned int);   401     unsigned long int usecs_to_jiffies(const unsigned int u);   191     int mod_timer(struct timer_list *, unsigned long);   322     void * ldv_kmem_cache_alloc_60(struct kmem_cache *ldv_func_arg1, gfp_t flags);   326     void * ldv_kmem_cache_alloc_80(struct kmem_cache *ldv_func_arg1, gfp_t flags);   894     void kfree_skb(struct sk_buff *);   971     struct sk_buff * ldv_skb_clone_70(struct sk_buff *ldv_func_arg1, gfp_t flags);   975     struct sk_buff * ldv_skb_clone_79(struct sk_buff *ldv_func_arg1, gfp_t flags);   980     struct sk_buff * ldv_skb_copy_72(const struct sk_buff *ldv_func_arg1, gfp_t flags);   993     int ldv_pskb_expand_head_67(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);   997     int ldv_pskb_expand_head_68(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  1001     int 
ldv_pskb_expand_head_76(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  1005     int ldv_pskb_expand_head_77(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  1009     int ldv_pskb_expand_head_78(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  1533     __u32  skb_queue_len(const struct sk_buff_head *list_);  1725     void skb_queue_tail(struct sk_buff_head *, struct sk_buff *);  1757     struct sk_buff * skb_dequeue(struct sk_buff_head *);  2395     struct sk_buff * ldv___netdev_alloc_skb_73(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);  2399     struct sk_buff * ldv___netdev_alloc_skb_74(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);  2403     struct sk_buff * ldv___netdev_alloc_skb_75(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);   127     struct rxe_opcode_info rxe_opcode[256U];   152     u8  __bth_pad(void *arg);   330     u8  bth_pad(struct rxe_pkt_info *pkt);   768     u8  __aeth_syn(void *arg);   800     u8  aeth_syn(struct rxe_pkt_info *pkt);   831     u64  __atmack_orig(void *arg);   845     u64  atmack_orig(struct rxe_pkt_info *pkt);   940     void * payload_addr(struct rxe_pkt_info *pkt);   946     size_t  payload_size(struct rxe_pkt_info *pkt);   155     void rxe_elem_release(struct kref *kref);    87     void rxe_run_task(struct rxe_task *task, int sched);    53     int psn_compare(u32 psn_a, u32 psn_b);   430     struct rxe_dev * to_rdev(struct ib_device *dev);    65     int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited);   121     int copy_data(struct rxe_dev *rxe, struct rxe_pd *pd, int access, struct rxe_dma_info *dma, void *addr, int length, enum copy_direction dir, u32 *crcp);   161     void rxe_qp_error(struct rxe_qp *qp);   172     enum ib_qp_type  qp_type(struct rxe_qp *qp);   205     void retransmit_timer(unsigned long data);   228     int rxe_completer(void *arg);   237     void rxe_comp_queue_pkt(struct rxe_dev *rxe, struct rxe_qp *qp, struct sk_buff *skb);   108     int queue_empty(struct rxe_queue *q);   126     void advance_consumer(struct rxe_queue *q);   138     void * consumer_addr(struct rxe_queue *q);   173     void * queue_head(struct rxe_queue *q);    59     char *comp_state_name[15U] = { (char *)"GET ACK", (char *)"GET WQE", (char *)"COMP WQE", (char *)"COMP ACK", (char *)"CHECK PSN", (char *)"CHECK ACK", (char *)"READ", (char *)"ATOMIC", (char *)"WRITE/SEND", (char *)"UPDATE COMP", (char *)"ERROR RETRY", (char *)"RNR RETRY", (char *)"ERROR", (char *)"EXIT", (char *)"DONE" };    77     unsigned long rnrnak_usec[32U] = { 655360UL, 10UL, 20UL, 30UL, 40UL, 60UL, 80UL, 120UL, 160UL, 240UL, 320UL, 480UL, 640UL, 960UL, 1280UL, 1920UL, 2560UL, 3840UL, 5120UL, 7680UL, 10240UL, 15360UL, 20480UL, 30720UL, 40960UL, 61410UL, 81920UL, 122880UL, 163840UL, 245760UL, 327680UL, 491520UL };   112     unsigned long int rnrnak_jiffies(u8 timeout);   118     enum ib_wc_opcode  wr_to_wc_opcode(enum ib_wr_opcode opcode);   160     enum comp_state  get_wqe(struct rxe_qp *qp, struct rxe_pkt_info *pkt, struct rxe_send_wqe **wqe_p);   188     void reset_retry_counters(struct rxe_qp *qp);   194     enum comp_state  check_psn(struct rxe_qp *qp, struct rxe_pkt_info *pkt, struct rxe_send_wqe *wqe);   233     enum comp_state  check_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt, struct rxe_send_wqe *wqe);   348     enum comp_state  do_read(struct rxe_qp *qp, struct 
rxe_pkt_info *pkt, struct rxe_send_wqe *wqe);   367     enum comp_state  do_atomic(struct rxe_qp *qp, struct rxe_pkt_info *pkt, struct rxe_send_wqe *wqe);   385     void make_send_cqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe, struct rxe_cqe *cqe);   415     void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe);   438     enum comp_state  complete_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt, struct rxe_send_wqe *wqe);   484     enum comp_state  complete_wqe(struct rxe_qp *qp, struct rxe_pkt_info *pkt, struct rxe_send_wqe *wqe);   218     void __read_once_size(const volatile void *p, void *res, int size);    46     __u16  __fswab16(__u16 val);    31     void * __memcpy(void *, const void *, size_t );    16     void __xadd_wrong_size();    24     int atomic_read(const atomic_t *v);   154     int atomic_add_return(int i, atomic_t *v);   166     int atomic_sub_return(int i, atomic_t *v);   184     int timer_pending(const struct timer_list *timer);   322     void * ldv_kmem_cache_alloc_100(struct kmem_cache *ldv_func_arg1, gfp_t flags);   326     void * ldv_kmem_cache_alloc_120(struct kmem_cache *ldv_func_arg1, gfp_t flags);   971     struct sk_buff * ldv_skb_clone_110(struct sk_buff *ldv_func_arg1, gfp_t flags);   975     struct sk_buff * ldv_skb_clone_119(struct sk_buff *ldv_func_arg1, gfp_t flags);   980     struct sk_buff * ldv_skb_copy_112(const struct sk_buff *ldv_func_arg1, gfp_t flags);   993     int ldv_pskb_expand_head_107(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);   997     int ldv_pskb_expand_head_108(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  1001     int ldv_pskb_expand_head_116(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  1005     int ldv_pskb_expand_head_117(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  1009     int ldv_pskb_expand_head_118(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  2395     struct sk_buff * ldv___netdev_alloc_skb_113(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);  2399     struct sk_buff * ldv___netdev_alloc_skb_114(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);  2403     struct sk_buff * ldv___netdev_alloc_skb_115(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);    11     u32  crc32_le(u32 , const unsigned char *, size_t );    65     struct rxe_wr_opcode_info rxe_wr_opcode_info[12U];   425     void bth_init(struct rxe_pkt_info *pkt, u8 opcode, int se, int mig, int pad, u16 pkey, u32 qpn, int ack_req, u32 psn);   498     void __deth_set_qkey(void *arg, u32 qkey);   512     void __deth_set_sqp(void *arg, u32 sqp);   525     void deth_set_qkey(struct rxe_pkt_info *pkt, u32 qkey);   537     void deth_set_sqp(struct rxe_pkt_info *pkt, u32 sqp);   559     void __reth_set_va(void *arg, u64 va);   573     void __reth_set_rkey(void *arg, u32 rkey);   587     void __reth_set_len(void *arg, u32 len);   600     void reth_set_va(struct rxe_pkt_info *pkt, u64 va);   612     void reth_set_rkey(struct rxe_pkt_info *pkt, u32 rkey);   624     void reth_set_len(struct rxe_pkt_info *pkt, u32 len);   647     void __atmeth_set_va(void *arg, u64 va);   661     void __atmeth_set_rkey(void *arg, u32 rkey);   675     void __atmeth_set_swap_add(void *arg, u64 swap_add);   689     void __atmeth_set_comp(void *arg, u64 comp);   702     void atmeth_set_va(struct rxe_pkt_info *pkt, u64 va);   714   
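
The reth/atmeth/deth accessors declared around this point all follow one pattern: header fields live on the wire big-endian (__be32/__be64), so every set/get helper is an endian swap over a packed struct such as struct rxe_reth from the dump above. A standalone illustration using the glibc <endian.h> helpers; the function bodies are assumptions consistent with the declared accessor names.

/* Compile with: cc -o reth reth.c */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

struct rxe_reth {               /* RDMA ETH: va, rkey, length */
        uint64_t va;            /* __be64 in the kernel dump */
        uint32_t rkey;          /* __be32 */
        uint32_t len;           /* __be32 */
} __attribute__((packed));

static void reth_set_va(struct rxe_reth *reth, uint64_t va)
{
        reth->va = htobe64(va);         /* store in wire order */
}

static uint64_t reth_va(const struct rxe_reth *reth)
{
        return be64toh(reth->va);       /* read back in host order */
}

int main(void)
{
        struct rxe_reth reth;

        reth_set_va(&reth, 0x1122334455667788ULL);
        reth.rkey = htobe32(0xdeadbeef);
        reth.len  = htobe32(4096);
        printf("va back from wire order: 0x%llx\n",
               (unsigned long long)reth_va(&reth));
        return 0;
}
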
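
rnrnak_jiffies and the 32-entry rnrnak_usec table above convert the 5-bit IB RNR-NAK timeout code into timer ticks for mod_timer. A standalone model follows; the table values are copied verbatim from the dump (units: microseconds), while HZ = 250, the round-up, and the one-tick floor are assumptions of this sketch.

/* Compile with: cc -o rnr rnr.c */
#include <stdio.h>

#define HZ 250u                         /* assumed tick rate */

static const unsigned long rnrnak_usec[32] = {
        655360, 10, 20, 30, 40, 60, 80, 120, 160, 240, 320, 480,
        640, 960, 1280, 1920, 2560, 3840, 5120, 7680, 10240, 15360,
        20480, 30720, 40960, 61410, 81920, 122880, 163840, 245760,
        327680, 491520,
};

static unsigned long usecs_to_jiffies_model(unsigned long usec)
{
        unsigned long j = (usec * HZ + 999999) / 1000000; /* round up */

        return j ? j : 1;               /* never arm a zero-tick timer */
}

static unsigned long rnrnak_jiffies(unsigned char timeout)
{
        return usecs_to_jiffies_model(rnrnak_usec[timeout & 0x1f]);
}

int main(void)
{
        printf("code 0 -> %lu jiffies\n", rnrnak_jiffies(0)); /* 655.36 ms */
        printf("code 1 -> %lu jiffies\n", rnrnak_jiffies(1)); /* 10 us     */
        return 0;
}
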
  void atmeth_set_rkey(struct rxe_pkt_info *pkt, u32 rkey);   726     void atmeth_set_swap_add(struct rxe_pkt_info *pkt, u64 swap_add);   738     void atmeth_set_comp(struct rxe_pkt_info *pkt, u64 comp);   871     void __immdt_set_imm(void *arg, __be32 imm);   884     void immdt_set_imm(struct rxe_pkt_info *pkt, __be32 imm);   904     void __ieth_set_rkey(void *arg, u32 rkey);   917     void ieth_set_rkey(struct rxe_pkt_info *pkt, u32 rkey);   149     void * rxe_pool_get_index(struct rxe_pool *pool, u32 index);   465     struct rxe_mem * to_rmr(struct ib_mr *mr);    53     struct rxe_av * rxe_get_av(struct rxe_pkt_info *pkt);   142     int advance_dma_data(struct rxe_dma_info *dma, unsigned int length);   206     void rnr_nak_timer(unsigned long data);   229     int rxe_requester(void *arg);   240     unsigned int wr_opcode_mask(int opcode, struct rxe_qp *qp);   245     int rxe_xmit_packet(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pkt_info *pkt, struct sk_buff *skb);   103     int next_index(struct rxe_queue *q, int index);   144     unsigned int producer_index(struct rxe_queue *q);   149     unsigned int consumer_index(struct rxe_queue *q);   154     void * addr_from_index(struct rxe_queue *q, unsigned int index);    40     int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe, unsigned int opcode);    43     void retry_first_write_send(struct rxe_qp *qp, struct rxe_send_wqe *wqe, unsigned int mask, int npsn);    67     void req_retry(struct rxe_qp *qp);   128     struct rxe_send_wqe * req_next_wqe(struct rxe_qp *qp);   190     int next_opcode_rc(struct rxe_qp *qp, unsigned int opcode, int fits);   262     int next_opcode_uc(struct rxe_qp *qp, unsigned int opcode, int fits);   344     int check_init_depth(struct rxe_qp *qp, struct rxe_send_wqe *wqe);   364     int get_mtu(struct rxe_qp *qp, struct rxe_send_wqe *wqe);   379     struct sk_buff * init_req_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe, int opcode, int payload, struct rxe_pkt_info *pkt);   474     int fill_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe, struct rxe_pkt_info *pkt, struct sk_buff *skb, int paylen);   513     void update_wqe_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe, struct rxe_pkt_info *pkt, enum wqe_state *prev_state);   530     void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe, struct rxe_pkt_info *pkt, int payload);   162     int printk(const char *, ...);    30     void _raw_spin_lock_bh(raw_spinlock_t *);    42     void _raw_spin_unlock_bh(raw_spinlock_t *);   309     void ldv_spin_lock_bh_126(spinlock_t *lock);   309     void spin_lock_bh(spinlock_t *lock);   358     void ldv_spin_unlock_bh_130(spinlock_t *lock);   358     void spin_unlock_bh(spinlock_t *lock);    40     void kref_get(struct kref *kref);   322     void * ldv_kmem_cache_alloc_140(struct kmem_cache *ldv_func_arg1, gfp_t flags);   326     void * ldv_kmem_cache_alloc_160(struct kmem_cache *ldv_func_arg1, gfp_t flags);    75     int ___ratelimit(struct ratelimit_state *, const char *);   971     struct sk_buff * ldv_skb_clone_150(struct sk_buff *ldv_func_arg1, gfp_t flags);   975     struct sk_buff * ldv_skb_clone_159(struct sk_buff *ldv_func_arg1, gfp_t flags);   979     struct sk_buff * ldv_skb_clone_161(struct sk_buff *ldv_func_arg1, gfp_t flags);   983     struct sk_buff * ldv_skb_clone_162(struct sk_buff *ldv_func_arg1, gfp_t flags);   988     struct sk_buff * ldv_skb_copy_152(const struct sk_buff *ldv_func_arg1, gfp_t flags);  1001     int ldv_pskb_expand_head_147(struct sk_buff *ldv_func_arg1, 
int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  1005     int ldv_pskb_expand_head_148(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  1009     int ldv_pskb_expand_head_156(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  1013     int ldv_pskb_expand_head_157(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  1017     int ldv_pskb_expand_head_158(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  1480     struct sk_buff * skb_peek(const struct sk_buff_head *list_);  2143     unsigned char * skb_network_header(const struct sk_buff *skb);  2395     struct sk_buff * ldv___netdev_alloc_skb_153(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);  2399     struct sk_buff * ldv___netdev_alloc_skb_154(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);  2403     struct sk_buff * ldv___netdev_alloc_skb_155(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);    81     struct ipv6hdr * ipv6_hdr(const struct sk_buff *skb);    23     struct iphdr * ip_hdr(const struct sk_buff *skb);   111     void __bth_set_opcode(void *arg, u8 opcode);   118     u8  __bth_se(void *arg);   125     void __bth_set_se(void *arg, int se);   159     void __bth_set_pad(void *arg, u8 pad);   203     void __bth_set_qpn(void *arg, u32 qpn);   260     int __bth_ack(void *arg);   267     void __bth_set_ack(void *arg, int ack);   291     void __bth_set_psn(void *arg, u32 psn);   305     void bth_set_opcode(struct rxe_pkt_info *pkt, u8 opcode);   310     u8  bth_se(struct rxe_pkt_info *pkt);   315     void bth_set_se(struct rxe_pkt_info *pkt, int se);   335     void bth_set_pad(struct rxe_pkt_info *pkt, u8 pad);   365     void bth_set_qpn(struct rxe_pkt_info *pkt, u32 qpn);   400     int bth_ack(struct rxe_pkt_info *pkt);   405     void bth_set_ack(struct rxe_pkt_info *pkt, int ack);   420     void bth_set_psn(struct rxe_pkt_info *pkt, u32 psn);   505     u32  __deth_sqp(void *arg);   531     u32  deth_sqp(struct rxe_pkt_info *pkt);   552     u64  __reth_va(void *arg);   566     u32  __reth_rkey(void *arg);   580     u32  __reth_len(void *arg);   594     u64  reth_va(struct rxe_pkt_info *pkt);   606     u32  reth_rkey(struct rxe_pkt_info *pkt);   618     u32  reth_len(struct rxe_pkt_info *pkt);   640     u64  __atmeth_va(void *arg);   654     u32  __atmeth_rkey(void *arg);   668     u64  __atmeth_swap_add(void *arg);   682     u64  __atmeth_comp(void *arg);   696     u64  atmeth_va(struct rxe_pkt_info *pkt);   708     u32  atmeth_rkey(struct rxe_pkt_info *pkt);   720     u64  atmeth_swap_add(struct rxe_pkt_info *pkt);   732     u64  atmeth_comp(struct rxe_pkt_info *pkt);   775     void __aeth_set_syn(void *arg, u8 syn);   791     void __aeth_set_msn(void *arg, u32 msn);   806     void aeth_set_syn(struct rxe_pkt_info *pkt, u8 syn);   818     void aeth_set_msn(struct rxe_pkt_info *pkt, u32 msn);   838     void __atmack_set_orig(void *arg, u64 orig);   851     void atmack_set_orig(struct rxe_pkt_info *pkt, u64 orig);   864     __be32  __immdt_imm(void *arg);   878     __be32  immdt_imm(struct rxe_pkt_info *pkt);   897     u32  __ieth_rkey(void *arg);   911     u32  ieth_rkey(struct rxe_pkt_info *pkt);   118     int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr, int length, enum copy_direction dir, u32 *crcp);   125     void * iova_to_vaddr(struct rxe_mem *mem, u64 iova, int length);   132     struct rxe_mem * 
lookup_mem(struct rxe_pd *pd, int access, u32 key, enum lookup_type type);   135     int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length);   167     int qp_num(struct rxe_qp *qp);   196     void free_rd_atomic_resource(struct rxe_qp *qp, struct resp_res *res);   198     void rxe_advance_resp_resource(struct rxe_qp *qp);   230     int rxe_responder(void *arg);   234     void rxe_resp_queue_pkt(struct rxe_dev *rxe, struct rxe_qp *qp, struct sk_buff *skb);   167     unsigned int queue_count(const struct rxe_queue *q);    73     char *resp_state_name[30U] = { (char *)"NONE", (char *)"GET_REQ", (char *)"CHK_PSN", (char *)"CHK_OP_SEQ", (char *)"CHK_OP_VALID", (char *)"CHK_RESOURCE", (char *)"CHK_LENGTH", (char *)"CHK_RKEY", (char *)"EXECUTE", (char *)"READ_REPLY", (char *)"COMPLETE", (char *)"ACKNOWLEDGE", (char *)"CLEANUP", (char *)"DUPLICATE_REQUEST", (char *)"ERR_MALFORMED_WQE", (char *)"ERR_UNSUPPORTED_OPCODE", (char *)"ERR_MISALIGNED_ATOMIC", (char *)"ERR_PSN_OUT_OF_SEQ", (char *)"ERR_MISSING_OPCODE_FIRST", (char *)"ERR_MISSING_OPCODE_LAST_C", (char *)"ERR_MISSING_OPCODE_LAST_D1E", (char *)"ERR_TOO_MANY_RDMA_ATM_REQ", (char *)"ERR_RNR", (char *)"ERR_RKEY_VIOLATION", (char *)"ERR_LENGTH", (char *)"ERR_CQ_OVERFLOW", (char *)"ERROR", (char *)"RESET", (char *)"DONE", (char *)"EXIT" };   121     enum resp_states  get_req(struct rxe_qp *qp, struct rxe_pkt_info **pkt_p);   148     enum resp_states  check_psn___0(struct rxe_qp *qp, struct rxe_pkt_info *pkt);   189     enum resp_states  check_op_seq(struct rxe_qp *qp, struct rxe_pkt_info *pkt);   279     enum resp_states  check_op_valid(struct rxe_qp *qp, struct rxe_pkt_info *pkt);   317     enum resp_states  get_srq_wqe(struct rxe_qp *qp);   359     enum resp_states  check_resource(struct rxe_qp *qp, struct rxe_pkt_info *pkt);   403     enum resp_states  check_length(struct rxe_qp *qp, struct rxe_pkt_info *pkt);   418     enum resp_states  check_rkey(struct rxe_qp *qp, struct rxe_pkt_info *pkt);   502     enum resp_states  send_data_in(struct rxe_qp *qp, void *data_addr, int data_len);   517     enum resp_states  write_data_in(struct rxe_qp *qp, struct rxe_pkt_info *pkt);   539     struct spinlock atomic_ops_lock = { { { { { 0 } }, 3735899821U, 4294967295U, (void *)-1, { 0, { 0, 0 }, "atomic_ops_lock", 0, 0UL } } } };   541     enum resp_states  process_atomic(struct rxe_qp *qp, struct rxe_pkt_info *pkt);   581     struct sk_buff * prepare_ack_packet(struct rxe_qp *qp, struct rxe_pkt_info *pkt, struct rxe_pkt_info *ack, int opcode, int payload, u32 psn, u8 syndrome, u32 *crcp);   653     enum resp_states  read_reply(struct rxe_qp *qp, struct rxe_pkt_info *req_pkt);   755     enum resp_states  execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt);   812     enum resp_states  do_complete___0(struct rxe_qp *qp, struct rxe_pkt_info *pkt);   919     int send_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt, u8 syndrome, u32 psn);   944     int send_atomic_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt, u8 syndrome);   992     enum resp_states  acknowledge(struct rxe_qp *qp, struct rxe_pkt_info *pkt);  1008     enum resp_states  cleanup(struct rxe_qp *qp, struct rxe_pkt_info *pkt);  1027     struct resp_res * find_resource(struct rxe_qp *qp, u32 psn);  1046     enum resp_states  duplicate_request(struct rxe_qp *qp, struct rxe_pkt_info *pkt);  1141     void do_class_ac_error(struct rxe_qp *qp, u8 syndrome, enum ib_wc_status status);  1151     enum resp_states  do_class_d1e_error(struct rxe_qp *qp);   411     int sprintf(char *, const char *, 
...);    62     int memcmp(const void *, const void *, size_t );   322     void * ldv_kmem_cache_alloc_182(struct kmem_cache *ldv_func_arg1, gfp_t flags);   326     void * ldv_kmem_cache_alloc_202(struct kmem_cache *ldv_func_arg1, gfp_t flags);   971     struct sk_buff * ldv_skb_clone_192(struct sk_buff *ldv_func_arg1, gfp_t flags);   975     struct sk_buff * ldv_skb_clone_201(struct sk_buff *ldv_func_arg1, gfp_t flags);   979     struct sk_buff * ldv_skb_clone_203(struct sk_buff *ldv_func_arg1, gfp_t flags);   984     struct sk_buff * ldv_skb_copy_194(const struct sk_buff *ldv_func_arg1, gfp_t flags);   997     int ldv_pskb_expand_head_189(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  1001     int ldv_pskb_expand_head_190(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  1005     int ldv_pskb_expand_head_198(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  1009     int ldv_pskb_expand_head_199(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  1013     int ldv_pskb_expand_head_200(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  2395     struct sk_buff * ldv___netdev_alloc_skb_195(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);  2399     struct sk_buff * ldv___netdev_alloc_skb_196(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);  2403     struct sk_buff * ldv___netdev_alloc_skb_197(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);   436     void __ipv6_addr_set_half(__be32 *addr, __be32 wh, __be32 wl);   456     void ipv6_addr_set(struct in6_addr *addr, __be32 w1, __be32 w2, __be32 w3, __be32 w4);   649     void ipv6_addr_set_v4mapped(const __be32 addr, struct in6_addr *v4mapped);    94     int ib_find_cached_gid_by_port(struct ib_device *, const union ib_gid *, enum ib_gid_type , u8 , struct net_device *, u16 *);   104     u8  __bth_opcode(void *arg);   167     u8  __bth_tver(void *arg);   182     u16  __bth_pkey(void *arg);   196     u32  __bth_qpn(void *arg);   284     u32  __bth_psn(void *arg);   300     u8  bth_opcode(struct rxe_pkt_info *pkt);   340     u8  bth_tver(struct rxe_pkt_info *pkt);   350     u16  bth_pkey(struct rxe_pkt_info *pkt);   360     u32  bth_qpn(struct rxe_pkt_info *pkt);   415     u32  bth_psn(struct rxe_pkt_info *pkt);   491     u32  __deth_qkey(void *arg);   519     u32  deth_qkey(struct rxe_pkt_info *pkt);   935     size_t  header_size(struct rxe_pkt_info *pkt);   152     void * rxe_pool_get_key(struct rxe_pool *pool, void *key);    42     int pkey_match(u16 key1, u16 key2);    68     int rxe_rcv(struct sk_buff *skb);   232     u32  rxe_icrc_hdr(struct rxe_pkt_info *pkt, struct sk_buff *skb);    39     int check_type_state(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, struct rxe_qp *qp);    85     void set_bad_pkey_cntr(struct rxe_port *port);    93     void set_qkey_viol_cntr(struct rxe_port *port);   101     int check_keys(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, u32 qpn, struct rxe_qp *qp);   154     int check_addr(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, struct rxe_qp *qp);   214     int hdr_check(struct rxe_pkt_info *pkt);   265     void rxe_rcv_pkt(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, struct sk_buff *skb);   275     void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb);   335     int rxe_match_dgid(struct rxe_dev *rxe, struct sk_buff *skb);   420     const char 
__kstrtab_rxe_rcv[8U] = { 'r', 'x', 'e', '_', 'r', 'c', 'v', '\x0' };   420     const struct kernel_symbol __ksymtab_rxe_rcv;   420     const struct kernel_symbol __ksymtab_rxe_rcv = { (unsigned long)(&rxe_rcv), (const char *)(&__kstrtab_rxe_rcv) };    72     void set_bit(long nr, volatile unsigned long *addr);   110     void clear_bit(long nr, volatile unsigned long *addr);    28     unsigned long int find_next_zero_bit(const unsigned long *, unsigned long, unsigned long);    53     unsigned long int find_first_zero_bit(const unsigned long *, unsigned long);   184     void __might_sleep(const char *, int, int);   191     void bitmap_zero(unsigned long *dst, unsigned int nbits);    24     int atomic_read___0(const atomic_t *v);    36     void atomic_set___0(atomic_t *v, int i);   101     void atomic_dec(atomic_t *v);    62     void rb_insert_color(struct rb_node *, struct rb_root *);    63     void rb_erase(struct rb_node *, struct rb_root *);    82     void rb_link_node(struct rb_node *node, struct rb_node *parent, struct rb_node **rb_link);    31     void kref_init___0(struct kref *kref);   127     struct kmem_cache * kmem_cache_create(const char *, size_t , size_t , unsigned long, void (*)(void *));   130     void kmem_cache_destroy(struct kmem_cache *);   322     void * ldv_kmem_cache_alloc_223(struct kmem_cache *ldv_func_arg1, gfp_t flags);   326     void * ldv_kmem_cache_alloc_243(struct kmem_cache *ldv_func_arg1, gfp_t flags);   328     void kmem_cache_free(struct kmem_cache *, void *);   466     void * kmalloc(size_t size, gfp_t flags);   612     void * kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags);   971     struct sk_buff * ldv_skb_clone_233(struct sk_buff *ldv_func_arg1, gfp_t flags);   975     struct sk_buff * ldv_skb_clone_242(struct sk_buff *ldv_func_arg1, gfp_t flags);   980     struct sk_buff * ldv_skb_copy_235(const struct sk_buff *ldv_func_arg1, gfp_t flags);   993     int ldv_pskb_expand_head_230(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);   997     int ldv_pskb_expand_head_231(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  1001     int ldv_pskb_expand_head_239(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  1005     int ldv_pskb_expand_head_240(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  1009     int ldv_pskb_expand_head_241(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  2395     struct sk_buff * ldv___netdev_alloc_skb_236(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);  2399     struct sk_buff * ldv___netdev_alloc_skb_237(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);  2403     struct sk_buff * ldv___netdev_alloc_skb_238(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);    72     struct rxe_type_info rxe_type_info[10U];   130     void * rxe_alloc(struct rxe_pool *pool);   135     void rxe_add_index(void *arg);   138     void rxe_drop_index(void *arg);   143     void rxe_add_key(void *arg, void *key);   146     void rxe_drop_key(void *arg);   478     void rxe_mc_cleanup(void *arg);    67     void rxe_cq_cleanup(void *arg);   140     void rxe_mem_cleanup(void *arg);   165     void rxe_qp_cleanup(void *arg);    41     struct rxe_type_info rxe_type_info[10U] = { { (char *)"rxe-uc", 496UL, 0, 0, 0U, 0U, 0UL, 0UL, 0 }, { (char *)"rxe-pd", 104UL, 0, 0, 0U, 0U, 0UL, 0UL, 0 }, { (char *)"rxe-ah", 184UL, 
0, 1, 0U, 0U, 0UL, 0UL, 0 }, { (char *)"rxe-srq", 320UL, 0, 2, 262144U, 131073U, 0UL, 0UL, 0 }, { (char *)"rxe-qp", 2912UL, &rxe_qp_cleanup, 2, 131072U, 16U, 0UL, 0UL, 0 }, { (char *)"rxe-cq", 336UL, &rxe_cq_cleanup, 0, 0U, 0U, 0UL, 0UL, 0 }, { (char *)"rxe-mr", 232UL, &rxe_mem_cleanup, 2, 262144U, 1U, 0UL, 0UL, 0 }, { (char *)"rxe-mw", 232UL, 0, 2, 393216U, 262145U, 0UL, 0UL, 0 }, { (char *)"rxe-mc_grp", 192UL, &rxe_mc_cleanup, 4, 0U, 0U, 160UL, 16UL, 0 }, { (char *)"rxe-mc_elem", 112UL, 0, 1, 0U, 0U, 0UL, 0UL, 0 } };   105     char * pool_name(struct rxe_pool *pool);   110     struct kmem_cache * pool_cache(struct rxe_pool *pool);   166     int rxe_pool_init_index(struct rxe_pool *pool, u32 max, u32 min);   239     void rxe_pool_release(struct kref *kref);   247     void rxe_pool_put(struct rxe_pool *pool);   268     u32  alloc_index(struct rxe_pool *pool);   282     void insert_index(struct rxe_pool *pool, struct rxe_pool_entry *new);   309     void insert_key(struct rxe_pool *pool, struct rxe_pool_entry *new);     1     unsigned long int __builtin_object_size(void *, int);   479     int fls64(__u64 x);   187     unsigned int fls_long(unsigned long l);    40     int __ilog2_u64(u64 n);    61     unsigned long int __roundup_pow_of_two(unsigned long n);   254     void __might_fault(const char *, int);    48     void __list_add(struct list_head *, struct list_head *, struct list_head *);    61     void list_add(struct list_head *new, struct list_head *head);   154     extern struct cpuinfo_x86 boot_cpu_data;   322     void * ldv_kmem_cache_alloc_263(struct kmem_cache *ldv_func_arg1, gfp_t flags);   326     void * ldv_kmem_cache_alloc_283(struct kmem_cache *ldv_func_arg1, gfp_t flags);    19     void ldv_check_alloc_nonatomic();    75     void * ldv_vmalloc_user_284(unsigned long ldv_func_arg1);    88     void vfree(const void *);     5     void kasan_check_read(const void *, unsigned int);   697     unsigned long int _copy_to_user(void *, const void *, unsigned int);   722     void __copy_to_user_overflow();   775     unsigned long int copy_to_user(void *to, const void *from, unsigned long n);   971     struct sk_buff * ldv_skb_clone_273(struct sk_buff *ldv_func_arg1, gfp_t flags);   975     struct sk_buff * ldv_skb_clone_282(struct sk_buff *ldv_func_arg1, gfp_t flags);   980     struct sk_buff * ldv_skb_copy_275(const struct sk_buff *ldv_func_arg1, gfp_t flags);   993     int ldv_pskb_expand_head_270(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);   997     int ldv_pskb_expand_head_271(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  1001     int ldv_pskb_expand_head_279(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  1005     int ldv_pskb_expand_head_280(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  1009     int ldv_pskb_expand_head_281(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  2395     struct sk_buff * ldv___netdev_alloc_skb_276(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);  2399     struct sk_buff * ldv___netdev_alloc_skb_277(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);  2403     struct sk_buff * ldv___netdev_alloc_skb_278(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);    93     void rxe_mmap_release(struct kref *ref);    95     struct rxe_mmap_info * rxe_create_mmap_info(struct rxe_dev *rxe, u32 size, struct 
ib_ucontext *context, void *obj);    79     int do_mmap_info(struct rxe_dev *rxe, struct ib_udata *udata, bool is_req, struct ib_ucontext *context, struct rxe_queue_buf *buf, size_t buf_size, struct rxe_mmap_info **ip_p);    87     struct rxe_queue * rxe_queue_init(struct rxe_dev *rxe, int *num_elem, unsigned int elem_size);    91     int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p, unsigned int elem_size, struct ib_ucontext *context, struct ib_udata *udata, spinlock_t *producer_lock, spinlock_t *consumer_lock);   101     void rxe_queue_cleanup(struct rxe_queue *q);   120     void advance_producer(struct rxe_queue *q);   132     void * producer_addr(struct rxe_queue *q);   143     int resize_finish(struct rxe_queue *q, struct rxe_queue *new_q, unsigned int num_elem);    33     extern struct module __this_module;   437     int fls(int x);    32     int __ilog2_u32(u32 n);   414     int snprintf(char *, size_t , const char *, ...);     5     void * ldv_err_ptr(long error);    87     void __bad_percpu_size();   295     void __bad_size_call_parameter();    27     size_t  strlcpy(char *, const char *, size_t );    23     void * ERR_PTR(long error);   138     void mutex_lock_nested(struct mutex *, unsigned int);   174     void mutex_unlock(struct mutex *);   322     void * ldv_kmem_cache_alloc_304(struct kmem_cache *ldv_func_arg1, gfp_t flags);   326     void * ldv_kmem_cache_alloc_324(struct kmem_cache *ldv_func_arg1, gfp_t flags);     6     void kasan_check_write(const void *, unsigned int);   695     unsigned long int _copy_from_user(void *, const void *, unsigned int);   717     void __copy_from_user_overflow();   738     unsigned long int copy_from_user(void *to, const void *from, unsigned long n);   595     int device_create_file(struct device *, const struct device_attribute *);   597     void device_remove_file(struct device *, const struct device_attribute *);  1137     void dev_warn(const struct device *, const char *, ...);   971     struct sk_buff * ldv_skb_clone_314(struct sk_buff *ldv_func_arg1, gfp_t flags);   975     struct sk_buff * ldv_skb_clone_323(struct sk_buff *ldv_func_arg1, gfp_t flags);   980     struct sk_buff * ldv_skb_copy_316(const struct sk_buff *ldv_func_arg1, gfp_t flags);   993     int ldv_pskb_expand_head_311(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);   997     int ldv_pskb_expand_head_312(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  1001     int ldv_pskb_expand_head_320(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  1005     int ldv_pskb_expand_head_321(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  1009     int ldv_pskb_expand_head_322(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  2395     struct sk_buff * ldv___netdev_alloc_skb_317(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);  2399     struct sk_buff * ldv___netdev_alloc_skb_318(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);  2403     struct sk_buff * ldv___netdev_alloc_skb_319(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);  3317     void dev_put(struct net_device *dev);  3328     void dev_hold(struct net_device *dev);    75     extern union ib_gid zgid;  1217     struct ib_rdma_wr * rdma_wr(struct ib_send_wr *wr);  1232     struct ib_atomic_wr * atomic_wr(struct ib_send_wr *wr);  1249     struct ib_ud_wr * 
ud_wr(struct ib_send_wr *wr);  1261     struct ib_reg_wr * reg_wr(struct ib_send_wr *wr);  2091     int ib_register_device(struct ib_device *, int (*)(struct ib_device *, u8 , struct kobject *));  2094     void ib_unregister_device(struct ib_device *);  3293     int ib_sg_to_pages(struct ib_mr *, struct scatterlist *, int, unsigned int *, int (*)(struct ib_mr *, u64 ));    52     int ib_get_cached_gid(struct ib_device *, u8 , int, union ib_gid *, struct ib_gid_attr *);   435     struct rxe_ucontext * to_ruc(struct ib_ucontext *uc);   440     struct rxe_pd * to_rpd(struct ib_pd *pd);   445     struct rxe_ah * to_rah(struct ib_ah *ah);   450     struct rxe_srq * to_rsrq(struct ib_srq *srq);   455     struct rxe_qp * to_rqp(struct ib_qp *qp);   460     struct rxe_cq * to_rcq(struct ib_cq *cq);    39     int rxe_av_chk_attr(struct rxe_dev *rxe, struct ib_ah_attr *attr);    41     int rxe_av_from_attr(struct rxe_dev *rxe, u8 port_num, struct rxe_av *av, struct ib_ah_attr *attr);    44     int rxe_av_to_attr(struct rxe_dev *rxe, struct rxe_av *av, struct ib_ah_attr *attr);    47     int rxe_av_fill_ip_info(struct rxe_dev *rxe, struct rxe_av *av, struct ib_ah_attr *attr, struct ib_gid_attr *sgid_attr, union ib_gid *sgid);    56     int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe, int comp_vector, struct ib_udata *udata);    59     int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe, int comp_vector, struct ib_ucontext *context, struct ib_udata *udata);    63     int rxe_cq_resize_queue(struct rxe_cq *cq, int cqe, struct ib_udata *udata);    70     int rxe_mcast_get_grp(struct rxe_dev *rxe, union ib_gid *mgid, struct rxe_mc_grp **grp_p);    73     int rxe_mcast_add_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_mc_grp *grp);    76     int rxe_mcast_drop_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp, union ib_gid *mgid);   100     int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);   108     int rxe_mem_init_dma(struct rxe_dev *rxe, struct rxe_pd *pd, int access, struct rxe_mem *mem);   111     int rxe_mem_init_user(struct rxe_dev *rxe, struct rxe_pd *pd, u64 start, u64 length, u64 iova, int access, struct ib_udata *udata, struct rxe_mem *mem);   115     int rxe_mem_init_fast(struct rxe_dev *rxe, struct rxe_pd *pd, int max_pages, struct rxe_mem *mem);   145     int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init);   147     int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd, struct ib_qp_init_attr *init, struct ib_udata *udata, struct ib_pd *ibpd);   151     int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init);   153     int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp, struct ib_qp_attr *attr, int mask);   156     int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask, struct ib_udata *udata);   159     int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask);   163     void rxe_qp_destroy(struct rxe_qp *qp);   177     enum ib_qp_state  qp_state(struct rxe_qp *qp);   213     int rxe_srq_chk_attr(struct rxe_dev *rxe, struct rxe_srq *srq, struct ib_srq_attr *attr, enum ib_srq_attr_mask mask);   216     int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq, struct ib_srq_init_attr *init, struct ib_ucontext *context, struct ib_udata *udata);   220     int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq, struct ib_srq_attr *attr, enum ib_srq_attr_mask mask, struct ib_udata *udata);   224     struct 
ib_dma_mapping_ops rxe_dma_mapping_ops;   114     int queue_full(struct rxe_queue *q);    38     int rxe_query_device(struct ib_device *dev, struct ib_device_attr *attr, struct ib_udata *uhw);    51     void rxe_eth_speed_to_ib_speed(int speed, u8 *active_speed, u8 *active_width);    75     int rxe_query_port(struct ib_device *dev, u8 port_num, struct ib_port_attr *attr);   115     int rxe_query_gid(struct ib_device *device, u8 port_num, int index, union ib_gid *gid);   132     int rxe_add_gid(struct ib_device *device, u8 port_num, unsigned int index, const union ib_gid *gid, const struct ib_gid_attr *attr, void **context);   141     int rxe_del_gid(struct ib_device *device, u8 port_num, unsigned int index, void **context);   149     struct net_device * rxe_get_netdev(struct ib_device *device, u8 port_num);   162     int rxe_query_pkey(struct ib_device *device, u8 port_num, u16 index, u16 *pkey);   189     int rxe_modify_device(struct ib_device *dev, int mask, struct ib_device_modify *attr);   205     int rxe_modify_port(struct ib_device *dev, u8 port_num, int mask, struct ib_port_modify *attr);   230     enum rdma_link_layer  rxe_get_link_layer(struct ib_device *dev, u8 port_num);   238     struct ib_ucontext * rxe_alloc_ucontext(struct ib_device *dev, struct ib_udata *udata);   248     int rxe_dealloc_ucontext(struct ib_ucontext *ibuc);   256     int rxe_port_immutable(struct ib_device *dev, u8 port_num, struct ib_port_immutable *immutable);   274     struct ib_pd * rxe_alloc_pd(struct ib_device *dev, struct ib_ucontext *context, struct ib_udata *udata);   285     int rxe_dealloc_pd(struct ib_pd *ibpd);   293     int rxe_init_av(struct rxe_dev *rxe, struct ib_ah_attr *attr, struct rxe_av *av);   317     struct ib_ah * rxe_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr);   350     int rxe_modify_ah(struct ib_ah *ibah, struct ib_ah_attr *attr);   367     int rxe_query_ah(struct ib_ah *ibah, struct ib_ah_attr *attr);   376     int rxe_destroy_ah(struct ib_ah *ibah);   385     int post_one_recv(struct rxe_rq *rq, struct ib_recv_wr *ibwr);   432     struct ib_srq * rxe_create_srq(struct ib_pd *ibpd, struct ib_srq_init_attr *init, struct ib_udata *udata);   470     int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, enum ib_srq_attr_mask mask, struct ib_udata *udata);   492     int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr);   505     int rxe_destroy_srq(struct ib_srq *ibsrq);   519     int rxe_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr, struct ib_recv_wr **bad_wr);   543     struct ib_qp * rxe_create_qp(struct ib_pd *ibpd, struct ib_qp_init_attr *init, struct ib_udata *udata);   585     int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int mask, struct ib_udata *udata);   606     int rxe_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int mask, struct ib_qp_init_attr *init);   617     int rxe_destroy_qp(struct ib_qp *ibqp);   627     int validate_send_wr(struct rxe_qp *qp, struct ib_send_wr *ibwr, unsigned int mask, unsigned int length);   654     void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr, struct ib_send_wr *ibwr);   709     int init_send_wqe(struct rxe_qp *qp, struct ib_send_wr *ibwr, unsigned int mask, unsigned int length, struct rxe_send_wqe *wqe);   763     int post_one_send(struct rxe_qp *qp, struct ib_send_wr *ibwr, unsigned int mask, u32 length);   804     int rxe_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, struct ib_send_wr **bad_wr);   864     int rxe_post_recv(struct ib_qp *ibqp, 
struct ib_recv_wr *wr, struct ib_recv_wr **bad_wr);   901     struct ib_cq * rxe_create_cq(struct ib_device *dev, const struct ib_cq_init_attr *attr, struct ib_ucontext *context, struct ib_udata *udata);   936     int rxe_destroy_cq(struct ib_cq *ibcq);   944     int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);   964     int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);   985     int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt);   993     int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);  1003     struct ib_mr * rxe_get_dma_mr(struct ib_pd *ibpd, int access);  1034     struct ib_mr * rxe_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length, u64 iova, int access, struct ib_udata *udata);  1070     int rxe_dereg_mr(struct ib_mr *ibmr);  1081     struct ib_mr * rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type, u32 max_num_sg);  1117     int rxe_set_page(struct ib_mr *ibmr, u64 addr);  1136     int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents___0, unsigned int *sg_offset);  1156     int rxe_attach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid);  1174     int rxe_detach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid);  1182     ssize_t  rxe_show_parent(struct device *device, struct device_attribute *attr, char *buf);  1193     struct device_attribute dev_attr_parent = { { "parent", 292U, (_Bool)0, 0, { { { 0 }, { 0 }, { 0 }, { 0 }, { 0 }, { 0 }, { 0 }, { 0 } } } }, &rxe_show_parent, (ssize_t  (*)(struct device *, struct device_attribute *, const char *, size_t ))0 };  1195     struct device_attribute *rxe_dev_attributes[1U] = { &dev_attr_parent };   322     void * ldv_kmem_cache_alloc_344(struct kmem_cache *ldv_func_arg1, gfp_t flags);   326     void * ldv_kmem_cache_alloc_364(struct kmem_cache *ldv_func_arg1, gfp_t flags);   971     struct sk_buff * ldv_skb_clone_354(struct sk_buff *ldv_func_arg1, gfp_t flags);   975     struct sk_buff * ldv_skb_clone_363(struct sk_buff *ldv_func_arg1, gfp_t flags);   980     struct sk_buff * ldv_skb_copy_356(const struct sk_buff *ldv_func_arg1, gfp_t flags);   993     int ldv_pskb_expand_head_351(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);   997     int ldv_pskb_expand_head_352(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  1001     int ldv_pskb_expand_head_360(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  1005     int ldv_pskb_expand_head_361(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  1009     int ldv_pskb_expand_head_362(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  2395     struct sk_buff * ldv___netdev_alloc_skb_357(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);  2399     struct sk_buff * ldv___netdev_alloc_skb_358(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);  2403     struct sk_buff * ldv___netdev_alloc_skb_359(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);   623     bool  ipv6_addr_v4mapped(const struct in6_addr *a);   140     enum rdma_network_type  ib_gid_to_network_type(enum ib_gid_type gid_type, union ib_gid *gid);   185     void rdma_gid2ip(struct sockaddr *out, const union ib_gid *gid);    10     extern struct task_struct *current_task;    12     struct task_struct * get_current();   322     void * ldv_kmem_cache_alloc_384(struct kmem_cache 
*ldv_func_arg1, gfp_t flags);   326     void * ldv_kmem_cache_alloc_404(struct kmem_cache *ldv_func_arg1, gfp_t flags);    45     bool  __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit);   971     struct sk_buff * ldv_skb_clone_394(struct sk_buff *ldv_func_arg1, gfp_t flags);   975     struct sk_buff * ldv_skb_clone_403(struct sk_buff *ldv_func_arg1, gfp_t flags);   980     struct sk_buff * ldv_skb_copy_396(const struct sk_buff *ldv_func_arg1, gfp_t flags);   993     int ldv_pskb_expand_head_391(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);   997     int ldv_pskb_expand_head_392(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  1001     int ldv_pskb_expand_head_400(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  1005     int ldv_pskb_expand_head_401(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  1009     int ldv_pskb_expand_head_402(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  2395     struct sk_buff * ldv___netdev_alloc_skb_397(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);  2399     struct sk_buff * ldv___netdev_alloc_skb_398(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);  2403     struct sk_buff * ldv___netdev_alloc_skb_399(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);  2103     int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len);   190     int rcv_wqe_size(int max_sge);   183     void ___might_sleep(const char *, int, int);    25     void INIT_LIST_HEAD___0(struct list_head *list);    36     void atomic_set___1(atomic_t *v, int i);   445     unsigned long int nsecs_to_jiffies(u64 );    95     void init_timer_key(struct timer_list *, unsigned int, const char *, struct lock_class_key *);   245     int del_timer_sync(struct timer_list *);  3209     int _cond_resched();   322     void * ldv_kmem_cache_alloc_424(struct kmem_cache *ldv_func_arg1, gfp_t flags);   326     void * ldv_kmem_cache_alloc_444(struct kmem_cache *ldv_func_arg1, gfp_t flags);   217     int sock_create_kern(struct net *, int, int, int, struct socket **);   293     int kernel_sock_shutdown(struct socket *, enum sock_shutdown_cmd );   500     void kvfree(const void *);   971     struct sk_buff * ldv_skb_clone_434(struct sk_buff *ldv_func_arg1, gfp_t flags);   975     struct sk_buff * ldv_skb_clone_443(struct sk_buff *ldv_func_arg1, gfp_t flags);   980     struct sk_buff * ldv_skb_copy_436(const struct sk_buff *ldv_func_arg1, gfp_t flags);   993     int ldv_pskb_expand_head_431(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);   997     int ldv_pskb_expand_head_432(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  1001     int ldv_pskb_expand_head_440(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  1005     int ldv_pskb_expand_head_441(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  1009     int ldv_pskb_expand_head_442(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  1548     void __skb_queue_head_init(struct sk_buff_head *list);  1562     void skb_queue_head_init(struct sk_buff_head *list);  2395     struct sk_buff * ldv___netdev_alloc_skb_437(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);  2399     struct 
sk_buff * ldv___netdev_alloc_skb_438(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);  2403     struct sk_buff * ldv___netdev_alloc_skb_439(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);     7     extern struct net init_net;  2154     int ib_modify_qp_is_ok(enum ib_qp_state , enum ib_qp_state , enum ib_qp_type , enum ib_qp_attr_mask , enum rdma_link_layer );    64     int rxe_init_task(void *obj, struct rxe_task *task, void *arg, int (*func)(void *), char *name);    68     void rxe_cleanup_task(struct rxe_task *task);    74     int __rxe_do_task(struct rxe_task *task);    90     void rxe_disable_task(struct rxe_task *task);    93     void rxe_enable_task(struct rxe_task *task);   139     char *rxe_qp_state_name[6U];    79     void rxe_drop_all_mcast_groups(struct rxe_qp *qp);    43     char *rxe_qp_state_name[6U] = { (char *)"RESET", (char *)"INIT", (char *)"READY", (char *)"DRAIN", (char *)"DRAINED", (char *)"ERROR" };    52     int rxe_qp_chk_cap(struct rxe_dev *rxe, struct ib_qp_cap *cap, int has_srq);   132     int alloc_rd_atomic_resources(struct rxe_qp *qp, unsigned int n);   144     void free_rd_atomic_resources(struct rxe_qp *qp);   171     void cleanup_rd_atomic_resources(struct rxe_qp *qp);   184     void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp, struct ib_qp_init_attr *init);   226     int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp, struct ib_qp_init_attr *init, struct ib_ucontext *context, struct ib_udata *udata);   288     int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp, struct ib_qp_init_attr *init, struct ib_ucontext *context, struct ib_udata *udata);   501     void rxe_qp_reset(struct rxe_qp *qp);   557     void rxe_qp_drain(struct rxe_qp *qp);   204     bool  test_and_set_bit(long nr, volatile unsigned long *addr);   322     void * ldv_kmem_cache_alloc_464(struct kmem_cache *ldv_func_arg1, gfp_t flags);   326     void * ldv_kmem_cache_alloc_484(struct kmem_cache *ldv_func_arg1, gfp_t flags);   971     struct sk_buff * ldv_skb_clone_474(struct sk_buff *ldv_func_arg1, gfp_t flags);   975     struct sk_buff * ldv_skb_clone_483(struct sk_buff *ldv_func_arg1, gfp_t flags);   980     struct sk_buff * ldv_skb_copy_476(const struct sk_buff *ldv_func_arg1, gfp_t flags);   993     int ldv_pskb_expand_head_471(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);   997     int ldv_pskb_expand_head_472(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  1001     int ldv_pskb_expand_head_480(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  1005     int ldv_pskb_expand_head_481(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  1009     int ldv_pskb_expand_head_482(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  2395     struct sk_buff * ldv___netdev_alloc_skb_477(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);  2399     struct sk_buff * ldv___netdev_alloc_skb_478(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);  2403     struct sk_buff * ldv___netdev_alloc_skb_479(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);   550     void __tasklet_schedule(struct tasklet_struct *);   552     void tasklet_schedule(struct tasklet_struct *t);   602     void tasklet_init(struct tasklet_struct *, void (*)(unsigned long), unsigned long);    69     void 
rxe_send_complete(unsigned long data);    52     bool  is_power_of_2(unsigned long n);     3     bool  ldv_is_err(const void *ptr);     6     long int ldv_ptr_err(const void *ptr);     7     extern unsigned long page_offset_base;    32     long int PTR_ERR(const void *ptr);    41     bool  IS_ERR(const void *ptr);   318     void * __kmalloc(size_t , gfp_t );   322     void * ldv_kmem_cache_alloc_504(struct kmem_cache *ldv_func_arg1, gfp_t flags);   326     void * ldv_kmem_cache_alloc_524(struct kmem_cache *ldv_func_arg1, gfp_t flags);   564     void * kmalloc_array(size_t n, size_t size, gfp_t flags);  1003     void * lowmem_page_address(const struct page *page);   120     struct page * sg_page(struct scatterlist *sg);   246     struct scatterlist * sg_next(struct scatterlist *);   971     struct sk_buff * ldv_skb_clone_514(struct sk_buff *ldv_func_arg1, gfp_t flags);   975     struct sk_buff * ldv_skb_clone_523(struct sk_buff *ldv_func_arg1, gfp_t flags);   980     struct sk_buff * ldv_skb_copy_516(const struct sk_buff *ldv_func_arg1, gfp_t flags);   993     int ldv_pskb_expand_head_511(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);   997     int ldv_pskb_expand_head_512(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  1001     int ldv_pskb_expand_head_520(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  1005     int ldv_pskb_expand_head_521(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  1009     int ldv_pskb_expand_head_522(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  2395     struct sk_buff * ldv___netdev_alloc_skb_517(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);  2399     struct sk_buff * ldv___netdev_alloc_skb_518(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);  2403     struct sk_buff * ldv___netdev_alloc_skb_519(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);    61     int ib_umem_offset(struct ib_umem *umem);    85     struct ib_umem * ib_umem_get(struct ib_ucontext *, unsigned long, size_t , int, int);    87     void ib_umem_release(struct ib_umem *);   137     int rxe_mem_map_pages(struct rxe_dev *rxe, struct rxe_mem *mem, u64 *page, int num_pages, u64 iova);    40     u8  rxe_get_key();    75     void rxe_mem_init(int access, struct rxe_mem *mem);   108     int rxe_mem_alloc(struct rxe_dev *rxe, struct rxe_mem *mem, int num_buf);   264     void lookup_iova(struct rxe_mem *mem, u64 iova, int *m_out, int *n_out, size_t *offset_out);    13     int __get_order(unsigned long size);   467     struct page * alloc_pages(gfp_t flags, unsigned int order);   504     void free_pages(unsigned long, unsigned int);   322     void * ldv_kmem_cache_alloc_544(struct kmem_cache *ldv_func_arg1, gfp_t flags);   326     void * ldv_kmem_cache_alloc_564(struct kmem_cache *ldv_func_arg1, gfp_t flags);   125     int valid_dma_direction(int dma_direction);   971     struct sk_buff * ldv_skb_clone_554(struct sk_buff *ldv_func_arg1, gfp_t flags);   975     struct sk_buff * ldv_skb_clone_563(struct sk_buff *ldv_func_arg1, gfp_t flags);   980     struct sk_buff * ldv_skb_copy_556(const struct sk_buff *ldv_func_arg1, gfp_t flags);   993     int ldv_pskb_expand_head_551(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);   997     int ldv_pskb_expand_head_552(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int 
ldv_func_arg3, gfp_t flags);  1001     int ldv_pskb_expand_head_560(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  1005     int ldv_pskb_expand_head_561(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  1009     int ldv_pskb_expand_head_562(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  2395     struct sk_buff * ldv___netdev_alloc_skb_557(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);  2399     struct sk_buff * ldv___netdev_alloc_skb_558(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);  2403     struct sk_buff * ldv___netdev_alloc_skb_559(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);    40     int rxe_mapping_error(struct ib_device *dev, u64 dma_addr);    45     u64  rxe_dma_map_single(struct ib_device *dev, void *cpu_addr, size_t size, enum dma_data_direction direction);    53     void rxe_dma_unmap_single(struct ib_device *dev, u64 addr, size_t size, enum dma_data_direction direction);    60     u64  rxe_dma_map_page(struct ib_device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction direction);    82     void rxe_dma_unmap_page(struct ib_device *dev, u64 addr, size_t size, enum dma_data_direction direction);    89     int rxe_map_sg(struct ib_device *dev, struct scatterlist *sgl, int nents, enum dma_data_direction direction);   114     void rxe_unmap_sg(struct ib_device *dev, struct scatterlist *sg, int nents, enum dma_data_direction direction);   121     void rxe_sync_single_for_cpu(struct ib_device *dev, u64 addr, size_t size, enum dma_data_direction dir);   127     void rxe_sync_single_for_device(struct ib_device *dev, u64 addr, size_t size, enum dma_data_direction dir);   133     void * rxe_dma_alloc_coherent(struct ib_device *dev, size_t size, u64 *dma_handle, gfp_t flag);   149     void rxe_dma_free_coherent(struct ib_device *dev, size_t size, void *cpu_addr, u64 dma_handle);   155     struct ib_dma_mapping_ops rxe_dma_mapping_ops = { &rxe_mapping_error, &rxe_dma_map_single, &rxe_dma_unmap_single, &rxe_dma_map_page, &rxe_dma_unmap_page, &rxe_map_sg, &rxe_unmap_sg, &rxe_sync_single_for_cpu, &rxe_sync_single_for_device, &rxe_dma_alloc_coherent, &rxe_dma_free_coherent };   206     void ldv_main13_sequence_infinite_withcheck_stateful();   322     void * ldv_kmem_cache_alloc_584(struct kmem_cache *ldv_func_arg1, gfp_t flags);   326     void * ldv_kmem_cache_alloc_604(struct kmem_cache *ldv_func_arg1, gfp_t flags);   971     struct sk_buff * ldv_skb_clone_594(struct sk_buff *ldv_func_arg1, gfp_t flags);   975     struct sk_buff * ldv_skb_clone_603(struct sk_buff *ldv_func_arg1, gfp_t flags);   980     struct sk_buff * ldv_skb_copy_596(const struct sk_buff *ldv_func_arg1, gfp_t flags);   993     int ldv_pskb_expand_head_591(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);   997     int ldv_pskb_expand_head_592(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  1001     int ldv_pskb_expand_head_600(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  1005     int ldv_pskb_expand_head_601(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  1009     int ldv_pskb_expand_head_602(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);
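The rxe_dma_mapping_ops initializer above wires the driver's software callbacks into a struct of function pointers, the kernel's way of letting a soft device with no real DMA engine stand in for HCA hardware. The following is a minimal userspace sketch of that ops-table pattern; the two-member struct and the toy_* names are illustrative stand-ins, not the kernel's ib_dma_mapping_ops.

/* Minimal sketch of an ops table: fill it once, dispatch through it. */
#include <stdio.h>
#include <stddef.h>

struct toy_dma_ops {
        int (*mapping_error)(unsigned long addr);
        unsigned long (*map_single)(void *cpu_addr, size_t size);
};

static int toy_mapping_error(unsigned long addr) { return addr == 0; }

/* rxe has no DMA engine, so a "mapping" can simply be the address;
 * this sketch mimics that with a plain cast. */
static unsigned long toy_map_single(void *cpu_addr, size_t size)
{
        (void)size;
        return (unsigned long)cpu_addr;
}

static const struct toy_dma_ops toy_ops = { toy_mapping_error, toy_map_single };

int main(void)
{
        int buf = 0;
        unsigned long h = toy_ops.map_single(&buf, sizeof(buf));
        printf("handle=%#lx bad=%d\n", h, toy_ops.mapping_error(h));
        return 0;
}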
2395     struct sk_buff * ldv___netdev_alloc_skb_597(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);  2399     struct sk_buff * ldv___netdev_alloc_skb_598(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);  2403     struct sk_buff * ldv___netdev_alloc_skb_599(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);    41     struct rxe_wr_opcode_info rxe_wr_opcode_info[12U] = { { (char *)"IB_WR_RDMA_WRITE", { 0, 0, 17, 17 } }, { (char *)"IB_WR_RDMA_WRITE_WITH_IMM", { 0, 0, 17, 17 } }, { (char *)"IB_WR_SEND", { 5, 5, 5, 5, 5 } }, { (char *)"IB_WR_SEND_WITH_IMM", { 5, 5, 5, 5, 5 } }, { (char *)"IB_WR_RDMA_READ", { 0, 0, 8 } }, { (char *)"IB_WR_ATOMIC_CMP_AND_SWP", { 0, 0, 2 } }, { (char *)"IB_WR_ATOMIC_FETCH_AND_ADD", { 0, 0, 2 } }, { (char *)"IB_WR_LSO", { 0, 0, 0, 0, 0, 0, 0, 0 } }, { (char *)"IB_WR_SEND_WITH_INV", { 0, 0, 5, 5, 5 } }, { (char *)"IB_WR_RDMA_READ_WITH_INV", { 0, 0, 8 } }, { (char *)"IB_WR_LOCAL_INV", { 0, 0, 64 } }, { (char *)"IB_WR_REG_MR", { 0, 0, 64 } } };   128     struct rxe_opcode_info rxe_opcode[256U] = { { (char *)"IB_OPCODE_RC_SEND_FIRST", 1333248, 12, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12 } }, { (char *)"IB_OPCODE_RC_SEND_MIDDLE", 2119680, 12, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12 } }, { (char *)"IB_OPCODE_RC_SEND_LAST", 4741120, 12, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12 } }, { (char *)"IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE", 4742144, 16, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 16 } }, { (char *)"IB_OPCODE_RC_SEND_ONLY", 6051840, 12, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12 } }, { (char *)"IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE", 6052864, 16, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 16 } }, { (char *)"IB_OPCODE_RC_RDMA_WRITE_FIRST", 1087496, 28, { 0, 0, 0, 12, 0, 0, 0, 0, 0, 0, 0, 28 } }, { (char *)"IB_OPCODE_RC_RDMA_WRITE_MIDDLE", 2136064, 12, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12 } }, { (char *)"IB_OPCODE_RC_RDMA_WRITE_LAST", 4233216, 12, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12 } }, { (char *)"IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE", 5020672, 16, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 16 } }, { (char *)"IB_OPCODE_RC_RDMA_WRITE_ONLY", 5281800, 28, { 0, 0, 0, 12, 0, 0, 0, 0, 0, 0, 0, 28 } }, { (char *)"IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE", 6069256, 32, { 0, 0, 0, 12, 0, 0, 0, 0, 0, 0, 28, 32 } }, { (char *)"IB_OPCODE_RC_RDMA_READ_REQUEST", 5312520, 28, { 0, 0, 0, 12, 0, 0, 0, 0, 0, 0, 0, 28 } }, { (char *)"IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST", 1058832, 16, { 0, 0, 0, 0, 12, 0, 0, 0, 0, 0, 0, 16 } }, { (char *)"IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE", 2107392, 12, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12 } }, { (char *)"IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST", 4204560, 16, { 0, 0, 0, 0, 12, 0, 0, 0, 0, 0, 0, 16 } }, { (char *)"IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY", 5253136, 16, { 0, 0, 0, 0, 12, 0, 0, 0, 0, 0, 0, 16 } }, { (char *)"IB_OPCODE_RC_ACKNOWLEDGE", 5251088, 16, { 0, 0, 0, 0, 12, 0, 0, 0, 0, 0, 0, 16 } }, { (char *)"IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE", 5251152, 24, { 0, 0, 0, 0, 12, 0, 16, 0, 0, 0, 0, 24 } }, { (char *)"IB_OPCODE_RC_COMPARE_SWAP", 5378080, 40, { 0, 0, 0, 0, 0, 12, 0, 0, 0, 0, 0, 40 } }, { (char *)"IB_OPCODE_RC_FETCH_ADD", 5378080, 40, { 0, 0, 0, 0, 0, 12, 0, 0, 0, 0, 0, 40 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { (char *)"IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE", 4741248, 16, { 0, 0, 0, 0, 0, 0, 0, 12, 0, 0, 0, 16 } }, { (char *)"IB_OPCODE_RC_SEND_ONLY_INV", 5003392, 16, { 0, 0, 0, 0, 0, 0, 0, 12, 0, 0, 0, 16 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { (char *)"IB_OPCODE_UC_SEND_FIRST", 1333248, 12, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12 } }, { (char *)"IB_OPCODE_UC_SEND_MIDDLE", 2119680, 12, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12 } }, { (char *)"IB_OPCODE_UC_SEND_LAST", 4741120, 12, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12 } }, { (char *)"IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE", 4742144, 16, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 16 } }, { (char *)"IB_OPCODE_UC_SEND_ONLY", 6051840, 12, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12 } }, { (char *)"IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE", 6052864, 16, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 16 } }, { (char *)"IB_OPCODE_UC_RDMA_WRITE_FIRST", 1087496, 28, { 0, 0, 0, 12, 0, 0, 0, 0, 0, 0, 0, 28 } }, { (char *)"IB_OPCODE_UC_RDMA_WRITE_MIDDLE", 2136064, 12, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12 } }, { (char *)"IB_OPCODE_UC_RDMA_WRITE_LAST", 4233216, 12, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12 } }, { (char *)"IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE", 5020672, 16, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 16 } }, { (char *)"IB_OPCODE_UC_RDMA_WRITE_ONLY", 5281800, 28, { 0, 0, 0, 12, 0, 0, 0, 0, 0, 0, 0, 28 } }, { (char *)"IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE", 6069256, 32, { 0, 0, 0, 12, 0, 0, 0, 0, 0, 0, 28, 32 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { (char *)"IB_OPCODE_RD_SEND_FIRST", 1334016, 24, { 0, 0, 0, 0, 0, 0, 0, 0, 12, 16, 0, 24 } }, { (char *)"IB_OPCODE_RD_SEND_MIDDLE", 2120448, 24, { 0, 0, 0, 0, 0, 0, 0, 0, 12, 16, 0, 24 } }, { (char *)"IB_OPCODE_RD_SEND_LAST", 4741888, 24, { 0, 0, 0, 0, 0, 0, 0, 0, 12, 16, 0, 24 } }, { (char *)"IB_OPCODE_RD_SEND_LAST_WITH_IMMEDIATE", 4742912, 28, { 0, 0, 0, 0, 0, 0, 0, 0, 12, 16, 24, 28 } }, { (char *)"IB_OPCODE_RD_SEND_ONLY", 6052608, 24, { 0, 0, 0, 0, 0, 0, 0, 0, 12, 16, 0, 24 } }, { (char *)"IB_OPCODE_RD_SEND_ONLY_WITH_IMMEDIATE", 6053632, 28, { 0, 0, 0, 0, 0, 0, 0, 0, 12, 16, 24, 28 } }, { (char *)"IB_OPCODE_RD_RDMA_WRITE_FIRST", 1088264, 40, { 0, 0, 0, 24, 0, 0, 0, 0, 12, 16, 0, 40 } }, { (char *)"IB_OPCODE_RD_RDMA_WRITE_MIDDLE", 2136832, 24, { 0, 0, 0, 0, 0, 0, 0, 0, 12, 16, 0, 24 } }, { (char *)"IB_OPCODE_RD_RDMA_WRITE_LAST", 4233984, 24, { 0, 0, 0, 0, 0, 0, 0, 0, 12, 16, 0, 24 } }, { (char *)"IB_OPCODE_RD_RDMA_WRITE_LAST_WITH_IMMEDIATE", 5021440, 28, { 
0, 0, 0, 0, 0, 0, 0, 0, 12, 16, 24, 28 } }, { (char *)"IB_OPCODE_RD_RDMA_WRITE_ONLY", 5282568, 40, { 0, 0, 0, 24, 0, 0, 0, 0, 12, 16, 0, 40 } }, { (char *)"IB_OPCODE_RD_RDMA_WRITE_ONLY_WITH_IMMEDIATE", 6070024, 44, { 0, 0, 0, 24, 0, 0, 0, 0, 12, 16, 40, 44 } }, { (char *)"IB_OPCODE_RD_RDMA_READ_REQUEST", 5313288, 40, { 0, 0, 0, 24, 0, 0, 0, 0, 12, 16, 0, 40 } }, { (char *)"IB_OPCODE_RD_RDMA_READ_RESPONSE_FIRST", 1059088, 20, { 0, 0, 0, 0, 16, 0, 0, 0, 12, 0, 0, 20 } }, { (char *)"IB_OPCODE_RD_RDMA_READ_RESPONSE_MIDDLE", 2107648, 16, { 0, 0, 0, 0, 0, 0, 0, 0, 12, 0, 0, 16 } }, { (char *)"IB_OPCODE_RD_RDMA_READ_RESPONSE_LAST", 4204816, 20, { 0, 0, 0, 0, 16, 0, 0, 0, 12, 0, 0, 20 } }, { (char *)"IB_OPCODE_RD_RDMA_READ_RESPONSE_ONLY", 5253392, 20, { 0, 0, 0, 0, 16, 0, 0, 0, 12, 0, 0, 20 } }, { (char *)"IB_OPCODE_RD_ACKNOWLEDGE", 5251344, 20, { 0, 0, 0, 0, 16, 0, 0, 0, 12 } }, { (char *)"IB_OPCODE_RD_ATOMIC_ACKNOWLEDGE", 5251408, 28, { 0, 0, 0, 0, 16, 0, 20, 0, 12 } }, { (char *)"RD_COMPARE_SWAP", 5378848, 52, { 0, 0, 0, 0, 0, 24, 0, 0, 12, 16, 0, 52 } }, { (char *)"IB_OPCODE_RD_FETCH_ADD", 5378848, 52, { 0, 0, 0, 0, 0, 24, 0, 0, 12, 16, 0, 52 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { (char *)"IB_OPCODE_UD_SEND_ONLY", 6052352, 20, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 0, 20 } }, { (char *)"IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE", 6053376, 24, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 20, 24 } } };    25     void INIT_LIST_HEAD___1(struct list_head *list);   112     void __list_del_entry(struct list_head *);   113     void list_del(struct list_head *);   143     void list_del_init(struct list_head *entry);   187     int list_empty(const struct list_head *head);    36     void atomic_set___2(atomic_t *v, int i);    31     void kref_init___1(struct kref *kref);   322     void * ldv_kmem_cache_alloc_624(struct kmem_cache *ldv_func_arg1, gfp_t flags);   326     void * ldv_kmem_cache_alloc_644(struct kmem_cache *ldv_func_arg1, gfp_t flags);    94     int remap_vmalloc_range(struct vm_area_struct *, void *, unsigned long);   971     struct sk_buff * ldv_skb_clone_634(struct sk_buff *ldv_func_arg1, gfp_t flags);   975     struct sk_buff * ldv_skb_clone_643(struct sk_buff *ldv_func_arg1, gfp_t flags);   980     struct sk_buff * ldv_skb_copy_636(const struct sk_buff *ldv_func_arg1, gfp_t flags);   993     int ldv_pskb_expand_head_631(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);   997     int ldv_pskb_expand_head_632(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  1001     int ldv_pskb_expand_head_640(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  1005     int ldv_pskb_expand_head_641(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags); 
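For each 8-bit BTH opcode, the rxe_opcode[256U] initializer above records a printable name, a behavior mask, the total header length, and an array of per-field byte offsets; the header accessors later in the trace (the rxe_bth casts computed from pkt->hdr + pkt->offset) depend on exactly those offsets and lengths. Below is a small userspace sketch of that lookup; the field indices and the two-entry table are hypothetical, not the kernel's enums or data.

/* Sketch of a per-opcode offset table yielding a header-field pointer. */
#include <stdio.h>

enum { F_BTH, F_IMMDT, F_MAX };          /* illustrative field indices */

struct opcode_info {
        const char *name;
        int length;                      /* total header bytes */
        int offset[F_MAX];               /* byte offset of each field */
};

static const struct opcode_info opcode_tbl[] = {
        { "SEND_LAST",                12, { 0,  0 } },
        { "SEND_LAST_WITH_IMMEDIATE", 16, { 0, 12 } },
};

int main(void)
{
        unsigned char hdr[16] = { 0 };
        int op = 1;
        /* field address = header base + table offset, as in rxe's accessors */
        unsigned char *immdt = hdr + opcode_tbl[op].offset[F_IMMDT];
        printf("%s: %d header bytes, immdt at +%d\n",
               opcode_tbl[op].name, opcode_tbl[op].length, (int)(immdt - hdr));
        return 0;
}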
 1009     int ldv_pskb_expand_head_642(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  2395     struct sk_buff * ldv___netdev_alloc_skb_637(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);  2399     struct sk_buff * ldv___netdev_alloc_skb_638(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);  2403     struct sk_buff * ldv___netdev_alloc_skb_639(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);    66     void rxe_vma_open(struct vm_area_struct *vma);    73     void rxe_vma_close(struct vm_area_struct *vma);    80     struct vm_operations_struct rxe_vm_ops = { &rxe_vma_open, &rxe_vma_close, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };   213     void ldv_main15_sequence_infinite_withcheck_stateful();   322     void * ldv_kmem_cache_alloc_664(struct kmem_cache *ldv_func_arg1, gfp_t flags);   326     void * ldv_kmem_cache_alloc_684(struct kmem_cache *ldv_func_arg1, gfp_t flags);   971     struct sk_buff * ldv_skb_clone_674(struct sk_buff *ldv_func_arg1, gfp_t flags);   975     struct sk_buff * ldv_skb_clone_683(struct sk_buff *ldv_func_arg1, gfp_t flags);   980     struct sk_buff * ldv_skb_copy_676(const struct sk_buff *ldv_func_arg1, gfp_t flags);   993     int ldv_pskb_expand_head_671(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);   997     int ldv_pskb_expand_head_672(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  1001     int ldv_pskb_expand_head_680(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  1005     int ldv_pskb_expand_head_681(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  1009     int ldv_pskb_expand_head_682(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  2395     struct sk_buff * ldv___netdev_alloc_skb_677(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);  2399     struct sk_buff * ldv___netdev_alloc_skb_678(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);  2403     struct sk_buff * ldv___netdev_alloc_skb_679(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);    25     void INIT_LIST_HEAD___2(struct list_head *list);   187     int list_empty___0(const struct list_head *head);   322     void * ldv_kmem_cache_alloc_704(struct kmem_cache *ldv_func_arg1, gfp_t flags);   326     void * ldv_kmem_cache_alloc_724(struct kmem_cache *ldv_func_arg1, gfp_t flags);   971     struct sk_buff * ldv_skb_clone_714(struct sk_buff *ldv_func_arg1, gfp_t flags);   975     struct sk_buff * ldv_skb_clone_723(struct sk_buff *ldv_func_arg1, gfp_t flags);   980     struct sk_buff * ldv_skb_copy_716(const struct sk_buff *ldv_func_arg1, gfp_t flags);   993     int ldv_pskb_expand_head_711(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);   997     int ldv_pskb_expand_head_712(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  1001     int ldv_pskb_expand_head_720(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  1005     int ldv_pskb_expand_head_721(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  1009     int ldv_pskb_expand_head_722(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  2395     struct sk_buff * ldv___netdev_alloc_skb_717(struct net_device *ldv_func_arg1, unsigned int 
ldv_func_arg2, gfp_t flags);  2399     struct sk_buff * ldv___netdev_alloc_skb_718(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);  2403     struct sk_buff * ldv___netdev_alloc_skb_719(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);   308     bool  constant_test_bit(long nr, const volatile unsigned long *addr);   322     void * ldv_kmem_cache_alloc_744(struct kmem_cache *ldv_func_arg1, gfp_t flags);   540     void tasklet_unlock_wait(struct tasklet_struct *t);   581     void tasklet_disable_nosync(struct tasklet_struct *t);   587     void tasklet_disable(struct tasklet_struct *t);   594     void tasklet_enable(struct tasklet_struct *t);   600     void tasklet_kill(struct tasklet_struct *);    82     void rxe_do_task(unsigned long data);    75     void list_add_tail(struct list_head *new, struct list_head *head);   187     int list_empty___1(const struct list_head *head);    66     int strcmp(const char *, const char *);   502     int rcu_read_lock_held();   503     int rcu_read_lock_bh_held();   322     void * ldv_kmem_cache_alloc_766(struct kmem_cache *ldv_func_arg1, gfp_t flags);   326     void * ldv_kmem_cache_alloc_786(struct kmem_cache *ldv_func_arg1, gfp_t flags);   831     struct dst_entry * skb_dst(const struct sk_buff *skb);   850     void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst);   909     struct sk_buff * alloc_skb(unsigned int size, gfp_t flags);   971     struct sk_buff * ldv_skb_clone_776(struct sk_buff *ldv_func_arg1, gfp_t flags);   975     struct sk_buff * ldv_skb_clone_785(struct sk_buff *ldv_func_arg1, gfp_t flags);   979     struct sk_buff * ldv_skb_clone_787(struct sk_buff *ldv_func_arg1, gfp_t flags);   984     struct sk_buff * ldv_skb_copy_778(const struct sk_buff *ldv_func_arg1, gfp_t flags);   997     int ldv_pskb_expand_head_773(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  1001     int ldv_pskb_expand_head_774(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  1005     int ldv_pskb_expand_head_782(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  1009     int ldv_pskb_expand_head_783(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  1013     int ldv_pskb_expand_head_784(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  1047     void skb_clear_hash(struct sk_buff *skb);  1183     unsigned char * skb_end_pointer(const struct sk_buff *skb);  1784     bool  skb_is_nonlinear(const struct sk_buff *skb);  1905     unsigned char * skb_put(struct sk_buff *, unsigned int);  1915     unsigned char * skb_push(struct sk_buff *, unsigned int);  1916     unsigned char * __skb_push(struct sk_buff *skb, unsigned int len);  1936     unsigned char * __pskb_pull_tail(struct sk_buff *, int);  2006     void skb_reserve(struct sk_buff *skb, int len);  2126     unsigned char * skb_transport_header(const struct sk_buff *skb);  2131     void skb_reset_transport_header(struct sk_buff *skb);  2148     void skb_reset_network_header(struct sk_buff *skb);  2395     struct sk_buff * ldv___netdev_alloc_skb_779(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);  2399     struct sk_buff * ldv___netdev_alloc_skb_780(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);  2403     struct sk_buff * ldv___netdev_alloc_skb_781(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);  2807     int 
__skb_linearize(struct sk_buff *skb);  2819     int skb_linearize(struct sk_buff *skb);  3025     void skb_scrub_packet(struct sk_buff *, bool );   256     struct net * read_pnet(const possible_net_t *pnet);  2015     struct net * dev_net(const struct net_device *dev);  2279     int register_netdevice_notifier(struct notifier_block *);  2280     int unregister_netdevice_notifier(struct notifier_block *);  2311     struct net_device * netdev_notifier_info_to_dev(const struct netdev_notifier_info *info);  3744     int dev_mc_add(struct net_device *, const unsigned char *);  3747     int dev_mc_del(struct net_device *, const unsigned char *);   111     struct net_device * vlan_dev_real_dev(const struct net_device *);   273     void dst_release(struct dst_entry *);  2183     struct net * sock_net(const struct sock *sk);    25     struct udphdr * udp_hdr(const struct sk_buff *skb);   211     void ipv6_eth_mc_map(const struct in6_addr *addr, char *buf);   831     void ip6_flow_hdr(struct ipv6hdr *hdr, unsigned int tclass, __be32 flowlabel);   928     int ip6_local_out(struct net *, struct sock *, struct sk_buff *);   126     struct rtable * ip_route_output_flow(struct net *, struct flowi4 *, const struct sock *);   131     struct rtable * ip_route_output_key(struct net *net, struct flowi4 *flp);   213     extern const struct ipv6_stub *ipv6_stub;   117     void ip_send_check(struct iphdr *);   119     int ip_local_out(struct net *, struct sock *, struct sk_buff *);   330     void __ip_select_ident(struct net *, struct iphdr *, int);    38     int udp_sock_create4(struct net *, struct udp_port_cfg *, struct socket **);    42     int udp_sock_create6(struct net *, struct udp_port_cfg *, struct socket **);    52     int udp_sock_create(struct net *net, struct udp_port_cfg *cfg, struct socket **sockp);    84     void setup_udp_tunnel_sock(struct net *, struct socket *, struct udp_tunnel_sock_cfg *);   142     void udp_tunnel_sock_release(struct socket *);  2086     struct ib_device * ib_alloc_device(size_t );  2160     void ib_dispatch_event(struct ib_event *);    46     struct rxe_recv_sockets recv_sockets = {  };    48     struct rxe_dev * rxe_net_add(struct net_device *ndev);    71     struct rxe_dev * net_to_rxe(struct net_device *ndev);    72     struct rxe_dev * get_rxe_by_name(const char *name);    74     void rxe_port_up(struct rxe_dev *rxe);    75     void rxe_port_down(struct rxe_dev *rxe);    49     struct list_head rxe_dev_list = { &rxe_dev_list, &rxe_dev_list };    50     struct spinlock dev_list_lock = {  };    88     __be64  rxe_mac_to_eui64(struct net_device *ndev);   106     __be64  node_guid(struct rxe_dev *rxe);   111     __be64  port_guid(struct rxe_dev *rxe);   116     struct device * dma_device(struct rxe_dev *rxe);   128     int mcast_add(struct rxe_dev *rxe, union ib_gid *mgid);   139     int mcast_delete(struct rxe_dev *rxe, union ib_gid *mgid);   150     struct dst_entry * rxe_find_route4(struct net_device *ndev, struct in_addr *saddr, struct in_addr *daddr);   173     struct dst_entry * rxe_find_route6(struct net_device *ndev, struct in6_addr *saddr, struct in6_addr *daddr);   214     int rxe_udp_encap_recv(struct sock *sk, struct sk_buff *skb);   242     struct socket * rxe_setup_udp_tunnel(struct net *net, __be16 port, bool ipv6);   279     void rxe_release_udp_tunnel(struct socket *sk);   284     void prepare_udp_hdr(struct sk_buff *skb, __be16 src_port, __be16 dst_port);   299     void prepare_ipv4_hdr(struct dst_entry *dst, struct sk_buff *skb, __be32 saddr, __be32 
daddr, __u8 proto, __u8 tos, __u8 ttl, __be16 df, bool xnet);   330     void prepare_ipv6_hdr(struct dst_entry *dst, struct sk_buff *skb, struct in6_addr *saddr, struct in6_addr *daddr, __u8 proto, __u8 prio, __u8 ttl);   353     int prepare4(struct rxe_dev *rxe, struct sk_buff *skb, struct rxe_av *av);   379     int prepare6(struct rxe_dev *rxe, struct sk_buff *skb, struct rxe_av *av);   404     int prepare(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, struct sk_buff *skb, u32 *crc);   420     void rxe_skb_tx_dtor(struct sk_buff *skb);   431     int send(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, struct sk_buff *skb);   467     int loopback(struct sk_buff *skb);   477     struct sk_buff * init_packet(struct rxe_dev *rxe, struct rxe_av *av, int paylen, struct rxe_pkt_info *pkt);   517     char * parent_name(struct rxe_dev *rxe, unsigned int port_num);   522     enum rdma_link_layer  link_layer(struct rxe_dev *rxe, unsigned int port_num);   528     struct rxe_ifc_ops ifc_ops = { 0, &node_guid, &port_guid, &dma_device, &mcast_add, &mcast_delete, &prepare, &send, &loopback, &init_packet, &parent_name, &link_layer };   580     const char __kstrtab_rxe_remove_all[15U] = { 'r', 'x', 'e', '_', 'r', 'e', 'm', 'o', 'v', 'e', '_', 'a', 'l', 'l', '\x0' };   580     const struct kernel_symbol __ksymtab_rxe_remove_all;   580     const struct kernel_symbol __ksymtab_rxe_remove_all = { (unsigned long)(&rxe_remove_all), (const char *)(&__kstrtab_rxe_remove_all) };   582     void rxe_port_event(struct rxe_dev *rxe, enum ib_event_type event);   622     int rxe_notify(struct notifier_block *not_blk, unsigned long event, void *arg);   662     struct notifier_block rxe_net_notifier = { &rxe_notify, 0, 0 };   748     void ldv_main19_sequence_infinite_withcheck_stateful();    45     int strncmp(const char *, const char *, __kernel_size_t );   322     void * ldv_kmem_cache_alloc_807(struct kmem_cache *ldv_func_arg1, gfp_t flags);   326     void * ldv_kmem_cache_alloc_827(struct kmem_cache *ldv_func_arg1, gfp_t flags);   971     struct sk_buff * ldv_skb_clone_817(struct sk_buff *ldv_func_arg1, gfp_t flags);   975     struct sk_buff * ldv_skb_clone_826(struct sk_buff *ldv_func_arg1, gfp_t flags);   980     struct sk_buff * ldv_skb_copy_819(const struct sk_buff *ldv_func_arg1, gfp_t flags);   993     int ldv_pskb_expand_head_814(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);   997     int ldv_pskb_expand_head_815(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  1001     int ldv_pskb_expand_head_823(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  1005     int ldv_pskb_expand_head_824(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  1009     int ldv_pskb_expand_head_825(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  2395     struct sk_buff * ldv___netdev_alloc_skb_820(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);  2399     struct sk_buff * ldv___netdev_alloc_skb_821(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);  2403     struct sk_buff * ldv___netdev_alloc_skb_822(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);  2387     struct net_device * dev_get_by_name(struct net *, const char *);  3082     bool  netif_running(const struct net_device *dev);  3352     bool  netif_carrier_ok(const struct net_device *dev);
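The ldv_main*_sequence_infinite_withcheck_stateful() functions declared above are the verifier's environment model: the trace that begins just below shows one of them setting LDV_IN_INTERRUPT, calling ldv_initialize(), and then dispatching driver callbacks chosen by nondet_int() inside an endless switch, which is how CPAchecker explores every callback ordering. The following is a rough self-contained sketch of that loop under stated assumptions: cb_open/cb_close are stand-ins for the rxe callbacks, and nondet_int() is stubbed with rand() so the sketch runs, whereas the real harness leaves it undefined so the verifier treats it as a free input.

/* Rough sketch of an LDV-style environment-model main loop. */
#include <stdlib.h>

static int nondet_int(void) { return rand() % 3; }   /* stub for the sketch */

static void cb_open(void)  { /* stands in for a registered rxe callback */ }
static void cb_close(void) { /* its teardown counterpart */ }

static void ldv_main_sketch(void)
{
        for (;;) {
                switch (nondet_int()) {      /* verifier explores every case */
                case 0: cb_open();  break;
                case 1: cb_close(); break;
                default: return;             /* the module-unload exit path */
                }
        }
}

int main(void)
{
        ldv_main_sketch();
        return 0;
}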
39     int sanitize_arg(const char *val, char *intf, int intf_len);    57     void rxe_set_port_state(struct net_device *ndev);    73     int rxe_param_set_add(const char *val, const struct kernel_param *kp);   116     int rxe_param_set_remove(const char *val, const struct kernel_param *kp);   197     void ldv_main20_sequence_infinite_withcheck_stateful();    10     void ldv_error();    25     int ldv_undef_int();    28     bool  ldv_is_err_or_null(const void *ptr);    20     int ldv_spin = 0;    30     struct page * ldv_some_page();    33     struct page * ldv_check_alloc_flags_and_return_some_page(gfp_t flags);    63     int ldv_spin_trylock();           return ;}         {   750     struct rxe_dev *var_group1;   751     union ib_gid *var_mcast_add_6_p1;   752     union ib_gid *var_mcast_delete_7_p1;   753     struct rxe_pkt_info *var_group2;   754     struct sk_buff *var_prepare_19_p2;   755     u32 *var_prepare_19_p3;   756     struct sk_buff *var_send_21_p2;   757     struct sk_buff *var_group3;   758     struct rxe_av *var_group4;   759     int var_init_packet_24_p2;   760     struct rxe_pkt_info *var_init_packet_24_p3;   761     unsigned int var_parent_name_25_p1;   762     unsigned int var_link_layer_26_p1;   763     struct notifier_block *var_group5;   764     unsigned long var_rxe_notify_32_p1;   765     void *var_rxe_notify_32_p2;   766     int tmp;   767     int tmp___0;   873     LDV_IN_INTERRUPT = 1;   882     ldv_initialize() { /* Function call is skipped due to function is undefined */}   888     goto ldv_66141;   888     tmp___0 = nondet_int() { /* Function call is skipped due to function is undefined */}   890     goto ldv_66140;   889     ldv_66140:;   891     tmp = nondet_int() { /* Function call is skipped due to function is undefined */}   891     switch (tmp);  1057     ldv_handler_precall() { /* Function call is skipped due to function is undefined */}           {}   469       int tmp;             {}   357         int err;   358         struct rxe_pkt_info *pkt;   359         struct rxe_dev *rxe;   360         __be32 *icrcp;   361         unsigned int calc_icrc;   362         unsigned int pack_icrc;   363         long tmp;   364         struct ratelimit_state _rs;   365         int tmp___0;   366         int tmp___1;   367         long tmp___2;   368         unsigned long tmp___3;   369         long tmp___4;   370         long tmp___5;   371         unsigned int tmp___6;   372         unsigned long tmp___7;   373         void *tmp___8;   374         unsigned int tmp___9;   375         char saddr[16U];   376         struct ipv6hdr *tmp___10;   377         struct iphdr *tmp___11;   378         struct ratelimit_state _rs___0;   379         int tmp___12;   380         long tmp___13;   381         unsigned int tmp___14;   382         long tmp___15;   358         pkt = (struct rxe_pkt_info *)(&(skb->cb));   359         rxe = pkt->rxe;   363         pkt->offset = 0U;   365         int __CPAchecker_TMP_0 = (int)(pkt->offset);               {   337           union ib_gid dgid;   338           union ib_gid *pdgid;   339           unsigned short index;   340           struct iphdr *tmp;   341           struct ipv6hdr *tmp___0;   342           int tmp___1;   341           unsigned int __CPAchecker_TMP_0 = (unsigned int)(skb->protocol);                 {    83             unsigned char *tmp;                   {  2145               unsigned char *__CPAchecker_TMP_0 = (unsigned char *)(skb->head);  2145               unsigned long __CPAchecker_TMP_1 = (unsigned long)(skb->network_header);  2145               return
          __CPAchecker_TMP_0 + __CPAchecker_TMP_1;;}
   83  return (struct ipv6hdr *)tmp;;}
  346  pdgid = (union ib_gid *)(&(tmp___0->daddr));
  349  tmp___1 = ib_find_cached_gid_by_port(&(rxe->ib_dev), (const union ib_gid *)pdgid, 1, 1, rxe->ndev, &index) { /* Function call is skipped due to function is undefined */}}
       {
  302  unsigned char tmp;
  302  void *__CPAchecker_TMP_0 = (void *)(pkt->hdr);
  302  unsigned long __CPAchecker_TMP_1 = (unsigned long)(pkt->offset);
       {
  106  struct rxe_bth *bth;
  106  bth = (struct rxe_bth *)arg;
  108  return bth->opcode;;}
  302  return tmp;;}
       {
  417  unsigned int tmp;
  417  void *__CPAchecker_TMP_0 = (void *)(pkt->hdr);
  417  unsigned long __CPAchecker_TMP_1 = (unsigned long)(pkt->offset);
       {
  286  struct rxe_bth *bth;
  287  unsigned int tmp;
  286  bth = (struct rxe_bth *)arg;
       {
   57  unsigned int tmp;
       { 9 Ignored inline assembler code
   10  return val;;}
   58  return tmp;;}
  288  return tmp & 16777215U;;}
  417  return tmp;;}
  375  pkt->qp = (struct rxe_qp *)0;
  376  int __CPAchecker_TMP_1 = (int)(pkt->opcode);
  376  pkt->mask = (pkt->mask) | ((u32 )((rxe_opcode[__CPAchecker_TMP_1]).mask));
       {
  937  int __CPAchecker_TMP_0 = (int)(pkt->offset);
  937  int __CPAchecker_TMP_1 = (int)(pkt->opcode);
  937  return (size_t )(__CPAchecker_TMP_0 + ((rxe_opcode[__CPAchecker_TMP_1]).length));;}
  378  size_t __CPAchecker_TMP_2 = (size_t )(skb->len);
       {
  216  struct rxe_dev *rxe;
  217  struct rxe_port *port;
  218  struct rxe_qp *qp;
  219  unsigned int qpn;
  220  unsigned int tmp;
  221  int index;
  222  int err;
  223  struct ratelimit_state _rs;
  224  int tmp___0;
  225  unsigned char tmp___1;
  226  long tmp___2;
  227  void *tmp___3;
  228  struct ratelimit_state _rs___0;
  229  int tmp___4;
  230  long tmp___5;
  231  long tmp___6;
  232  long tmp___7;
  233  long tmp___8;
  234  struct ratelimit_state _rs___1;
  235  int tmp___9;
  236  long tmp___10;
  216  rxe = pkt->rxe;
  217  port = &(rxe->port);
  218  qp = (struct rxe_qp *)0;
       {
  362  unsigned int tmp;
  362  void *__CPAchecker_TMP_0 = (void *)(pkt->hdr);
  362  unsigned long __CPAchecker_TMP_1 = (unsigned long)(pkt->offset);
       {
  198  struct rxe_bth *bth;
  199  unsigned int tmp;
  198  bth = (struct rxe_bth *)arg;
       {
   57  unsigned int tmp;
       { 9 Ignored inline assembler code
   10  return val;;}
   58  return tmp;;}
  200  return tmp & 16777215U;;}
  362  return tmp;;}
  219  qpn = tmp;
       {
  342  unsigned char tmp;
  342  void *__CPAchecker_TMP_0 = (void *)(pkt->hdr);
  342  unsigned long __CPAchecker_TMP_1 = (unsigned long)(pkt->offset);
       {
  169  struct rxe_bth *bth;
  169  bth = (struct rxe_bth *)arg;
  171  unsigned int __CPAchecker_TMP_0 = (unsigned int)(bth->flags);
  171  return __CPAchecker_TMP_0 & 15U;;}
  342  return tmp;;}
  255  pkt->qp = qp;}
  386  unsigned long __CPAchecker_TMP_3 = (unsigned long)(pkt->paylen);
  386  icrcp = (__be32 *)((pkt->hdr) + (__CPAchecker_TMP_3 + 18446744073709551612UL));
       {
   57  unsigned int tmp;
       { 9 Ignored inline assembler code
   10  return val;;}
   58  return tmp;;}
  387  pack_icrc = tmp___6;
       {
   40  unsigned int bth_offset;
   41  struct iphdr *ip4h;
   42  struct ipv6hdr *ip6h;
   43  struct udphdr *udph;
   44  struct rxe_bth *bth;
   45  int crc;
   46  int length;
   47  int hdr_size;
   48  u8 pshdr[60U];
   49  struct iphdr *tmp;
   50  struct ipv6hdr *tmp___0;
   51  unsigned int tmp___1;
   52  unsigned int tmp___2;
   40  bth_offset = 0U;
   41  ip4h = (struct iphdr *)0;
   42  ip6h = (struct ipv6hdr *)0;
   47  int __CPAchecker_TMP_1;
   47  unsigned int __CPAchecker_TMP_2 = (unsigned int)(skb->protocol);
   47  __CPAchecker_TMP_1 = 48;
   47  hdr_size = __CPAchecker_TMP_1;
   60  crc = -558161693;
   62  unsigned int __CPAchecker_TMP_3 = (unsigned int)(skb->protocol);
       {
   83  unsigned char *tmp;
       {
 2145  unsigned char *__CPAchecker_TMP_0 = (unsigned char *)(skb->head);
 2145  unsigned long __CPAchecker_TMP_1 = (unsigned long)(skb->network_header);
 2145  return __CPAchecker_TMP_0 + __CPAchecker_TMP_1;;}
   83  return (struct ipv6hdr *)tmp;;}
   71  __memcpy((void *)(&pshdr), (const void *)tmp___0, (size_t )hdr_size) { /* Function call is skipped due to function is undefined */}
   72  ip6h = (struct ipv6hdr *)(&pshdr);
   73  udph = ((struct udphdr *)ip6h) + 1U;
   75  __memset((void *)(&(ip6h->flow_lbl)), 255, 3UL) { /* Function call is skipped due to function is undefined */}
   76  ip6h->priority = 15U;
   77  ip6h->hop_limit = 255U;
   79  udph->check = 65535U;
   81  bth_offset = bth_offset + ((unsigned int)hdr_size);
   83  const void *__CPAchecker_TMP_4 = (const void *)(pkt->hdr);
   83  __memcpy(((void *)(&pshdr)) + ((unsigned long)bth_offset), __CPAchecker_TMP_4, 12UL) { /* Function call is skipped due to function is undefined */}
   84  bth = ((struct rxe_bth *)(&pshdr)) + ((unsigned long)bth_offset);
   87  bth->qpn = (bth->qpn) | 255U;
   89  length = hdr_size + 12;
   90  tmp___1 = crc32_le((u32 )crc, (const unsigned char *)(&pshdr), (size_t )length) { /* Function call is skipped due to function is undefined */}
   90  crc = (int)tmp___1;
   93  const unsigned char *__CPAchecker_TMP_5 = (const unsigned char *)(pkt->hdr);
   93  int __CPAchecker_TMP_6 = (int)(pkt->opcode);
   93  tmp___2 = crc32_le((u32 )crc, __CPAchecker_TMP_5 + 12U, (size_t )(((rxe_opcode[__CPAchecker_TMP_6]).length) + -12)) { /* Function call is skipped due to function is undefined */}
   93  crc = (int)tmp___2;}
       {
  948  unsigned char tmp;
       {
  332  unsigned char tmp;
  332  void *__CPAchecker_TMP_0 = (void *)(pkt->hdr);
  332  unsigned long __CPAchecker_TMP_1 = (unsigned long)(pkt->offset);
       {
  154  struct rxe_bth *bth;
  154  bth = (struct rxe_bth *)arg;
  156  unsigned int __CPAchecker_TMP_0 = (unsigned int)(bth->flags);
  156  return (u8 )((__CPAchecker_TMP_0 & 48U) >> 4);;}
  332  return tmp;;}
  948  int __CPAchecker_TMP_0 = (int)(pkt->paylen);
  948  int __CPAchecker_TMP_1 = (int)(pkt->opcode);
  948  return (size_t )(((__CPAchecker_TMP_0 - (((rxe_opcode[__CPAchecker_TMP_1]).offset)[11])) - ((int)tmp)) + -4);;}
       {
  942  unsigned long __CPAchecker_TMP_0 = (unsigned long)(pkt->offset);
  942  int __CPAchecker_TMP_1 = (int)(pkt->opcode);
  942  return (void *)((pkt->hdr) + (__CPAchecker_TMP_0 + ((unsigned long)(((rxe_opcode[__CPAchecker_TMP_1]).offset)[11]))));;}
  390  calc_icrc = crc32_le(calc_icrc, (const unsigned char *)tmp___8, tmp___7) { /* Function call is skipped due to function is undefined */}
       {
   57  unsigned int tmp;
       { 9 Ignored inline assembler code
   10  return val;;}
   58  return tmp;;}
  391  calc_icrc = tmp___9;
       {
  362  unsigned int tmp;
  362  void *__CPAchecker_TMP_0 = (void *)(pkt->hdr);
  362  unsigned long __CPAchecker_TMP_1 = (unsigned long)(pkt->offset);
       {
  198  struct rxe_bth *bth;
  199  unsigned int tmp;
  198  bth = (struct rxe_bth *)arg;
       {
   57  unsigned int tmp;
       { 9 Ignored inline assembler code
   10  return val;;}
   58  return tmp;;}
  200  return tmp & 16777215U;;}
  362  return tmp;;}
       {}
  277  struct rxe_pkt_info *pkt;
  278  struct rxe_mc_grp *mcg;
  279  struct sk_buff *skb_copy___0;
  280  struct rxe_mc_elem *mce;
  281  struct rxe_qp *qp;
  282  union ib_gid dgid;
  283  int err;
  284  struct iphdr *tmp;
  285  struct ipv6hdr *tmp___0;
  286  void *tmp___1;
  287  const struct list_head *__mptr;
  288  unsigned int tmp___2;
  289  struct sk_buff *tmp___3;
  290  const struct list_head *__mptr___0;
  277  pkt = (struct rxe_pkt_info *)(&(skb->cb));
  285  unsigned int __CPAchecker_TMP_0 = (unsigned int)(skb->protocol);
  288  unsigned int __CPAchecker_TMP_1 = (unsigned int)(skb->protocol);
       {
  470  struct rb_node *node;
  471  struct rxe_pool_entry *elem;
  472  int cmp;
  473  unsigned long flags;
  474  const struct rb_node *__mptr;
  470  node = (struct rb_node *)0;
  471  elem = (struct rxe_pool_entry *)0;
  477  unsigned int __CPAchecker_TMP_0 = (unsigned int)(pool->state);
  480  node = pool->tree.rb_node;
  482  goto ldv_63933;
  484  goto ldv_63932;
  483  ldv_63932:;
  483  __mptr = (const struct rb_node *)node;
  483  elem = ((struct rxe_pool_entry *)__mptr) + 18446744073709551584UL;
  485  cmp = memcmp(((const void *)elem) + (pool->key_offset), (const void *)key, pool->key_size) { /* Function call is skipped due to function is undefined */}
  493  goto ldv_63931;
       {
   42  _Bool __warned;
   43  int __ret_warn_once;
   44  int tmp;
   45  int __ret_warn_on;
   46  long tmp___0;
   47  long tmp___1;
       {
  156  int __ret;
  156  __ret = i;
  156  switch (4UL);
  157  __case__[4UL == 4UL]
  156  Ignored inline assembler code
  156  goto ldv_3993;
  156  return __ret + i;;}
   46  __ret_warn_once = tmp <= 1;
   46  int __CPAchecker_TMP_0;
   46  assume(!(__ret_warn_once != 0));
       __CPAchecker_TMP_0 = 0;
   46  assume(!(tmp___1 != 0L));
   48  return ;;}
  501  out:;
       {
       {}
  378  _raw_spin_unlock_irqrestore(&(lock->__annonCompField20.rlock), flags) { /* Function call is skipped due to function is undefined */}
  379  return ;;}
  501  void *__CPAchecker_TMP_1;
  501  __CPAchecker_TMP_1 = (void *)elem;}
  292  mcg = (struct rxe_mc_grp *)tmp___1;
       {
       {}
  311  _raw_spin_lock_bh(&(lock->__annonCompField20.rlock)) { /* Function call is skipped due to function is undefined */}
  312  return ;;}
  298  __mptr = (const struct list_head *)(mcg->qp_list.next);
  298  mce = ((struct rxe_mc_elem *)__mptr) + 18446744073709551552UL;
  298  goto ldv_63902;
  300  goto ldv_63901;
  299  ldv_63901:;
  299  qp = mce->qp;
  300  pkt = (struct rxe_pkt_info *)(&(skb->cb));
       {
   41  long tmp;
   42  enum ib_qp_type tmp___0;
   43  struct ratelimit_state _rs;
   44  int tmp___1;
   45  long tmp___2;
   46  struct ratelimit_state _rs___0;
   47  int tmp___3;
   48  long tmp___4;
   49  struct ratelimit_state _rs___1;
   50  int tmp___5;
   51  long tmp___6;
   52  struct ratelimit_state _rs___2;
   53  int tmp___7;
   54  long tmp___8;
   55  long tmp___9;
       {
  174  return qp->ibqp.qp_type;;}
   45  switch ((unsigned int)tmp___0);
   61  int __CPAchecker_TMP_1 = (int)(pkt->opcode);
   65  goto ldv_63789;
   70  ldv_63789:;
   74  int __CPAchecker_TMP_2;
       __CPAchecker_TMP_2 = 0;}
       {
  362  unsigned int tmp;
  362  void *__CPAchecker_TMP_0 = (void *)(pkt->hdr);
  362  unsigned long __CPAchecker_TMP_1 = (unsigned long)(pkt->offset);
       {
  198  struct rxe_bth *bth;
  199  unsigned int tmp;
  198  bth = (struct rxe_bth *)arg;
       {
   57  unsigned int tmp;
       { 9 Ignored inline assembler code
   10  return val;;}
   58  return tmp;;}
  200  return tmp & 16777215U;;}
  362  return tmp;;}
       {}
  103  int i;
  104  int found_pkey;
  105  struct rxe_port *port;
  106  unsigned short pkey;
  107  unsigned short tmp;
  108  int tmp___0;
  109  struct ratelimit_state _rs;
  110  int tmp___1;
  111  struct ratelimit_state _rs___0;
  112  int tmp___2;
  113  int tmp___3;
  114  long tmp___4;
  115  unsigned int qkey;
  116  struct ratelimit_state _rs___1;
  117  unsigned int tmp___5;
  118  int tmp___6;
  119  unsigned int tmp___7;
  120  long tmp___8;
  121  enum ib_qp_type tmp___9;
  122  enum ib_qp_type tmp___10;
  105  found_pkey = 0;
  106  port = &(rxe->port);
       {
  352  unsigned short tmp;
  352  void *__CPAchecker_TMP_0 = (void *)(pkt->hdr);
  352  unsigned long __CPAchecker_TMP_1 = (unsigned long)(pkt->offset);
       {
  184  struct rxe_bth *bth;
  185  unsigned short tmp;
  184  bth = (struct rxe_bth *)arg;
  186  int __CPAchecker_TMP_0 = (int)(bth->pkey);
       {
   51  return (__u16 )(((int)((short)(((int)val) << 8))) | ((int)((short)(((int)val) >> 8))));;}
  186  return tmp;;}
  352  return tmp;;}
  107  pkey = tmp;
  109  pkt->pkey_index = 0U;
       {
  174  return qp->ibqp.qp_type;;}
       {
  174  return qp->ibqp.qp_type;;}
  137  _L:;}

       Source code
     1 #ifndef _ASM_X86_ATOMIC_H
    2 #define _ASM_X86_ATOMIC_H
    3 
    4 #include <linux/compiler.h>
    5 #include <linux/types.h>
    6 #include <asm/alternative.h>
    7 #include <asm/cmpxchg.h>
    8 #include <asm/rmwcc.h>
    9 #include <asm/barrier.h>
   10 
   11 /*
   12  * Atomic operations that C can't guarantee us.  Useful for
   13  * resource counting etc..
   14  */
   15 
   16 #define ATOMIC_INIT(i)	{ (i) }
   17 
   18 /**
   19  * atomic_read - read atomic variable
   20  * @v: pointer of type atomic_t
   21  *
   22  * Atomically reads the value of @v.
   23  */
   24 static __always_inline int atomic_read(const atomic_t *v)
   25 {
   26 	return READ_ONCE((v)->counter);
   27 }
   28 
   29 /**
   30  * atomic_set - set atomic variable
   31  * @v: pointer of type atomic_t
   32  * @i: required value
   33  *
   34  * Atomically sets the value of @v to @i.
   35  */
   36 static __always_inline void atomic_set(atomic_t *v, int i)
   37 {
   38 	WRITE_ONCE(v->counter, i);
   39 }
   40 
   41 /**
   42  * atomic_add - add integer to atomic variable
   43  * @i: integer value to add
   44  * @v: pointer of type atomic_t
   45  *
   46  * Atomically adds @i to @v.
   47  */
   48 static __always_inline void atomic_add(int i, atomic_t *v)
   49 {
   50 	asm volatile(LOCK_PREFIX "addl %1,%0"
   51 		     : "+m" (v->counter)
   52 		     : "ir" (i));
   53 }
   54 
   55 /**
   56  * atomic_sub - subtract integer from atomic variable
   57  * @i: integer value to subtract
   58  * @v: pointer of type atomic_t
   59  *
   60  * Atomically subtracts @i from @v.
   61  */
   62 static __always_inline void atomic_sub(int i, atomic_t *v)
   63 {
   64 	asm volatile(LOCK_PREFIX "subl %1,%0"
   65 		     : "+m" (v->counter)
   66 		     : "ir" (i));
   67 }
   68 
   69 /**
   70  * atomic_sub_and_test - subtract value from variable and test result
   71  * @i: integer value to subtract
   72  * @v: pointer of type atomic_t
   73  *
   74  * Atomically subtracts @i from @v and returns
   75  * true if the result is zero, or false for all
   76  * other cases.
   77  */
   78 static __always_inline bool atomic_sub_and_test(int i, atomic_t *v)
   79 {
   80 	GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", e);
   81 }
   82 
   83 /**
   84  * atomic_inc - increment atomic variable
   85  * @v: pointer of type atomic_t
   86  *
   87  * Atomically increments @v by 1.
   88  */
   89 static __always_inline void atomic_inc(atomic_t *v)
   90 {
   91 	asm volatile(LOCK_PREFIX "incl %0"
   92 		     : "+m" (v->counter));
   93 }
   94 
   95 /**
   96  * atomic_dec - decrement atomic variable
   97  * @v: pointer of type atomic_t
   98  *
   99  * Atomically decrements @v by 1.
  100  */
  101 static __always_inline void atomic_dec(atomic_t *v)
  102 {
  103 	asm volatile(LOCK_PREFIX "decl %0"
  104 		     : "+m" (v->counter));
  105 }
  106 
  107 /**
  108  * atomic_dec_and_test - decrement and test
  109  * @v: pointer of type atomic_t
  110  *
  111  * Atomically decrements @v by 1 and
  112  * returns true if the result is 0, or false for all other
  113  * cases.
  114  */
  115 static __always_inline bool atomic_dec_and_test(atomic_t *v)
  116 {
  117 	GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", e);
  118 }
  119 
  120 /**
  121  * atomic_inc_and_test - increment and test
  122  * @v: pointer of type atomic_t
  123  *
  124  * Atomically increments @v by 1
  125  * and returns true if the result is zero, or false for all
  126  * other cases.
  127  */
  128 static __always_inline bool atomic_inc_and_test(atomic_t *v)
  129 {
  130 	GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", e);
  131 }
  132 
  133 /**
  134  * atomic_add_negative - add and test if negative
  135  * @i: integer value to add
  136  * @v: pointer of type atomic_t
  137  *
  138  * Atomically adds @i to @v and returns true
  139  * if the result is negative, or false when
  140  * result is greater than or equal to zero.
  141  */
  142 static __always_inline bool atomic_add_negative(int i, atomic_t *v)
  143 {
  144 	GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", s);
  145 }
  146 
  147 /**
  148  * atomic_add_return - add integer and return
  149  * @i: integer value to add
  150  * @v: pointer of type atomic_t
  151  *
  152  * Atomically adds @i to @v and returns @i + @v
  153  */
  154 static __always_inline int atomic_add_return(int i, atomic_t *v)
  155 {
  156 	return i + xadd(&v->counter, i);
  157 }
  158 
  159 /**
  160  * atomic_sub_return - subtract integer and return
  161  * @v: pointer of type atomic_t
  162  * @i: integer value to subtract
  163  *
  164  * Atomically subtracts @i from @v and returns @v - @i
  165  */
  166 static __always_inline int atomic_sub_return(int i, atomic_t *v)
  167 {
  168 	return atomic_add_return(-i, v);
  169 }
  170 
  171 #define atomic_inc_return(v)  (atomic_add_return(1, v))
  172 #define atomic_dec_return(v)  (atomic_sub_return(1, v))
  173 
  174 static __always_inline int atomic_fetch_add(int i, atomic_t *v)
  175 {
  176 	return xadd(&v->counter, i);
  177 }
  178 
  179 static __always_inline int atomic_fetch_sub(int i, atomic_t *v)
  180 {
  181 	return xadd(&v->counter, -i);
  182 }
  183 
  184 static __always_inline int atomic_cmpxchg(atomic_t *v, int old, int new)
  185 {
  186 	return cmpxchg(&v->counter, old, new);
  187 }
  188 
  189 static inline int atomic_xchg(atomic_t *v, int new)
  190 {
  191 	return xchg(&v->counter, new);
  192 }
  193 
  194 #define ATOMIC_OP(op)							\
  195 static inline void atomic_##op(int i, atomic_t *v)			\
  196 {									\
  197 	asm volatile(LOCK_PREFIX #op"l %1,%0"				\
  198 			: "+m" (v->counter)				\
  199 			: "ir" (i)					\
  200 			: "memory");					\
  201 }
  202 
  203 #define ATOMIC_FETCH_OP(op, c_op)					\
  204 static inline int atomic_fetch_##op(int i, atomic_t *v)		\
  205 {									\
  206 	int old, val = atomic_read(v);					\
  207 	for (;;) {							\
  208 		old = atomic_cmpxchg(v, val, val c_op i);		\
  209 		if (old == val)						\
  210 			break;						\
  211 		val = old;						\
  212 	}								\
  213 	return old;							\
  214 }
  215 
  216 #define ATOMIC_OPS(op, c_op)						\
  217 	ATOMIC_OP(op)							\
  218 	ATOMIC_FETCH_OP(op, c_op)
  219 
  220 ATOMIC_OPS(and, &)
  221 ATOMIC_OPS(or , |)
  222 ATOMIC_OPS(xor, ^)
  223 
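
For readability, here is what one of the ATOMIC_OPS() invocations above expands to; this is a hand expansion of ATOMIC_OPS(and, &) reconstructed from the two macros, shown only for reference (the kernel compiles the macro form):

static inline void atomic_and(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "andl %1,%0"
			: "+m" (v->counter)
			: "ir" (i)
			: "memory");
}

static inline int atomic_fetch_and(int i, atomic_t *v)
{
	int old, val = atomic_read(v);
	for (;;) {
		old = atomic_cmpxchg(v, val, val & i);
		if (old == val)
			break;
		val = old;
	}
	return old;
}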
  224 #undef ATOMIC_OPS
  225 #undef ATOMIC_FETCH_OP
  226 #undef ATOMIC_OP
  227 
  228 /**
  229  * __atomic_add_unless - add unless the number is already a given value
  230  * @v: pointer of type atomic_t
  231  * @a: the amount to add to v...
  232  * @u: ...unless v is equal to u.
  233  *
  234  * Atomically adds @a to @v, so long as @v was not already @u.
  235  * Returns the old value of @v.
  236  */
  237 static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
  238 {
  239 	int c, old;
  240 	c = atomic_read(v);
  241 	for (;;) {
  242 		if (unlikely(c == (u)))
  243 			break;
  244 		old = atomic_cmpxchg((v), c, c + (a));
  245 		if (likely(old == c))
  246 			break;
  247 		c = old;
  248 	}
  249 	return c;
  250 }
  251 
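The cmpxchg retry loop in __atomic_add_unless() (the same pattern as ATOMIC_FETCH_OP above) has a direct portable counterpart. A minimal user-space sketch with C11 <stdatomic.h>, for illustration only; the names here are made up and this is not kernel code:

#include <stdatomic.h>
#include <stdio.h>

/* Same retry pattern as __atomic_add_unless, in portable C11. */
static int add_unless(atomic_int *v, int a, int u)
{
	int c = atomic_load(v);
	while (c != u) {
		/* on failure, compare_exchange refreshes c with the current
		 * value, so the loop retries against up-to-date data */
		if (atomic_compare_exchange_weak(v, &c, c + a))
			break;
	}
	return c;	/* old value, matching the kernel helper */
}

int main(void)
{
	atomic_int v = 5;
	printf("%d\n", add_unless(&v, 1, 5));	/* 5: v == u, nothing added */
	printf("%d\n", add_unless(&v, 1, 7));	/* 5: old value; v is now 6 */
	return 0;
}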
  252 /**
  253  * atomic_inc_short - increment of a short integer
   254  * @v: pointer of type short int
  255  *
  256  * Atomically adds 1 to @v
   257  * Returns the new value of @v
  258  */
  259 static __always_inline short int atomic_inc_short(short int *v)
  260 {
  261 	asm(LOCK_PREFIX "addw $1, %0" : "+m" (*v));
  262 	return *v;
  263 }
  264 
  265 #ifdef CONFIG_X86_32
  266 # include <asm/atomic64_32.h>
  267 #else
  268 # include <asm/atomic64_64.h>
  269 #endif
  270 
  271 #endif /* _ASM_X86_ATOMIC_H */

    1 #ifndef _ASM_X86_SWAB_H
    2 #define _ASM_X86_SWAB_H
    3 
    4 #include <linux/types.h>
    5 #include <linux/compiler.h>
    6 
    7 static inline __attribute_const__ __u32 __arch_swab32(__u32 val)
    8 {
    9 	asm("bswapl %0" : "=r" (val) : "0" (val));
   10 	return val;
   11 }
   12 #define __arch_swab32 __arch_swab32
   13 
   14 static inline __attribute_const__ __u64 __arch_swab64(__u64 val)
   15 {
   16 #ifdef __i386__
   17 	union {
   18 		struct {
   19 			__u32 a;
   20 			__u32 b;
   21 		} s;
   22 		__u64 u;
   23 	} v;
   24 	v.u = val;
   25 	asm("bswapl %0 ; bswapl %1 ; xchgl %0,%1"
   26 	    : "=r" (v.s.a), "=r" (v.s.b)
   27 	    : "0" (v.s.a), "1" (v.s.b));
   28 	return v.u;
   29 #else /* __i386__ */
   30 	asm("bswapq %0" : "=r" (val) : "0" (val));
   31 	return val;
   32 #endif
   33 }
   34 #define __arch_swab64 __arch_swab64
   35 
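For reference, the same 32-bit swap in portable C; a self-checking sketch for illustration only (the header above relies on the x86 bswap instruction instead):

#include <stdint.h>
#include <assert.h>

/* Portable equivalent of __arch_swab32: move each byte to its
 * mirrored position with shifts and masks. */
static uint32_t swab32(uint32_t v)
{
	return (v >> 24) | ((v >> 8) & 0x0000ff00u) |
	       ((v << 8) & 0x00ff0000u) | (v << 24);
}

int main(void)
{
	assert(swab32(0x12345678u) == 0x78563412u);
	return 0;
}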
   36 #endif /* _ASM_X86_SWAB_H */

    1 
    2 #include <linux/kernel.h>
    3 bool ldv_is_err(const void *ptr);
    4 bool ldv_is_err_or_null(const void *ptr);
    5 void* ldv_err_ptr(long error);
    6 long ldv_ptr_err(const void *ptr);
    7 
    8 extern void ldv_spin_lock(void);
    9 extern void ldv_spin_unlock(void);
   10 extern int ldv_spin_trylock(void);
   11 
   12 #include <linux/kernel.h>
   13 #include <verifier/rcv.h>
   14 #include <linux/module.h>
   15 #include <linux/slab.h>
   16 
   17 extern void *ldv_undefined_pointer(void);
   18 extern void ldv_check_alloc_flags(gfp_t flags);
   19 extern void ldv_check_alloc_nonatomic(void);
   20 /* Returns an arbitrary page in addition to checking flags */
   21 extern struct page *ldv_check_alloc_flags_and_return_some_page(gfp_t flags);
   22 #line 1 "/home/ubuntu/launches/work/current--X--drivers--X--defaultlinux-4.8-rc1.tar.xz--X--43_1a--X--cpachecker/linux-4.8-rc1.tar.xz/csd_deg_dscv/9940/dscv_tempdir/dscv/ri/43_1a/drivers/infiniband/sw/rxe/rxe_comp.c"
   23 /*
   24  * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
   25  * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
   26  *
   27  * This software is available to you under a choice of one of two
   28  * licenses.  You may choose to be licensed under the terms of the GNU
   29  * General Public License (GPL) Version 2, available from the file
   30  * COPYING in the main directory of this source tree, or the
   31  * OpenIB.org BSD license below:
   32  *
   33  *     Redistribution and use in source and binary forms, with or
   34  *     without modification, are permitted provided that the following
   35  *     conditions are met:
   36  *
   37  *	- Redistributions of source code must retain the above
   38  *	  copyright notice, this list of conditions and the following
   39  *	  disclaimer.
   40  *
   41  *	- Redistributions in binary form must reproduce the above
   42  *	  copyright notice, this list of conditions and the following
   43  *	  disclaimer in the documentation and/or other materials
   44  *	  provided with the distribution.
   45  *
   46  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
   47  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
   48  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
   49  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
   50  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
   51  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
   52  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
   53  * SOFTWARE.
   54  */
   55 
   56 #include <linux/skbuff.h>
   57 
   58 #include "rxe.h"
   59 #include "rxe_loc.h"
   60 #include "rxe_queue.h"
   61 #include "rxe_task.h"
   62 
   63 enum comp_state {
   64 	COMPST_GET_ACK,
   65 	COMPST_GET_WQE,
   66 	COMPST_COMP_WQE,
   67 	COMPST_COMP_ACK,
   68 	COMPST_CHECK_PSN,
   69 	COMPST_CHECK_ACK,
   70 	COMPST_READ,
   71 	COMPST_ATOMIC,
   72 	COMPST_WRITE_SEND,
   73 	COMPST_UPDATE_COMP,
   74 	COMPST_ERROR_RETRY,
   75 	COMPST_RNR_RETRY,
   76 	COMPST_ERROR,
    77 	COMPST_EXIT, /* Done for now; the task should stop rerunning the completer */
    78 	COMPST_DONE, /* A packet was completed successfully; run the completer again */
   79 };
   80 
   81 static char *comp_state_name[] =  {
   82 	[COMPST_GET_ACK]		= "GET ACK",
   83 	[COMPST_GET_WQE]		= "GET WQE",
   84 	[COMPST_COMP_WQE]		= "COMP WQE",
   85 	[COMPST_COMP_ACK]		= "COMP ACK",
   86 	[COMPST_CHECK_PSN]		= "CHECK PSN",
   87 	[COMPST_CHECK_ACK]		= "CHECK ACK",
   88 	[COMPST_READ]			= "READ",
   89 	[COMPST_ATOMIC]			= "ATOMIC",
   90 	[COMPST_WRITE_SEND]		= "WRITE/SEND",
   91 	[COMPST_UPDATE_COMP]		= "UPDATE COMP",
   92 	[COMPST_ERROR_RETRY]		= "ERROR RETRY",
   93 	[COMPST_RNR_RETRY]		= "RNR RETRY",
   94 	[COMPST_ERROR]			= "ERROR",
   95 	[COMPST_EXIT]			= "EXIT",
   96 	[COMPST_DONE]			= "DONE",
   97 };
   98 
   99 static unsigned long rnrnak_usec[32] = {
  100 	[IB_RNR_TIMER_655_36] = 655360,
  101 	[IB_RNR_TIMER_000_01] = 10,
  102 	[IB_RNR_TIMER_000_02] = 20,
  103 	[IB_RNR_TIMER_000_03] = 30,
  104 	[IB_RNR_TIMER_000_04] = 40,
  105 	[IB_RNR_TIMER_000_06] = 60,
  106 	[IB_RNR_TIMER_000_08] = 80,
  107 	[IB_RNR_TIMER_000_12] = 120,
  108 	[IB_RNR_TIMER_000_16] = 160,
  109 	[IB_RNR_TIMER_000_24] = 240,
  110 	[IB_RNR_TIMER_000_32] = 320,
  111 	[IB_RNR_TIMER_000_48] = 480,
  112 	[IB_RNR_TIMER_000_64] = 640,
  113 	[IB_RNR_TIMER_000_96] = 960,
  114 	[IB_RNR_TIMER_001_28] = 1280,
  115 	[IB_RNR_TIMER_001_92] = 1920,
  116 	[IB_RNR_TIMER_002_56] = 2560,
  117 	[IB_RNR_TIMER_003_84] = 3840,
  118 	[IB_RNR_TIMER_005_12] = 5120,
  119 	[IB_RNR_TIMER_007_68] = 7680,
  120 	[IB_RNR_TIMER_010_24] = 10240,
  121 	[IB_RNR_TIMER_015_36] = 15360,
  122 	[IB_RNR_TIMER_020_48] = 20480,
  123 	[IB_RNR_TIMER_030_72] = 30720,
  124 	[IB_RNR_TIMER_040_96] = 40960,
   125 	[IB_RNR_TIMER_061_44] = 61440,
  126 	[IB_RNR_TIMER_081_92] = 81920,
  127 	[IB_RNR_TIMER_122_88] = 122880,
  128 	[IB_RNR_TIMER_163_84] = 163840,
  129 	[IB_RNR_TIMER_245_76] = 245760,
  130 	[IB_RNR_TIMER_327_68] = 327680,
  131 	[IB_RNR_TIMER_491_52] = 491520,
  132 };
  133 
  134 static inline unsigned long rnrnak_jiffies(u8 timeout)
  135 {
  136 	return max_t(unsigned long,
  137 		usecs_to_jiffies(rnrnak_usec[timeout]), 1);
  138 }
  139 
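A user-space sketch of how the table and rnrnak_jiffies() cooperate: the 5-bit timeout code carried in an RNR NAK AETH indexes rnrnak_usec[], and the microseconds are converted to jiffies with a floor of one tick. HZ = 250 is an assumption for the arithmetic below, the two table entries are borrowed from the listing above with indices mirroring the IBTA encoding, and the kernel's usecs_to_jiffies() rounds up where this sketch truncates:

#include <stdio.h>

#define HZ 250	/* assumed tick rate for the example */

/* 1 = 0.01 ms, 22 = 20.48 ms, as in the table above */
static const unsigned long usec_table[32] = { [1] = 10, [22] = 20480 };

static unsigned long sketch_rnrnak_jiffies(unsigned int timeout)
{
	unsigned long j = usec_table[timeout & 31] * HZ / 1000000UL;
	return j > 0 ? j : 1;	/* never arm the timer for zero ticks */
}

int main(void)
{
	printf("%lu\n", sketch_rnrnak_jiffies(1));	/* 10 us    -> 1 (floored) */
	printf("%lu\n", sketch_rnrnak_jiffies(22));	/* 20480 us -> 5 jiffies */
	return 0;
}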
  140 static enum ib_wc_opcode wr_to_wc_opcode(enum ib_wr_opcode opcode)
  141 {
  142 	switch (opcode) {
  143 	case IB_WR_RDMA_WRITE:			return IB_WC_RDMA_WRITE;
  144 	case IB_WR_RDMA_WRITE_WITH_IMM:		return IB_WC_RDMA_WRITE;
  145 	case IB_WR_SEND:			return IB_WC_SEND;
  146 	case IB_WR_SEND_WITH_IMM:		return IB_WC_SEND;
  147 	case IB_WR_RDMA_READ:			return IB_WC_RDMA_READ;
  148 	case IB_WR_ATOMIC_CMP_AND_SWP:		return IB_WC_COMP_SWAP;
  149 	case IB_WR_ATOMIC_FETCH_AND_ADD:	return IB_WC_FETCH_ADD;
  150 	case IB_WR_LSO:				return IB_WC_LSO;
  151 	case IB_WR_SEND_WITH_INV:		return IB_WC_SEND;
  152 	case IB_WR_RDMA_READ_WITH_INV:		return IB_WC_RDMA_READ;
  153 	case IB_WR_LOCAL_INV:			return IB_WC_LOCAL_INV;
  154 	case IB_WR_REG_MR:			return IB_WC_REG_MR;
  155 
  156 	default:
  157 		return 0xff;
  158 	}
  159 }
  160 
  161 void retransmit_timer(unsigned long data)
  162 {
  163 	struct rxe_qp *qp = (struct rxe_qp *)data;
  164 
  165 	if (qp->valid) {
  166 		qp->comp.timeout = 1;
  167 		rxe_run_task(&qp->comp.task, 1);
  168 	}
  169 }
  170 
  171 void rxe_comp_queue_pkt(struct rxe_dev *rxe, struct rxe_qp *qp,
  172 			struct sk_buff *skb)
  173 {
  174 	int must_sched;
  175 
  176 	skb_queue_tail(&qp->resp_pkts, skb);
  177 
  178 	must_sched = skb_queue_len(&qp->resp_pkts) > 1;
  179 	rxe_run_task(&qp->comp.task, must_sched);
  180 }
  181 
  182 static inline enum comp_state get_wqe(struct rxe_qp *qp,
  183 				      struct rxe_pkt_info *pkt,
  184 				      struct rxe_send_wqe **wqe_p)
  185 {
  186 	struct rxe_send_wqe *wqe;
  187 
  188 	/* we come here whether or not we found a response packet to see if
  189 	 * there are any posted WQEs
  190 	 */
  191 	wqe = queue_head(qp->sq.queue);
  192 	*wqe_p = wqe;
  193 
  194 	/* no WQE or requester has not started it yet */
  195 	if (!wqe || wqe->state == wqe_state_posted)
  196 		return pkt ? COMPST_DONE : COMPST_EXIT;
  197 
  198 	/* WQE does not require an ack */
  199 	if (wqe->state == wqe_state_done)
  200 		return COMPST_COMP_WQE;
  201 
  202 	/* WQE caused an error */
  203 	if (wqe->state == wqe_state_error)
  204 		return COMPST_ERROR;
  205 
  206 	/* we have a WQE, if we also have an ack check its PSN */
  207 	return pkt ? COMPST_CHECK_PSN : COMPST_EXIT;
  208 }
  209 
  210 static inline void reset_retry_counters(struct rxe_qp *qp)
  211 {
  212 	qp->comp.retry_cnt = qp->attr.retry_cnt;
  213 	qp->comp.rnr_retry = qp->attr.rnr_retry;
  214 }
  215 
  216 static inline enum comp_state check_psn(struct rxe_qp *qp,
  217 					struct rxe_pkt_info *pkt,
  218 					struct rxe_send_wqe *wqe)
  219 {
  220 	s32 diff;
  221 
  222 	/* check to see if response is past the oldest WQE. if it is, complete
  223 	 * send/write or error read/atomic
  224 	 */
  225 	diff = psn_compare(pkt->psn, wqe->last_psn);
  226 	if (diff > 0) {
  227 		if (wqe->state == wqe_state_pending) {
  228 			if (wqe->mask & WR_ATOMIC_OR_READ_MASK)
  229 				return COMPST_ERROR_RETRY;
  230 
  231 			reset_retry_counters(qp);
  232 			return COMPST_COMP_WQE;
  233 		} else {
  234 			return COMPST_DONE;
  235 		}
  236 	}
  237 
  238 	/* compare response packet to expected response */
  239 	diff = psn_compare(pkt->psn, qp->comp.psn);
  240 	if (diff < 0) {
   241 		/* the response is most likely a retried packet: if it matches an
   242 		 * uncompleted WQE, complete it; otherwise ignore it
  243 		 */
  244 		if (pkt->psn == wqe->last_psn)
  245 			return COMPST_COMP_ACK;
  246 		else
  247 			return COMPST_DONE;
  248 	} else if ((diff > 0) && (wqe->mask & WR_ATOMIC_OR_READ_MASK)) {
  249 		return COMPST_ERROR_RETRY;
  250 	} else {
  251 		return COMPST_CHECK_ACK;
  252 	}
  253 }
  254 
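check_psn() leans entirely on psn_compare(), which is defined elsewhere in the driver (rxe_verbs.h). The usual trick for ordering 24-bit serial numbers under wraparound is to shift the difference up into the sign bit; a small self-checking sketch of that idea:

#include <stdint.h>
#include <assert.h>

/* 24-bit PSN comparison: positive if a is "ahead of" b, negative if
 * behind, zero if equal, with correct ordering across the wrap. */
static int psn_cmp(uint32_t a, uint32_t b)
{
	return (int32_t)((a - b) << 8);
}

int main(void)
{
	assert(psn_cmp(5, 3) > 0);
	assert(psn_cmp(3, 5) < 0);
	assert(psn_cmp(0x000001, 0xfffffe) > 0);	/* wraps correctly */
	return 0;
}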
  255 static inline enum comp_state check_ack(struct rxe_qp *qp,
  256 					struct rxe_pkt_info *pkt,
  257 					struct rxe_send_wqe *wqe)
  258 {
  259 	unsigned int mask = pkt->mask;
  260 	u8 syn;
  261 
  262 	/* Check the sequence only */
  263 	switch (qp->comp.opcode) {
  264 	case -1:
  265 		/* Will catch all *_ONLY cases. */
  266 		if (!(mask & RXE_START_MASK))
  267 			return COMPST_ERROR;
  268 
  269 		break;
  270 
  271 	case IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST:
  272 	case IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE:
  273 		if (pkt->opcode != IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE &&
  274 		    pkt->opcode != IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST) {
  275 			return COMPST_ERROR;
  276 		}
  277 		break;
  278 	default:
  279 		WARN_ON(1);
  280 	}
  281 
  282 	/* Check operation validity. */
  283 	switch (pkt->opcode) {
  284 	case IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST:
  285 	case IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST:
  286 	case IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY:
  287 		syn = aeth_syn(pkt);
  288 
  289 		if ((syn & AETH_TYPE_MASK) != AETH_ACK)
  290 			return COMPST_ERROR;
  291 
  292 		/* Fall through (IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE
  293 		 * doesn't have an AETH)
  294 		 */
  295 	case IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE:
  296 		if (wqe->wr.opcode != IB_WR_RDMA_READ &&
  297 		    wqe->wr.opcode != IB_WR_RDMA_READ_WITH_INV) {
  298 			return COMPST_ERROR;
  299 		}
  300 		reset_retry_counters(qp);
  301 		return COMPST_READ;
  302 
  303 	case IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE:
  304 		syn = aeth_syn(pkt);
  305 
  306 		if ((syn & AETH_TYPE_MASK) != AETH_ACK)
  307 			return COMPST_ERROR;
  308 
  309 		if (wqe->wr.opcode != IB_WR_ATOMIC_CMP_AND_SWP &&
  310 		    wqe->wr.opcode != IB_WR_ATOMIC_FETCH_AND_ADD)
  311 			return COMPST_ERROR;
  312 		reset_retry_counters(qp);
  313 		return COMPST_ATOMIC;
  314 
  315 	case IB_OPCODE_RC_ACKNOWLEDGE:
  316 		syn = aeth_syn(pkt);
  317 		switch (syn & AETH_TYPE_MASK) {
  318 		case AETH_ACK:
  319 			reset_retry_counters(qp);
  320 			return COMPST_WRITE_SEND;
  321 
  322 		case AETH_RNR_NAK:
  323 			return COMPST_RNR_RETRY;
  324 
  325 		case AETH_NAK:
  326 			switch (syn) {
  327 			case AETH_NAK_PSN_SEQ_ERROR:
  328 				/* a nak implicitly acks all packets with psns
   329 				 * before it
  330 				 */
  331 				if (psn_compare(pkt->psn, qp->comp.psn) > 0) {
  332 					qp->comp.psn = pkt->psn;
  333 					if (qp->req.wait_psn) {
  334 						qp->req.wait_psn = 0;
  335 						rxe_run_task(&qp->req.task, 1);
  336 					}
  337 				}
  338 				return COMPST_ERROR_RETRY;
  339 
  340 			case AETH_NAK_INVALID_REQ:
  341 				wqe->status = IB_WC_REM_INV_REQ_ERR;
  342 				return COMPST_ERROR;
  343 
  344 			case AETH_NAK_REM_ACC_ERR:
  345 				wqe->status = IB_WC_REM_ACCESS_ERR;
  346 				return COMPST_ERROR;
  347 
  348 			case AETH_NAK_REM_OP_ERR:
  349 				wqe->status = IB_WC_REM_OP_ERR;
  350 				return COMPST_ERROR;
  351 
  352 			default:
  353 				pr_warn("unexpected nak %x\n", syn);
  354 				wqe->status = IB_WC_REM_OP_ERR;
  355 				return COMPST_ERROR;
  356 			}
  357 
  358 		default:
  359 			return COMPST_ERROR;
  360 		}
  361 		break;
  362 
  363 	default:
  364 		pr_warn("unexpected opcode\n");
  365 	}
  366 
  367 	return COMPST_ERROR;
  368 }
  369 
  370 static inline enum comp_state do_read(struct rxe_qp *qp,
  371 				      struct rxe_pkt_info *pkt,
  372 				      struct rxe_send_wqe *wqe)
  373 {
  374 	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
  375 	int ret;
  376 
  377 	ret = copy_data(rxe, qp->pd, IB_ACCESS_LOCAL_WRITE,
  378 			&wqe->dma, payload_addr(pkt),
  379 			payload_size(pkt), to_mem_obj, NULL);
  380 	if (ret)
  381 		return COMPST_ERROR;
  382 
  383 	if (wqe->dma.resid == 0 && (pkt->mask & RXE_END_MASK))
  384 		return COMPST_COMP_ACK;
  385 	else
  386 		return COMPST_UPDATE_COMP;
  387 }
  388 
  389 static inline enum comp_state do_atomic(struct rxe_qp *qp,
  390 					struct rxe_pkt_info *pkt,
  391 					struct rxe_send_wqe *wqe)
  392 {
  393 	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
  394 	int ret;
  395 
  396 	u64 atomic_orig = atmack_orig(pkt);
  397 
  398 	ret = copy_data(rxe, qp->pd, IB_ACCESS_LOCAL_WRITE,
  399 			&wqe->dma, &atomic_orig,
  400 			sizeof(u64), to_mem_obj, NULL);
  401 	if (ret)
  402 		return COMPST_ERROR;
  403 	else
  404 		return COMPST_COMP_ACK;
  405 }
  406 
  407 static void make_send_cqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
  408 			  struct rxe_cqe *cqe)
  409 {
  410 	memset(cqe, 0, sizeof(*cqe));
  411 
  412 	if (!qp->is_user) {
  413 		struct ib_wc		*wc	= &cqe->ibwc;
  414 
  415 		wc->wr_id		= wqe->wr.wr_id;
  416 		wc->status		= wqe->status;
  417 		wc->opcode		= wr_to_wc_opcode(wqe->wr.opcode);
  418 		if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
  419 		    wqe->wr.opcode == IB_WR_SEND_WITH_IMM)
  420 			wc->wc_flags = IB_WC_WITH_IMM;
  421 		wc->byte_len		= wqe->dma.length;
  422 		wc->qp			= &qp->ibqp;
  423 	} else {
  424 		struct ib_uverbs_wc	*uwc	= &cqe->uibwc;
  425 
  426 		uwc->wr_id		= wqe->wr.wr_id;
  427 		uwc->status		= wqe->status;
  428 		uwc->opcode		= wr_to_wc_opcode(wqe->wr.opcode);
  429 		if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
  430 		    wqe->wr.opcode == IB_WR_SEND_WITH_IMM)
  431 			uwc->wc_flags = IB_WC_WITH_IMM;
  432 		uwc->byte_len		= wqe->dma.length;
  433 		uwc->qp_num		= qp->ibqp.qp_num;
  434 	}
  435 }
  436 
  437 static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
  438 {
  439 	struct rxe_cqe cqe;
  440 
  441 	if ((qp->sq_sig_type == IB_SIGNAL_ALL_WR) ||
  442 	    (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
  443 	    (qp->req.state == QP_STATE_ERROR)) {
  444 		make_send_cqe(qp, wqe, &cqe);
  445 		rxe_cq_post(qp->scq, &cqe, 0);
  446 	}
  447 
  448 	advance_consumer(qp->sq.queue);
  449 
  450 	/*
  451 	 * we completed something so let req run again
  452 	 * if it is trying to fence
  453 	 */
  454 	if (qp->req.wait_fence) {
  455 		qp->req.wait_fence = 0;
  456 		rxe_run_task(&qp->req.task, 1);
  457 	}
  458 }
  459 
  460 static inline enum comp_state complete_ack(struct rxe_qp *qp,
  461 					   struct rxe_pkt_info *pkt,
  462 					   struct rxe_send_wqe *wqe)
  463 {
  464 	unsigned long flags;
  465 
  466 	if (wqe->has_rd_atomic) {
  467 		wqe->has_rd_atomic = 0;
  468 		atomic_inc(&qp->req.rd_atomic);
  469 		if (qp->req.need_rd_atomic) {
  470 			qp->comp.timeout_retry = 0;
  471 			qp->req.need_rd_atomic = 0;
  472 			rxe_run_task(&qp->req.task, 1);
  473 		}
  474 	}
  475 
  476 	if (unlikely(qp->req.state == QP_STATE_DRAIN)) {
  477 		/* state_lock used by requester & completer */
  478 		spin_lock_irqsave(&qp->state_lock, flags);
  479 		if ((qp->req.state == QP_STATE_DRAIN) &&
  480 		    (qp->comp.psn == qp->req.psn)) {
  481 			qp->req.state = QP_STATE_DRAINED;
  482 			spin_unlock_irqrestore(&qp->state_lock, flags);
  483 
  484 			if (qp->ibqp.event_handler) {
  485 				struct ib_event ev;
  486 
  487 				ev.device = qp->ibqp.device;
  488 				ev.element.qp = &qp->ibqp;
  489 				ev.event = IB_EVENT_SQ_DRAINED;
  490 				qp->ibqp.event_handler(&ev,
  491 					qp->ibqp.qp_context);
  492 			}
  493 		} else {
  494 			spin_unlock_irqrestore(&qp->state_lock, flags);
  495 		}
  496 	}
  497 
  498 	do_complete(qp, wqe);
  499 
  500 	if (psn_compare(pkt->psn, qp->comp.psn) >= 0)
  501 		return COMPST_UPDATE_COMP;
  502 	else
  503 		return COMPST_DONE;
  504 }
  505 
  506 static inline enum comp_state complete_wqe(struct rxe_qp *qp,
  507 					   struct rxe_pkt_info *pkt,
  508 					   struct rxe_send_wqe *wqe)
  509 {
  510 	qp->comp.opcode = -1;
  511 
  512 	if (pkt) {
  513 		if (psn_compare(pkt->psn, qp->comp.psn) >= 0)
  514 			qp->comp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
  515 
  516 		if (qp->req.wait_psn) {
  517 			qp->req.wait_psn = 0;
  518 			rxe_run_task(&qp->req.task, 1);
  519 		}
  520 	}
  521 
  522 	do_complete(qp, wqe);
  523 
  524 	return COMPST_GET_WQE;
  525 }
  526 
  527 int rxe_completer(void *arg)
  528 {
  529 	struct rxe_qp *qp = (struct rxe_qp *)arg;
   530 	struct rxe_send_wqe *wqe = wqe;	/* self-assignment quiets a false "maybe uninitialized" warning */
  531 	struct sk_buff *skb = NULL;
  532 	struct rxe_pkt_info *pkt = NULL;
  533 	enum comp_state state;
  534 
  535 	if (!qp->valid) {
  536 		while ((skb = skb_dequeue(&qp->resp_pkts))) {
  537 			rxe_drop_ref(qp);
  538 			kfree_skb(skb);
  539 		}
  540 		skb = NULL;
  541 		pkt = NULL;
  542 
  543 		while (queue_head(qp->sq.queue))
  544 			advance_consumer(qp->sq.queue);
  545 
  546 		goto exit;
  547 	}
  548 
  549 	if (qp->req.state == QP_STATE_ERROR) {
  550 		while ((skb = skb_dequeue(&qp->resp_pkts))) {
  551 			rxe_drop_ref(qp);
  552 			kfree_skb(skb);
  553 		}
  554 		skb = NULL;
  555 		pkt = NULL;
  556 
  557 		while ((wqe = queue_head(qp->sq.queue))) {
  558 			wqe->status = IB_WC_WR_FLUSH_ERR;
  559 			do_complete(qp, wqe);
  560 		}
  561 
  562 		goto exit;
  563 	}
  564 
  565 	if (qp->req.state == QP_STATE_RESET) {
  566 		while ((skb = skb_dequeue(&qp->resp_pkts))) {
  567 			rxe_drop_ref(qp);
  568 			kfree_skb(skb);
  569 		}
  570 		skb = NULL;
  571 		pkt = NULL;
  572 
  573 		while (queue_head(qp->sq.queue))
  574 			advance_consumer(qp->sq.queue);
  575 
  576 		goto exit;
  577 	}
  578 
  579 	if (qp->comp.timeout) {
  580 		qp->comp.timeout_retry = 1;
  581 		qp->comp.timeout = 0;
  582 	} else {
  583 		qp->comp.timeout_retry = 0;
  584 	}
  585 
  586 	if (qp->req.need_retry)
  587 		goto exit;
  588 
  589 	state = COMPST_GET_ACK;
  590 
  591 	while (1) {
  592 		pr_debug("state = %s\n", comp_state_name[state]);
  593 		switch (state) {
  594 		case COMPST_GET_ACK:
  595 			skb = skb_dequeue(&qp->resp_pkts);
  596 			if (skb) {
  597 				pkt = SKB_TO_PKT(skb);
  598 				qp->comp.timeout_retry = 0;
  599 			}
  600 			state = COMPST_GET_WQE;
  601 			break;
  602 
  603 		case COMPST_GET_WQE:
  604 			state = get_wqe(qp, pkt, &wqe);
  605 			break;
  606 
  607 		case COMPST_CHECK_PSN:
  608 			state = check_psn(qp, pkt, wqe);
  609 			break;
  610 
  611 		case COMPST_CHECK_ACK:
  612 			state = check_ack(qp, pkt, wqe);
  613 			break;
  614 
  615 		case COMPST_READ:
  616 			state = do_read(qp, pkt, wqe);
  617 			break;
  618 
  619 		case COMPST_ATOMIC:
  620 			state = do_atomic(qp, pkt, wqe);
  621 			break;
  622 
  623 		case COMPST_WRITE_SEND:
  624 			if (wqe->state == wqe_state_pending &&
  625 			    wqe->last_psn == pkt->psn)
  626 				state = COMPST_COMP_ACK;
  627 			else
  628 				state = COMPST_UPDATE_COMP;
  629 			break;
  630 
  631 		case COMPST_COMP_ACK:
  632 			state = complete_ack(qp, pkt, wqe);
  633 			break;
  634 
  635 		case COMPST_COMP_WQE:
  636 			state = complete_wqe(qp, pkt, wqe);
  637 			break;
  638 
  639 		case COMPST_UPDATE_COMP:
  640 			if (pkt->mask & RXE_END_MASK)
  641 				qp->comp.opcode = -1;
  642 			else
  643 				qp->comp.opcode = pkt->opcode;
  644 
  645 			if (psn_compare(pkt->psn, qp->comp.psn) >= 0)
  646 				qp->comp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
  647 
  648 			if (qp->req.wait_psn) {
  649 				qp->req.wait_psn = 0;
  650 				rxe_run_task(&qp->req.task, 1);
  651 			}
  652 
  653 			state = COMPST_DONE;
  654 			break;
  655 
  656 		case COMPST_DONE:
  657 			if (pkt) {
  658 				rxe_drop_ref(pkt->qp);
  659 				kfree_skb(skb);
  660 			}
  661 			goto done;
  662 
  663 		case COMPST_EXIT:
  664 			if (qp->comp.timeout_retry && wqe) {
  665 				state = COMPST_ERROR_RETRY;
  666 				break;
  667 			}
  668 
   669 			/* reset the timeout counter if
  670 			 * (1) QP is type RC
  671 			 * (2) the QP is alive
  672 			 * (3) there is a packet sent by the requester that
  673 			 *     might be acked (we still might get spurious
  674 			 *     timeouts but try to keep them as few as possible)
  675 			 * (4) the timeout parameter is set
  676 			 */
  677 			if ((qp_type(qp) == IB_QPT_RC) &&
  678 			    (qp->req.state == QP_STATE_READY) &&
  679 			    (psn_compare(qp->req.psn, qp->comp.psn) > 0) &&
  680 			    qp->qp_timeout_jiffies)
  681 				mod_timer(&qp->retrans_timer,
  682 					  jiffies + qp->qp_timeout_jiffies);
  683 			goto exit;
  684 
  685 		case COMPST_ERROR_RETRY:
  686 			/* we come here if the retry timer fired and we did
  687 			 * not receive a response packet. try to retry the send
  688 			 * queue if that makes sense and the limits have not
  689 			 * been exceeded. remember that some timeouts are
  690 			 * spurious since we do not reset the timer but kick
  691 			 * it down the road or let it expire
  692 			 */
  693 
  694 			/* there is nothing to retry in this case */
  695 			if (!wqe || (wqe->state == wqe_state_posted))
  696 				goto exit;
  697 
  698 			if (qp->comp.retry_cnt > 0) {
  699 				if (qp->comp.retry_cnt != 7)
  700 					qp->comp.retry_cnt--;
  701 
  702 				/* no point in retrying if we have already
  703 				 * seen the last ack that the requester could
  704 				 * have caused
  705 				 */
  706 				if (psn_compare(qp->req.psn,
  707 						qp->comp.psn) > 0) {
  708 					/* tell the requester to retry the
   709 					 * send queue next time around
  710 					 */
  711 					qp->req.need_retry = 1;
  712 					rxe_run_task(&qp->req.task, 1);
  713 				}
  714 				goto exit;
  715 			} else {
  716 				wqe->status = IB_WC_RETRY_EXC_ERR;
  717 				state = COMPST_ERROR;
  718 			}
  719 			break;
  720 
  721 		case COMPST_RNR_RETRY:
  722 			if (qp->comp.rnr_retry > 0) {
  723 				if (qp->comp.rnr_retry != 7)
  724 					qp->comp.rnr_retry--;
  725 
  726 				qp->req.need_retry = 1;
  727 				pr_debug("set rnr nak timer\n");
  728 				mod_timer(&qp->rnr_nak_timer,
  729 					  jiffies + rnrnak_jiffies(aeth_syn(pkt)
  730 						& ~AETH_TYPE_MASK));
  731 				goto exit;
  732 			} else {
  733 				wqe->status = IB_WC_RNR_RETRY_EXC_ERR;
  734 				state = COMPST_ERROR;
  735 			}
  736 			break;
  737 
  738 		case COMPST_ERROR:
  739 			do_complete(qp, wqe);
  740 			rxe_qp_error(qp);
  741 			goto exit;
  742 		}
  743 	}
  744 
  745 exit:
  746 	/* we come here if we are done with processing and want the task to
  747 	 * exit from the loop calling us
  748 	 */
  749 	return -EAGAIN;
  750 
  751 done:
   752 	/* we come here if we have processed a packet and want the task to call
  753 	 * us again to see if there is anything else to do
  754 	 */
  755 	return 0;
  756 }
  757 
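The exit:/done: comments above define rxe_completer()'s contract with its caller: return 0 for "processed a packet, call me again", -EAGAIN for "nothing to do, stop until new work arrives". A stand-in sketch of a consumer loop honoring that contract; the real consumer is the driver's task layer, so this is an illustration, not rxe_task.c:

#include <stdio.h>
#include <errno.h>

static int pending = 3;	/* pretend three completions are queued */

static int fake_completer(void *arg)
{
	(void)arg;
	if (pending > 0) {
		pending--;
		return 0;	/* like "goto done": call me again */
	}
	return -EAGAIN;		/* like "goto exit": stop for now */
}

int main(void)
{
	int calls = 0;

	while (fake_completer(NULL) == 0)	/* rerun until asked to stop */
		calls++;
	printf("completer ran %d times\n", calls);	/* prints 3 */
	return 0;
}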
   758 #line 22 "/home/ubuntu/launches/work/current--X--drivers--X--defaultlinux-4.8-rc1.tar.xz--X--43_1a--X--cpachecker/linux-4.8-rc1.tar.xz/csd_deg_dscv/9940/dscv_tempdir/dscv/ri/43_1a/drivers/infiniband/sw/rxe/rxe_comp.o.c.prepared"

    1 /*
    2  * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
    3  * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
    4  *
    5  * This software is available to you under a choice of one of two
    6  * licenses.  You may choose to be licensed under the terms of the GNU
    7  * General Public License (GPL) Version 2, available from the file
    8  * COPYING in the main directory of this source tree, or the
    9  * OpenIB.org BSD license below:
   10  *
   11  *     Redistribution and use in source and binary forms, with or
   12  *     without modification, are permitted provided that the following
   13  *     conditions are met:
   14  *
   15  *	- Redistributions of source code must retain the above
   16  *	  copyright notice, this list of conditions and the following
   17  *	  disclaimer.
   18  *
   19  *	- Redistributions in binary form must reproduce the above
   20  *	  copyright notice, this list of conditions and the following
   21  *	  disclaimer in the documentation and/or other materials
   22  *	  provided with the distribution.
   23  *
   24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
   25  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
   26  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
   27  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
   28  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
   29  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
   30  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
   31  * SOFTWARE.
   32  */
   33 
   34 #include "rxe.h"
   35 #include "rxe_loc.h"
   36 
   37 /* Compute a partial ICRC for all the IB transport headers. */
   38 u32 rxe_icrc_hdr(struct rxe_pkt_info *pkt, struct sk_buff *skb)
   39 {
   40 	unsigned int bth_offset = 0;
   41 	struct iphdr *ip4h = NULL;
   42 	struct ipv6hdr *ip6h = NULL;
   43 	struct udphdr *udph;
   44 	struct rxe_bth *bth;
   45 	int crc;
   46 	int length;
   47 	int hdr_size = sizeof(struct udphdr) +
   48 		(skb->protocol == htons(ETH_P_IP) ?
   49 		sizeof(struct iphdr) : sizeof(struct ipv6hdr));
    50 	/* pseudo header buffer size is calculated using the ipv6 header size,
    51 	 * since it is bigger than the ipv4 header
   52 	 */
   53 	u8 pshdr[sizeof(struct udphdr) +
   54 		sizeof(struct ipv6hdr) +
   55 		RXE_BTH_BYTES];
   56 
   57 	/* This seed is the result of computing a CRC with a seed of
    58 	 * 0xffffffff and 8 bytes of 0xff representing a masked LRH.
   59 	 */
   60 	crc = 0xdebb20e3;
   61 
   62 	if (skb->protocol == htons(ETH_P_IP)) { /* IPv4 */
   63 		memcpy(pshdr, ip_hdr(skb), hdr_size);
   64 		ip4h = (struct iphdr *)pshdr;
   65 		udph = (struct udphdr *)(ip4h + 1);
   66 
   67 		ip4h->ttl = 0xff;
   68 		ip4h->check = CSUM_MANGLED_0;
   69 		ip4h->tos = 0xff;
   70 	} else {				/* IPv6 */
   71 		memcpy(pshdr, ipv6_hdr(skb), hdr_size);
   72 		ip6h = (struct ipv6hdr *)pshdr;
   73 		udph = (struct udphdr *)(ip6h + 1);
   74 
   75 		memset(ip6h->flow_lbl, 0xff, sizeof(ip6h->flow_lbl));
   76 		ip6h->priority = 0xf;
   77 		ip6h->hop_limit = 0xff;
   78 	}
   79 	udph->check = CSUM_MANGLED_0;
   80 
   81 	bth_offset += hdr_size;
   82 
   83 	memcpy(&pshdr[bth_offset], pkt->hdr, RXE_BTH_BYTES);
   84 	bth = (struct rxe_bth *)&pshdr[bth_offset];
   85 
   86 	/* exclude bth.resv8a */
   87 	bth->qpn |= cpu_to_be32(~BTH_QPN_MASK);
   88 
   89 	length = hdr_size + RXE_BTH_BYTES;
   90 	crc = crc32_le(crc, pshdr, length);
   91 
   92 	/* And finish to compute the CRC on the remainder of the headers. */
   93 	crc = crc32_le(crc, pkt->hdr + RXE_BTH_BYTES,
   94 		       rxe_opcode[pkt->opcode].length - RXE_BTH_BYTES);
   95 	return crc;
    96 }
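
A note on the masking at line 87 above: OR-ing ~BTH_QPN_MASK into the scratch copy forces the reserved resv8a byte to all-ones before the CRC runs, so both ends compute the same ICRC no matter what resv8a carried on the wire (the trace's "bth->qpn | 255U" is the same cpu_to_be32(~BTH_QPN_MASK) constant after little-endian lowering). A host-order sketch of the idea, assuming BTH_QPN_MASK is the 24-bit value 0x00ffffff from rxe_hdr.h:

#include <stdint.h>
#include <stdio.h>

#define BTH_QPN_MASK 0x00ffffffu	/* 24-bit QPN; the top byte is resv8a */

int main(void)
{
	uint32_t qpn_word = 0x12000011u;	/* resv8a = 0x12, QPN = 0x000011 */

	qpn_word |= ~BTH_QPN_MASK;		/* force resv8a to 0xff before the CRC */
	printf("0x%08x\n", qpn_word);		/* prints 0xff000011 */
	return 0;
}

    1 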
    2 /*
    3  * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
    4  * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
    5  *
    6  * This software is available to you under a choice of one of two
    7  * licenses.  You may choose to be licensed under the terms of the GNU
    8  * General Public License (GPL) Version 2, available from the file
    9  * COPYING in the main directory of this source tree, or the
   10  * OpenIB.org BSD license below:
   11  *
   12  *     Redistribution and use in source and binary forms, with or
   13  *     without modification, are permitted provided that the following
   14  *     conditions are met:
   15  *
   16  *	- Redistributions of source code must retain the above
   17  *	  copyright notice, this list of conditions and the following
   18  *	  disclaimer.
   19  *
   20  *	- Redistributions in binary form must reproduce the above
   21  *	  copyright notice, this list of conditions and the following
   22  *	  disclaimer in the documentation and/or other materials
   23  *	  provided with the distribution.
   24  *
   25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
   26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
   27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
   28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
   29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
   30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
   31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
   32  * SOFTWARE.
   33  */
   34 
   35 #include <linux/skbuff.h>
   36 #include <linux/if_arp.h>
   37 #include <linux/netdevice.h>
   38 #include <linux/if.h>
   39 #include <linux/if_vlan.h>
   40 #include <net/udp_tunnel.h>
   41 #include <net/sch_generic.h>
   42 #include <linux/netfilter.h>
   43 #include <rdma/ib_addr.h>
   44 
   45 #include "rxe.h"
   46 #include "rxe_net.h"
   47 #include "rxe_loc.h"
   48 
   49 static LIST_HEAD(rxe_dev_list);
   50 static spinlock_t dev_list_lock; /* spinlock for device list */
   51 
   52 struct rxe_dev *net_to_rxe(struct net_device *ndev)
   53 {
   54 	struct rxe_dev *rxe;
   55 	struct rxe_dev *found = NULL;
   56 
   57 	spin_lock_bh(&dev_list_lock);
   58 	list_for_each_entry(rxe, &rxe_dev_list, list) {
   59 		if (rxe->ndev == ndev) {
   60 			found = rxe;
   61 			break;
   62 		}
   63 	}
   64 	spin_unlock_bh(&dev_list_lock);
   65 
   66 	return found;
   67 }
   68 
   69 struct rxe_dev *get_rxe_by_name(const char* name)
   70 {
   71 	struct rxe_dev *rxe;
   72 	struct rxe_dev *found = NULL;
   73 
   74 	spin_lock_bh(&dev_list_lock);
   75 	list_for_each_entry(rxe, &rxe_dev_list, list) {
   76 		if (!strcmp(name, rxe->ib_dev.name)) {
   77 			found = rxe;
   78 			break;
   79 		}
   80 	}
   81 	spin_unlock_bh(&dev_list_lock);
   82 	return found;
   83 }
   84 
   85 
   86 struct rxe_recv_sockets recv_sockets;
   87 
   88 static __be64 rxe_mac_to_eui64(struct net_device *ndev)
   89 {
   90 	unsigned char *mac_addr = ndev->dev_addr;
   91 	__be64 eui64;
   92 	unsigned char *dst = (unsigned char *)&eui64;
   93 
   94 	dst[0] = mac_addr[0] ^ 2;
   95 	dst[1] = mac_addr[1];
   96 	dst[2] = mac_addr[2];
   97 	dst[3] = 0xff;
   98 	dst[4] = 0xfe;
   99 	dst[5] = mac_addr[3];
  100 	dst[6] = mac_addr[4];
  101 	dst[7] = mac_addr[5];
  102 
  103 	return eui64;
  104 }
  105 
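A worked example of the mapping above: flip the universal/local bit in the first octet and splice 0xff, 0xfe into the middle of the MAC. The MAC address below is made up for illustration:

#include <stdio.h>

int main(void)
{
	unsigned char mac[6] = { 0x02, 0x1a, 0x2b, 0x3c, 0x4d, 0x5e };
	unsigned char eui[8];
	int i;

	eui[0] = mac[0] ^ 2;	/* toggle the locally-administered bit */
	eui[1] = mac[1];
	eui[2] = mac[2];
	eui[3] = 0xff;		/* EUI-64 filler bytes */
	eui[4] = 0xfe;
	eui[5] = mac[3];
	eui[6] = mac[4];
	eui[7] = mac[5];

	for (i = 0; i < 8; i++)
		printf("%02x%c", eui[i], i < 7 ? ':' : '\n');
	/* prints 00:1a:2b:ff:fe:3c:4d:5e */
	return 0;
}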
  106 static __be64 node_guid(struct rxe_dev *rxe)
  107 {
  108 	return rxe_mac_to_eui64(rxe->ndev);
  109 }
  110 
  111 static __be64 port_guid(struct rxe_dev *rxe)
  112 {
  113 	return rxe_mac_to_eui64(rxe->ndev);
  114 }
  115 
  116 static struct device *dma_device(struct rxe_dev *rxe)
  117 {
  118 	struct net_device *ndev;
  119 
  120 	ndev = rxe->ndev;
  121 
  122 	if (ndev->priv_flags & IFF_802_1Q_VLAN)
  123 		ndev = vlan_dev_real_dev(ndev);
  124 
  125 	return ndev->dev.parent;
  126 }
  127 
  128 static int mcast_add(struct rxe_dev *rxe, union ib_gid *mgid)
  129 {
  130 	int err;
  131 	unsigned char ll_addr[ETH_ALEN];
  132 
  133 	ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr);
  134 	err = dev_mc_add(rxe->ndev, ll_addr);
  135 
  136 	return err;
  137 }
  138 
  139 static int mcast_delete(struct rxe_dev *rxe, union ib_gid *mgid)
  140 {
  141 	int err;
  142 	unsigned char ll_addr[ETH_ALEN];
  143 
  144 	ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr);
  145 	err = dev_mc_del(rxe->ndev, ll_addr);
  146 
  147 	return err;
  148 }
  149 
  150 static struct dst_entry *rxe_find_route4(struct net_device *ndev,
  151 				  struct in_addr *saddr,
  152 				  struct in_addr *daddr)
  153 {
  154 	struct rtable *rt;
  155 	struct flowi4 fl = { { 0 } };
  156 
  157 	memset(&fl, 0, sizeof(fl));
  158 	fl.flowi4_oif = ndev->ifindex;
  159 	memcpy(&fl.saddr, saddr, sizeof(*saddr));
  160 	memcpy(&fl.daddr, daddr, sizeof(*daddr));
  161 	fl.flowi4_proto = IPPROTO_UDP;
  162 
  163 	rt = ip_route_output_key(&init_net, &fl);
  164 	if (IS_ERR(rt)) {
  165 		pr_err_ratelimited("no route to %pI4\n", &daddr->s_addr);
  166 		return NULL;
  167 	}
  168 
  169 	return &rt->dst;
  170 }
  171 
  172 #if IS_ENABLED(CONFIG_IPV6)
  173 static struct dst_entry *rxe_find_route6(struct net_device *ndev,
  174 					 struct in6_addr *saddr,
  175 					 struct in6_addr *daddr)
  176 {
  177 	struct dst_entry *ndst;
  178 	struct flowi6 fl6 = { { 0 } };
  179 
  180 	memset(&fl6, 0, sizeof(fl6));
  181 	fl6.flowi6_oif = ndev->ifindex;
  182 	memcpy(&fl6.saddr, saddr, sizeof(*saddr));
  183 	memcpy(&fl6.daddr, daddr, sizeof(*daddr));
  184 	fl6.flowi6_proto = IPPROTO_UDP;
  185 
  186 	if (unlikely(ipv6_stub->ipv6_dst_lookup(sock_net(recv_sockets.sk6->sk),
  187 						recv_sockets.sk6->sk, &ndst, &fl6))) {
  188 		pr_err_ratelimited("no route to %pI6\n", daddr);
  189 		goto put;
  190 	}
  191 
  192 	if (unlikely(ndst->error)) {
  193 		pr_err("no route to %pI6\n", daddr);
  194 		goto put;
  195 	}
  196 
  197 	return ndst;
  198 put:
  199 	dst_release(ndst);
  200 	return NULL;
  201 }
  202 
  203 #else
  204 
  205 static struct dst_entry *rxe_find_route6(struct net_device *ndev,
  206 					 struct in6_addr *saddr,
  207 					 struct in6_addr *daddr)
  208 {
  209 	return NULL;
  210 }
  211 
  212 #endif
  213 
  214 static int rxe_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
  215 {
  216 	struct udphdr *udph;
  217 	struct net_device *ndev = skb->dev;
  218 	struct rxe_dev *rxe = net_to_rxe(ndev);
  219 	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
  220 
  221 	if (!rxe)
  222 		goto drop;
  223 
  224 	if (skb_linearize(skb)) {
  225 		pr_err("skb_linearize failed\n");
  226 		goto drop;
  227 	}
  228 
  229 	udph = udp_hdr(skb);
  230 	pkt->rxe = rxe;
  231 	pkt->port_num = 1;
  232 	pkt->hdr = (u8 *)(udph + 1);
  233 	pkt->mask = RXE_GRH_MASK;
  234 	pkt->paylen = be16_to_cpu(udph->len) - sizeof(*udph);
  235 
  236 	return rxe_rcv(skb);
  237 drop:
  238 	kfree_skb(skb);
  239 	return 0;
  240 }
  241 
  242 static struct socket *rxe_setup_udp_tunnel(struct net *net, __be16 port,
  243 					   bool ipv6)
  244 {
  245 	int err;
  246 	struct socket *sock;
  247 	struct udp_port_cfg udp_cfg;
  248 	struct udp_tunnel_sock_cfg tnl_cfg;
  249 
  250 	memset(&udp_cfg, 0, sizeof(udp_cfg));
  251 
  252 	if (ipv6) {
  253 		udp_cfg.family = AF_INET6;
  254 		udp_cfg.ipv6_v6only = 1;
  255 	} else {
  256 		udp_cfg.family = AF_INET;
  257 	}
  258 
  259 	udp_cfg.local_udp_port = port;
  260 
  261 	/* Create UDP socket */
  262 	err = udp_sock_create(net, &udp_cfg, &sock);
  263 	if (err < 0) {
  264 		pr_err("failed to create udp socket. err = %d\n", err);
  265 		return ERR_PTR(err);
  266 	}
  267 
  268 	tnl_cfg.sk_user_data = NULL;
  269 	tnl_cfg.encap_type = 1;
  270 	tnl_cfg.encap_rcv = rxe_udp_encap_recv;
  271 	tnl_cfg.encap_destroy = NULL;
  272 
  273 	/* Setup UDP tunnel */
  274 	setup_udp_tunnel_sock(net, sock, &tnl_cfg);
  275 
  276 	return sock;
  277 }
  278 
  279 static void rxe_release_udp_tunnel(struct socket *sk)
  280 {
  281 	udp_tunnel_sock_release(sk);
  282 }
  283 
  284 static void prepare_udp_hdr(struct sk_buff *skb, __be16 src_port,
  285 			    __be16 dst_port)
  286 {
  287 	struct udphdr *udph;
  288 
  289 	__skb_push(skb, sizeof(*udph));
  290 	skb_reset_transport_header(skb);
  291 	udph = udp_hdr(skb);
  292 
  293 	udph->dest = dst_port;
  294 	udph->source = src_port;
  295 	udph->len = htons(skb->len);
  296 	udph->check = 0;
  297 }
  298 
  299 static void prepare_ipv4_hdr(struct dst_entry *dst, struct sk_buff *skb,
  300 			     __be32 saddr, __be32 daddr, __u8 proto,
  301 			     __u8 tos, __u8 ttl, __be16 df, bool xnet)
  302 {
  303 	struct iphdr *iph;
  304 
  305 	skb_scrub_packet(skb, xnet);
  306 
  307 	skb_clear_hash(skb);
  308 	skb_dst_set(skb, dst);
  309 	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
  310 
  311 	skb_push(skb, sizeof(struct iphdr));
  312 	skb_reset_network_header(skb);
  313 
  314 	iph = ip_hdr(skb);
  315 
  316 	iph->version	=	IPVERSION;
  317 	iph->ihl	=	sizeof(struct iphdr) >> 2;
  318 	iph->frag_off	=	df;
  319 	iph->protocol	=	proto;
  320 	iph->tos	=	tos;
  321 	iph->daddr	=	daddr;
  322 	iph->saddr	=	saddr;
  323 	iph->ttl	=	ttl;
  324 	__ip_select_ident(dev_net(dst->dev), iph,
  325 			  skb_shinfo(skb)->gso_segs ?: 1);
  326 	iph->tot_len = htons(skb->len);
  327 	ip_send_check(iph);
  328 }
  329 
  330 static void prepare_ipv6_hdr(struct dst_entry *dst, struct sk_buff *skb,
  331 			     struct in6_addr *saddr, struct in6_addr *daddr,
  332 			     __u8 proto, __u8 prio, __u8 ttl)
  333 {
  334 	struct ipv6hdr *ip6h;
  335 
  336 	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
  337 	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED
  338 			    | IPSKB_REROUTED);
  339 	skb_dst_set(skb, dst);
  340 
  341 	__skb_push(skb, sizeof(*ip6h));
  342 	skb_reset_network_header(skb);
  343 	ip6h		  = ipv6_hdr(skb);
  344 	ip6_flow_hdr(ip6h, prio, htonl(0));
  345 	ip6h->payload_len = htons(skb->len);
  346 	ip6h->nexthdr     = proto;
  347 	ip6h->hop_limit   = ttl;
  348 	ip6h->daddr	  = *daddr;
  349 	ip6h->saddr	  = *saddr;
  350 	ip6h->payload_len = htons(skb->len - sizeof(*ip6h));
  351 }
  352 
  353 static int prepare4(struct rxe_dev *rxe, struct sk_buff *skb, struct rxe_av *av)
  354 {
  355 	struct dst_entry *dst;
  356 	bool xnet = false;
  357 	__be16 df = htons(IP_DF);
  358 	struct in_addr *saddr = &av->sgid_addr._sockaddr_in.sin_addr;
  359 	struct in_addr *daddr = &av->dgid_addr._sockaddr_in.sin_addr;
  360 	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
  361 
  362 	dst = rxe_find_route4(rxe->ndev, saddr, daddr);
  363 	if (!dst) {
  364 		pr_err("Host not reachable\n");
  365 		return -EHOSTUNREACH;
  366 	}
  367 
  368 	if (!memcmp(saddr, daddr, sizeof(*daddr)))
  369 		pkt->mask |= RXE_LOOPBACK_MASK;
  370 
  371 	prepare_udp_hdr(skb, htons(RXE_ROCE_V2_SPORT),
  372 			htons(ROCE_V2_UDP_DPORT));
  373 
  374 	prepare_ipv4_hdr(dst, skb, saddr->s_addr, daddr->s_addr, IPPROTO_UDP,
  375 			 av->grh.traffic_class, av->grh.hop_limit, df, xnet);
  376 	return 0;
  377 }
  378 
  379 static int prepare6(struct rxe_dev *rxe, struct sk_buff *skb, struct rxe_av *av)
  380 {
  381 	struct dst_entry *dst;
  382 	struct in6_addr *saddr = &av->sgid_addr._sockaddr_in6.sin6_addr;
  383 	struct in6_addr *daddr = &av->dgid_addr._sockaddr_in6.sin6_addr;
  384 	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
  385 
  386 	dst = rxe_find_route6(rxe->ndev, saddr, daddr);
  387 	if (!dst) {
  388 		pr_err("Host not reachable\n");
  389 		return -EHOSTUNREACH;
  390 	}
  391 
  392 	if (!memcmp(saddr, daddr, sizeof(*daddr)))
  393 		pkt->mask |= RXE_LOOPBACK_MASK;
  394 
  395 	prepare_udp_hdr(skb, htons(RXE_ROCE_V2_SPORT),
  396 			htons(ROCE_V2_UDP_DPORT));
  397 
  398 	prepare_ipv6_hdr(dst, skb, saddr, daddr, IPPROTO_UDP,
  399 			 av->grh.traffic_class,
  400 			 av->grh.hop_limit);
  401 	return 0;
  402 }
  403 
  404 static int prepare(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
  405 		   struct sk_buff *skb, u32 *crc)
  406 {
  407 	int err = 0;
  408 	struct rxe_av *av = rxe_get_av(pkt);
  409 
  410 	if (av->network_type == RDMA_NETWORK_IPV4)
  411 		err = prepare4(rxe, skb, av);
  412 	else if (av->network_type == RDMA_NETWORK_IPV6)
  413 		err = prepare6(rxe, skb, av);
  414 
  415 	*crc = rxe_icrc_hdr(pkt, skb);
  416 
  417 	return err;
  418 }
  419 
  420 static void rxe_skb_tx_dtor(struct sk_buff *skb)
  421 {
  422 	struct sock *sk = skb->sk;
  423 	struct rxe_qp *qp = sk->sk_user_data;
  424 	int skb_out = atomic_dec_return(&qp->skb_out);
  425 
  426 	if (unlikely(qp->need_req_skb &&
  427 		     skb_out < RXE_INFLIGHT_SKBS_PER_QP_LOW))
  428 		rxe_run_task(&qp->req.task, 1);
  429 }
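/*
 * Editor's note: rxe_skb_tx_dtor() above is the consumer half of a simple
 * per-QP credit scheme. A minimal sketch of the producer half follows; the
 * helper name and the assumption that the requester increments qp->skb_out
 * before handing each skb to send() are illustrative, not taken from this
 * trace (RXE_INFLIGHT_SKBS_PER_QP_HIGH is assumed to be the upper watermark
 * matching the _LOW one used above).
 */
static inline void rxe_account_tx_skb(struct rxe_qp *qp)
{
	/* one more skb in flight; rxe_skb_tx_dtor() decrements it */
	if (atomic_inc_return(&qp->skb_out) > RXE_INFLIGHT_SKBS_PER_QP_HIGH)
		qp->need_req_skb = 1;	/* throttle until destructors run */
}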
  430 
  431 static int send(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
  432 		struct sk_buff *skb)
  433 {
  434 	struct sk_buff *nskb;
  435 	struct rxe_av *av;
  436 	int err;
  437 
  438 	av = rxe_get_av(pkt);
  439 
  440 	nskb = skb_clone(skb, GFP_ATOMIC);
  441 	if (!nskb)
  442 		return -ENOMEM;
  443 
  444 	nskb->destructor = rxe_skb_tx_dtor;
  445 	nskb->sk = pkt->qp->sk->sk;
  446 
  447 	if (av->network_type == RDMA_NETWORK_IPV4) {
  448 		err = ip_local_out(dev_net(skb_dst(skb)->dev), nskb->sk, nskb);
  449 	} else if (av->network_type == RDMA_NETWORK_IPV6) {
  450 		err = ip6_local_out(dev_net(skb_dst(skb)->dev), nskb->sk, nskb);
  451 	} else {
  452 		pr_err("Unknown layer 3 protocol: %d\n", av->network_type);
  453 		kfree_skb(nskb);
  454 		return -EINVAL;
  455 	}
  456 
  457 	if (unlikely(net_xmit_eval(err))) {
  458 		pr_debug("error sending packet: %d\n", err);
  459 		return -EAGAIN;
  460 	}
  461 
  462 	kfree_skb(skb);
  463 
  464 	return 0;
  465 }
  466 
  467 static int loopback(struct sk_buff *skb)
  468 {
  469 	return rxe_rcv(skb);
  470 }
  471 
  472 static inline int addr_same(struct rxe_dev *rxe, struct rxe_av *av)
  473 {
  474 	return rxe->port.port_guid == av->grh.dgid.global.interface_id;
  475 }
  476 
  477 static struct sk_buff *init_packet(struct rxe_dev *rxe, struct rxe_av *av,
  478 				   int paylen, struct rxe_pkt_info *pkt)
  479 {
  480 	unsigned int hdr_len;
  481 	struct sk_buff *skb;
  482 
  483 	if (av->network_type == RDMA_NETWORK_IPV4)
  484 		hdr_len = ETH_HLEN + sizeof(struct udphdr) +
  485 			sizeof(struct iphdr);
  486 	else
  487 		hdr_len = ETH_HLEN + sizeof(struct udphdr) +
  488 			sizeof(struct ipv6hdr);
  489 
  490 	skb = alloc_skb(paylen + hdr_len + LL_RESERVED_SPACE(rxe->ndev),
  491 			GFP_ATOMIC);
  492 	if (unlikely(!skb))
  493 		return NULL;
  494 
  495 	skb_reserve(skb, hdr_len + LL_RESERVED_SPACE(rxe->ndev));
  496 
  497 	skb->dev	= rxe->ndev;
  498 	if (av->network_type == RDMA_NETWORK_IPV4)
  499 		skb->protocol = htons(ETH_P_IP);
  500 	else
  501 		skb->protocol = htons(ETH_P_IPV6);
  502 
  503 	pkt->rxe	= rxe;
  504 	pkt->port_num	= 1;
  505 	pkt->hdr	= skb_put(skb, paylen);
  506 	pkt->mask	|= RXE_GRH_MASK;
  507 
  508 	memset(pkt->hdr, 0, paylen);
  509 
  510 	return skb;
  511 }
  512 
  513 /*
  514  * this is required by rxe_cfg to match rxe devices in
  515  * /sys/class/infiniband up with their underlying ethernet devices
  516  */
  517 static char *parent_name(struct rxe_dev *rxe, unsigned int port_num)
  518 {
  519 	return rxe->ndev->name;
  520 }
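/*
 * Editor's note: a hedged userspace sketch of the lookup rxe_cfg performs
 * with the name returned above. The "parent" sysfs leaf is a hypothetical
 * name chosen for illustration; only the /sys/class/infiniband/<ibdev>/
 * prefix is implied by the comment above.
 */
#if 0	/* userspace illustration, not part of the driver */
#include <stdio.h>

static int read_parent(const char *ibdev, char *buf, size_t len)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/class/infiniband/%s/parent", ibdev);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (!fgets(buf, len, f)) {
		fclose(f);
		return -1;
	}
	fclose(f);
	return 0;	/* buf now holds the underlying ethernet device name */
}
#endif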
  521 
  522 static enum rdma_link_layer link_layer(struct rxe_dev *rxe,
  523 				       unsigned int port_num)
  524 {
  525 	return IB_LINK_LAYER_ETHERNET;
  526 }
  527 
  528 static struct rxe_ifc_ops ifc_ops = {
  529 	.node_guid	= node_guid,
  530 	.port_guid	= port_guid,
  531 	.dma_device	= dma_device,
  532 	.mcast_add	= mcast_add,
  533 	.mcast_delete	= mcast_delete,
  534 	.prepare	= prepare,
  535 	.send		= send,
  536 	.loopback	= loopback,
  537 	.init_packet	= init_packet,
  538 	.parent_name	= parent_name,
  539 	.link_layer	= link_layer,
  540 };
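/*
 * Editor's note: the core driver reaches the functions above only through
 * this ops table, installed in rxe_net_add() below. A minimal sketch of a
 * call site (hypothetical wrapper; the real wrappers live elsewhere in the
 * driver):
 */
static inline int rxe_ifc_send(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
			       struct sk_buff *skb)
{
	return rxe->ifc_ops->send(rxe, pkt, skb);
}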
  541 
  542 struct rxe_dev *rxe_net_add(struct net_device *ndev)
  543 {
  544 	int err;
  545 	struct rxe_dev *rxe = NULL;
  546 
  547 	rxe = (struct rxe_dev *)ib_alloc_device(sizeof(*rxe));
  548 	if (!rxe)
  549 		return NULL;
  550 
  551 	rxe->ifc_ops = &ifc_ops;
  552 	rxe->ndev = ndev;
  553 
  554 	err = rxe_add(rxe, ndev->mtu);
  555 	if (err) {
  556 		ib_dealloc_device(&rxe->ib_dev);
  557 		return NULL;
  558 	}
  559 
  560 	spin_lock_bh(&dev_list_lock);
  561 	list_add_tail(&rxe->list, &rxe_dev_list);
  562 	spin_unlock_bh(&dev_list_lock);
  563 	return rxe;
  564 }
  565 
  566 void rxe_remove_all(void)
  567 {
  568 	spin_lock_bh(&dev_list_lock);
  569 	while (!list_empty(&rxe_dev_list)) {
  570 		struct rxe_dev *rxe =
  571 			list_first_entry(&rxe_dev_list, struct rxe_dev, list);
  572 
  573 		list_del(&rxe->list);
  574 		spin_unlock_bh(&dev_list_lock);
  575 		rxe_remove(rxe);
  576 		spin_lock_bh(&dev_list_lock);
  577 	}
  578 	spin_unlock_bh(&dev_list_lock);
  579 }
  580 EXPORT_SYMBOL(rxe_remove_all);
  581 
  582 static void rxe_port_event(struct rxe_dev *rxe,
  583 			   enum ib_event_type event)
  584 {
  585 	struct ib_event ev;
  586 
  587 	ev.device = &rxe->ib_dev;
  588 	ev.element.port_num = 1;
  589 	ev.event = event;
  590 
  591 	ib_dispatch_event(&ev);
  592 }
  593 
  594 /* Caller must hold net_info_lock */
  595 void rxe_port_up(struct rxe_dev *rxe)
  596 {
  597 	struct rxe_port *port;
  598 
  599 	port = &rxe->port;
  600 	port->attr.state = IB_PORT_ACTIVE;
  601 	port->attr.phys_state = IB_PHYS_STATE_LINK_UP;
  602 
  603 	rxe_port_event(rxe, IB_EVENT_PORT_ACTIVE);
  604 	pr_info("rxe: set %s active\n", rxe->ib_dev.name);
  605 	return;
  606 }
  607 
  608 /* Caller must hold net_info_lock */
  609 void rxe_port_down(struct rxe_dev *rxe)
  610 {
  611 	struct rxe_port *port;
  612 
  613 	port = &rxe->port;
  614 	port->attr.state = IB_PORT_DOWN;
  615 	port->attr.phys_state = IB_PHYS_STATE_LINK_DOWN;
  616 
  617 	rxe_port_event(rxe, IB_EVENT_PORT_ERR);
  618 	pr_info("rxe: set %s down\n", rxe->ib_dev.name);
  619 	return;
  620 }
  621 
  622 static int rxe_notify(struct notifier_block *not_blk,
  623 		      unsigned long event,
  624 		      void *arg)
  625 {
  626 	struct net_device *ndev = netdev_notifier_info_to_dev(arg);
  627 	struct rxe_dev *rxe = net_to_rxe(ndev);
  628 
  629 	if (!rxe)
  630 		goto out;
  631 
  632 	switch (event) {
  633 	case NETDEV_UNREGISTER:
  634 		list_del(&rxe->list);
  635 		rxe_remove(rxe);
  636 		break;
  637 	case NETDEV_UP:
  638 		rxe_port_up(rxe);
  639 		break;
  640 	case NETDEV_DOWN:
  641 		rxe_port_down(rxe);
  642 		break;
  643 	case NETDEV_CHANGEMTU:
  644 		pr_info("rxe: %s changed mtu to %d\n", ndev->name, ndev->mtu);
  645 		rxe_set_mtu(rxe, ndev->mtu);
  646 		break;
  647 	case NETDEV_REBOOT:
  648 	case NETDEV_CHANGE:
  649 	case NETDEV_GOING_DOWN:
  650 	case NETDEV_CHANGEADDR:
  651 	case NETDEV_CHANGENAME:
  652 	case NETDEV_FEAT_CHANGE:
  653 	default:
  654 		pr_info("rxe: ignoring netdev event = %ld for %s\n",
  655 			event, ndev->name);
  656 		break;
  657 	}
  658 out:
  659 	return NOTIFY_OK;
  660 }
  661 
  662 static struct notifier_block rxe_net_notifier = {
  663 	.notifier_call = rxe_notify,
  664 };
  665 
  666 int rxe_net_init(void)
  667 {
  668 	int err;
  669 
  670 	spin_lock_init(&dev_list_lock);
  671 
  672 	recv_sockets.sk6 = rxe_setup_udp_tunnel(&init_net,
  673 			htons(ROCE_V2_UDP_DPORT), true);
  674 	if (IS_ERR(recv_sockets.sk6)) {
  675 		recv_sockets.sk6 = NULL;
  676 		pr_err("rxe: Failed to create IPv6 UDP tunnel\n");
  677 		return -1;
  678 	}
  679 
  680 	recv_sockets.sk4 = rxe_setup_udp_tunnel(&init_net,
  681 			htons(ROCE_V2_UDP_DPORT), false);
  682 	if (IS_ERR(recv_sockets.sk4)) {
  683 		rxe_release_udp_tunnel(recv_sockets.sk6);
  684 		recv_sockets.sk4 = NULL;
  685 		recv_sockets.sk6 = NULL;
  686 		pr_err("rxe: Failed to create IPv4 UDP tunnel\n");
  687 		return -1;
  688 	}
  689 
  690 	err = register_netdevice_notifier(&rxe_net_notifier);
  691 	if (err) {
  692 		rxe_release_udp_tunnel(recv_sockets.sk6);
  693 		rxe_release_udp_tunnel(recv_sockets.sk4);
  694 		pr_err("rxe: Failed to register netdev notifier\n");
  695 	}
  696 
  697 	return err;
  698 }
  699 
  700 void rxe_net_exit(void)
  701 {
  702 	if (recv_sockets.sk6)
  703 		rxe_release_udp_tunnel(recv_sockets.sk6);
  704 
  705 	if (recv_sockets.sk4)
  706 		rxe_release_udp_tunnel(recv_sockets.sk4);
  707 
  708 	unregister_netdevice_notifier(&rxe_net_notifier);
  709 }
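/*
 * Editor's note: a sketch of how the module entry points might tie
 * rxe_net_init()/rxe_net_exit() to the cache setup that appears later in
 * this trace. The ordering and names are assumptions for illustration,
 * not code from the trace.
 */
#if 0
static int __init rxe_module_init(void)
{
	int err;

	err = rxe_cache_init();		/* object-pool slab caches */
	if (err)
		return err;

	err = rxe_net_init();		/* UDP tunnels + netdev notifier */
	if (err) {
		rxe_cache_exit();
		return err;
	}
	return 0;
}

static void __exit rxe_module_exit(void)
{
	rxe_remove_all();
	rxe_net_exit();
	rxe_cache_exit();
}
#endif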
  710 
  711 
  712 
  713 
  714 
  715 /* LDV_COMMENT_BEGIN_MAIN */
  716 #ifdef LDV_MAIN19_sequence_infinite_withcheck_stateful
  717 
  718 /*###########################################################################*/
  719 
  720 /*############## Driver Environment Generator 0.2 output ####################*/
  721 
  722 /*###########################################################################*/
  723 
  724 
  725 
  726 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Tests whether all kernel resources are correctly released by the driver before it is unloaded. */
  727 void ldv_check_final_state(void);
  728 
  729 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result. */
  730 void ldv_check_return_value(int res);
  731 
  732 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result of probe() function. */
  733 void ldv_check_return_value_probe(int res);
  734 
  735 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Initializes the model. */
  736 void ldv_initialize(void);
  737 
  738 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Reinitializes the model between distinct model function calls. */
  739 void ldv_handler_precall(void);
  740 
  741 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Returns arbitrary interger value. */
  742 int nondet_int(void);
  743 
  744 /* LDV_COMMENT_VAR_DECLARE_LDV Special variable for LDV verifier. */
  745 int LDV_IN_INTERRUPT;
  746 
  747 /* LDV_COMMENT_FUNCTION_MAIN Main function for LDV verifier. */
  748 void ldv_main19_sequence_infinite_withcheck_stateful(void) {
  749 
  750 
  751 
  752 	/* LDV_COMMENT_BEGIN_VARIABLE_DECLARATION_PART */
  753 	/*============================= VARIABLE DECLARATION PART   =============================*/
  754 	/** STRUCT: struct type: rxe_ifc_ops, struct name: ifc_ops **/
  755 	/* content: static __be64 node_guid(struct rxe_dev *rxe)*/
  756 	/* LDV_COMMENT_END_PREP */
  757 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "node_guid" */
  758 	struct rxe_dev * var_group1;
  759 	/* LDV_COMMENT_BEGIN_PREP */
  760 	#if IS_ENABLED(CONFIG_IPV6)
  761 	#else
  762 	#endif
  763 	/* LDV_COMMENT_END_PREP */
  764 	/* content: static __be64 port_guid(struct rxe_dev *rxe)*/
  765 	/* LDV_COMMENT_END_PREP */
  766 	/* LDV_COMMENT_BEGIN_PREP */
  767 	#if IS_ENABLED(CONFIG_IPV6)
  768 	#else
  769 	#endif
  770 	/* LDV_COMMENT_END_PREP */
  771 	/* content: static struct device *dma_device(struct rxe_dev *rxe)*/
  772 	/* LDV_COMMENT_END_PREP */
  773 	/* LDV_COMMENT_BEGIN_PREP */
  774 	#if IS_ENABLED(CONFIG_IPV6)
  775 	#else
  776 	#endif
  777 	/* LDV_COMMENT_END_PREP */
  778 	/* content: static int mcast_add(struct rxe_dev *rxe, union ib_gid *mgid)*/
  779 	/* LDV_COMMENT_END_PREP */
  780 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mcast_add" */
  781 	union ib_gid * var_mcast_add_6_p1;
  782 	/* LDV_COMMENT_BEGIN_PREP */
  783 	#if IS_ENABLED(CONFIG_IPV6)
  784 	#else
  785 	#endif
  786 	/* LDV_COMMENT_END_PREP */
  787 	/* content: static int mcast_delete(struct rxe_dev *rxe, union ib_gid *mgid)*/
  788 	/* LDV_COMMENT_END_PREP */
  789 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mcast_delete" */
  790 	union ib_gid * var_mcast_delete_7_p1;
  791 	/* LDV_COMMENT_BEGIN_PREP */
  792 	#if IS_ENABLED(CONFIG_IPV6)
  793 	#else
  794 	#endif
  795 	/* LDV_COMMENT_END_PREP */
  796 	/* content: static int prepare(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, struct sk_buff *skb, u32 *crc)*/
  797 	/* LDV_COMMENT_BEGIN_PREP */
  798 	#if IS_ENABLED(CONFIG_IPV6)
  799 	#else
  800 	#endif
  801 	/* LDV_COMMENT_END_PREP */
  802 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "prepare" */
  803 	struct rxe_pkt_info * var_group2;
  804 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "prepare" */
  805 	struct sk_buff * var_prepare_19_p2;
  806 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "prepare" */
  807 	u32 * var_prepare_19_p3;
  808 	/* content: static int send(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, struct sk_buff *skb)*/
  809 	/* LDV_COMMENT_BEGIN_PREP */
  810 	#if IS_ENABLED(CONFIG_IPV6)
  811 	#else
  812 	#endif
  813 	/* LDV_COMMENT_END_PREP */
  814 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "send" */
  815 	struct sk_buff * var_send_21_p2;
  816 	/* content: static int loopback(struct sk_buff *skb)*/
  817 	/* LDV_COMMENT_BEGIN_PREP */
  818 	#if IS_ENABLED(CONFIG_IPV6)
  819 	#else
  820 	#endif
  821 	/* LDV_COMMENT_END_PREP */
  822 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "loopback" */
  823 	struct sk_buff * var_group3;
  824 	/* content: static struct sk_buff *init_packet(struct rxe_dev *rxe, struct rxe_av *av, int paylen, struct rxe_pkt_info *pkt)*/
  825 	/* LDV_COMMENT_BEGIN_PREP */
  826 	#if IS_ENABLED(CONFIG_IPV6)
  827 	#else
  828 	#endif
  829 	/* LDV_COMMENT_END_PREP */
  830 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "init_packet" */
  831 	struct rxe_av * var_group4;
  832 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "init_packet" */
  833 	int  var_init_packet_24_p2;
  834 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "init_packet" */
  835 	struct rxe_pkt_info * var_init_packet_24_p3;
  836 	/* content: static char *parent_name(struct rxe_dev *rxe, unsigned int port_num)*/
  837 	/* LDV_COMMENT_BEGIN_PREP */
  838 	#if IS_ENABLED(CONFIG_IPV6)
  839 	#else
  840 	#endif
  841 	/* LDV_COMMENT_END_PREP */
  842 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "parent_name" */
  843 	unsigned int  var_parent_name_25_p1;
  844 	/* content: static enum rdma_link_layer link_layer(struct rxe_dev *rxe, unsigned int port_num)*/
  845 	/* LDV_COMMENT_BEGIN_PREP */
  846 	#if IS_ENABLED(CONFIG_IPV6)
  847 	#else
  848 	#endif
  849 	/* LDV_COMMENT_END_PREP */
  850 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "link_layer" */
  851 	unsigned int  var_link_layer_26_p1;
  852 
  853 	/** STRUCT: struct type: notifier_block, struct name: rxe_net_notifier **/
  854 	/* content: static int rxe_notify(struct notifier_block *not_blk, unsigned long event, void *arg)*/
  855 	/* LDV_COMMENT_BEGIN_PREP */
  856 	#if IS_ENABLED(CONFIG_IPV6)
  857 	#else
  858 	#endif
  859 	/* LDV_COMMENT_END_PREP */
  860 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "rxe_notify" */
  861 	struct notifier_block * var_group5;
  862 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "rxe_notify" */
  863 	unsigned long  var_rxe_notify_32_p1;
  864 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "rxe_notify" */
  865 	void * var_rxe_notify_32_p2;
  866 
  867 
  868 
  869 
  870 	/* LDV_COMMENT_END_VARIABLE_DECLARATION_PART */
  871 	/* LDV_COMMENT_BEGIN_VARIABLE_INITIALIZING_PART */
  872 	/*============================= VARIABLE INITIALIZING PART  =============================*/
  873 	LDV_IN_INTERRUPT=1;
  874 
  875 
  876 
  877 
  878 	/* LDV_COMMENT_END_VARIABLE_INITIALIZING_PART */
  879 	/* LDV_COMMENT_BEGIN_FUNCTION_CALL_SECTION */
  880 	/*============================= FUNCTION CALL SECTION       =============================*/
  881 	/* LDV_COMMENT_FUNCTION_CALL Initialize LDV model. */
  882 	ldv_initialize();
  883 	
  884 
  885 	
  886 
  887 
  888 	while (nondet_int()) {
  890 
  891 		switch(nondet_int()) {
  892 
  893 			case 0: {
  894 
  895 				/** STRUCT: struct type: rxe_ifc_ops, struct name: ifc_ops **/
  896 				
  897 
  898 				/* content: static __be64 node_guid(struct rxe_dev *rxe)*/
  899 				/* LDV_COMMENT_END_PREP */
  900 				/* LDV_COMMENT_FUNCTION_CALL Function from field "node_guid" from driver structure with callbacks "ifc_ops" */
  901 				ldv_handler_precall();
  902 				node_guid( var_group1);
  903 				/* LDV_COMMENT_BEGIN_PREP */
  904 				#if IS_ENABLED(CONFIG_IPV6)
  905 				#else
  906 				#endif
  907 				/* LDV_COMMENT_END_PREP */
  908 				
  909 
  910 				
  911 
  912 			}
  913 
  914 			break;
  915 			case 1: {
  916 
  917 				/** STRUCT: struct type: rxe_ifc_ops, struct name: ifc_ops **/
  918 				
  919 
  920 				/* content: static __be64 port_guid(struct rxe_dev *rxe)*/
  921 				/* LDV_COMMENT_END_PREP */
  922 				/* LDV_COMMENT_FUNCTION_CALL Function from field "port_guid" from driver structure with callbacks "ifc_ops" */
  923 				ldv_handler_precall();
  924 				port_guid( var_group1);
  925 				/* LDV_COMMENT_BEGIN_PREP */
  926 				#if IS_ENABLED(CONFIG_IPV6)
  927 				#else
  928 				#endif
  929 				/* LDV_COMMENT_END_PREP */
  930 				
  931 
  932 				
  933 
  934 			}
  935 
  936 			break;
  937 			case 2: {
  938 
  939 				/** STRUCT: struct type: rxe_ifc_ops, struct name: ifc_ops **/
  940 				
  941 
  942 				/* content: static struct device *dma_device(struct rxe_dev *rxe)*/
  943 				/* LDV_COMMENT_END_PREP */
  944 				/* LDV_COMMENT_FUNCTION_CALL Function from field "dma_device" from driver structure with callbacks "ifc_ops" */
  945 				ldv_handler_precall();
  946 				dma_device( var_group1);
  947 				/* LDV_COMMENT_BEGIN_PREP */
  948 				#if IS_ENABLED(CONFIG_IPV6)
  949 				#else
  950 				#endif
  951 				/* LDV_COMMENT_END_PREP */
  952 				
  953 
  954 				
  955 
  956 			}
  957 
  958 			break;
  959 			case 3: {
  960 
  961 				/** STRUCT: struct type: rxe_ifc_ops, struct name: ifc_ops **/
  962 				
  963 
  964 				/* content: static int mcast_add(struct rxe_dev *rxe, union ib_gid *mgid)*/
  965 				/* LDV_COMMENT_END_PREP */
  966 				/* LDV_COMMENT_FUNCTION_CALL Function from field "mcast_add" from driver structure with callbacks "ifc_ops" */
  967 				ldv_handler_precall();
  968 				mcast_add( var_group1, var_mcast_add_6_p1);
  969 				/* LDV_COMMENT_BEGIN_PREP */
  970 				#if IS_ENABLED(CONFIG_IPV6)
  971 				#else
  972 				#endif
  973 				/* LDV_COMMENT_END_PREP */
  974 				
  975 
  976 				
  977 
  978 			}
  979 
  980 			break;
  981 			case 4: {
  982 
  983 				/** STRUCT: struct type: rxe_ifc_ops, struct name: ifc_ops **/
  984 				
  985 
  986 				/* content: static int mcast_delete(struct rxe_dev *rxe, union ib_gid *mgid)*/
  987 				/* LDV_COMMENT_END_PREP */
  988 				/* LDV_COMMENT_FUNCTION_CALL Function from field "mcast_delete" from driver structure with callbacks "ifc_ops" */
  989 				ldv_handler_precall();
  990 				mcast_delete( var_group1, var_mcast_delete_7_p1);
  991 				/* LDV_COMMENT_BEGIN_PREP */
  992 				#if IS_ENABLED(CONFIG_IPV6)
  993 				#else
  994 				#endif
  995 				/* LDV_COMMENT_END_PREP */
  996 				
  997 
  998 				
  999 
 1000 			}
 1001 
 1002 			break;
 1003 			case 5: {
 1004 
 1005 				/** STRUCT: struct type: rxe_ifc_ops, struct name: ifc_ops **/
 1006 				
 1007 
 1008 				/* content: static int prepare(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, struct sk_buff *skb, u32 *crc)*/
 1009 				/* LDV_COMMENT_BEGIN_PREP */
 1010 				#if IS_ENABLED(CONFIG_IPV6)
 1011 				#else
 1012 				#endif
 1013 				/* LDV_COMMENT_END_PREP */
 1014 				/* LDV_COMMENT_FUNCTION_CALL Function from field "prepare" from driver structure with callbacks "ifc_ops" */
 1015 				ldv_handler_precall();
 1016 				prepare( var_group1, var_group2, var_prepare_19_p2, var_prepare_19_p3);
 1017 				
 1018 
 1019 				
 1020 
 1021 			}
 1022 
 1023 			break;
 1024 			case 6: {
 1025 
 1026 				/** STRUCT: struct type: rxe_ifc_ops, struct name: ifc_ops **/
 1027 				
 1028 
 1029 				/* content: static int send(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, struct sk_buff *skb)*/
 1030 				/* LDV_COMMENT_BEGIN_PREP */
 1031 				#if IS_ENABLED(CONFIG_IPV6)
 1032 				#else
 1033 				#endif
 1034 				/* LDV_COMMENT_END_PREP */
 1035 				/* LDV_COMMENT_FUNCTION_CALL Function from field "send" from driver structure with callbacks "ifc_ops" */
 1036 				ldv_handler_precall();
 1037 				send( var_group1, var_group2, var_send_21_p2);
 1038 				
 1039 
 1040 				
 1041 
 1042 			}
 1043 
 1044 			break;
 1045 			case 7: {
 1046 
 1047 				/** STRUCT: struct type: rxe_ifc_ops, struct name: ifc_ops **/
 1048 				
 1049 
 1050 				/* content: static int loopback(struct sk_buff *skb)*/
 1051 				/* LDV_COMMENT_BEGIN_PREP */
 1052 				#if IS_ENABLED(CONFIG_IPV6)
 1053 				#else
 1054 				#endif
 1055 				/* LDV_COMMENT_END_PREP */
 1056 				/* LDV_COMMENT_FUNCTION_CALL Function from field "loopback" from driver structure with callbacks "ifc_ops" */
 1057 				ldv_handler_precall();
 1058 				loopback( var_group3);
 1059 				
 1060 
 1061 				
 1062 
 1063 			}
 1064 
 1065 			break;
 1066 			case 8: {
 1067 
 1068 				/** STRUCT: struct type: rxe_ifc_ops, struct name: ifc_ops **/
 1069 				
 1070 
 1071 				/* content: static struct sk_buff *init_packet(struct rxe_dev *rxe, struct rxe_av *av, int paylen, struct rxe_pkt_info *pkt)*/
 1072 				/* LDV_COMMENT_BEGIN_PREP */
 1073 				#if IS_ENABLED(CONFIG_IPV6)
 1074 				#else
 1075 				#endif
 1076 				/* LDV_COMMENT_END_PREP */
 1077 				/* LDV_COMMENT_FUNCTION_CALL Function from field "init_packet" from driver structure with callbacks "ifc_ops" */
 1078 				ldv_handler_precall();
 1079 				init_packet( var_group1, var_group4, var_init_packet_24_p2, var_init_packet_24_p3);
 1080 				
 1081 
 1082 				
 1083 
 1084 			}
 1085 
 1086 			break;
 1087 			case 9: {
 1088 
 1089 				/** STRUCT: struct type: rxe_ifc_ops, struct name: ifc_ops **/
 1090 				
 1091 
 1092 				/* content: static char *parent_name(struct rxe_dev *rxe, unsigned int port_num)*/
 1093 				/* LDV_COMMENT_BEGIN_PREP */
 1094 				#if IS_ENABLED(CONFIG_IPV6)
 1095 				#else
 1096 				#endif
 1097 				/* LDV_COMMENT_END_PREP */
 1098 				/* LDV_COMMENT_FUNCTION_CALL Function from field "parent_name" from driver structure with callbacks "ifc_ops" */
 1099 				ldv_handler_precall();
 1100 				parent_name( var_group1, var_parent_name_25_p1);
 1101 				
 1102 
 1103 				
 1104 
 1105 			}
 1106 
 1107 			break;
 1108 			case 10: {
 1109 
 1110 				/** STRUCT: struct type: rxe_ifc_ops, struct name: ifc_ops **/
 1111 				
 1112 
 1113 				/* content: static enum rdma_link_layer link_layer(struct rxe_dev *rxe, unsigned int port_num)*/
 1114 				/* LDV_COMMENT_BEGIN_PREP */
 1115 				#if IS_ENABLED(CONFIG_IPV6)
 1116 				#else
 1117 				#endif
 1118 				/* LDV_COMMENT_END_PREP */
 1119 				/* LDV_COMMENT_FUNCTION_CALL Function from field "link_layer" from driver structure with callbacks "ifc_ops" */
 1120 				ldv_handler_precall();
 1121 				link_layer( var_group1, var_link_layer_26_p1);
 1122 				
 1123 
 1124 				
 1125 
 1126 			}
 1127 
 1128 			break;
 1129 			case 11: {
 1130 
 1131 				/** STRUCT: struct type: notifier_block, struct name: rxe_net_notifier **/
 1132 				
 1133 
 1134 				/* content: static int rxe_notify(struct notifier_block *not_blk, unsigned long event, void *arg)*/
 1135 				/* LDV_COMMENT_BEGIN_PREP */
 1136 				#if IS_ENABLED(CONFIG_IPV6)
 1137 				#else
 1138 				#endif
 1139 				/* LDV_COMMENT_END_PREP */
 1140 				/* LDV_COMMENT_FUNCTION_CALL Function from field "notifier_call" from driver structure with callbacks "rxe_net_notifier" */
 1141 				ldv_handler_precall();
 1142 				rxe_notify( var_group5, var_rxe_notify_32_p1, var_rxe_notify_32_p2);
 1143 				
 1144 
 1145 				
 1146 
 1147 			}
 1148 
 1149 			break;
 1150 			default: break;
 1151 
 1152 		}
 1153 
 1154 	}
 1155 
 1156 	ldv_module_exit: 
 1157 
 1158 	/* LDV_COMMENT_FUNCTION_CALL Checks that all resources and locks are correctly released before the driver will be unloaded. */
 1159 	ldv_final: ldv_check_final_state();
 1160 
 1161 	/* LDV_COMMENT_END_FUNCTION_CALL_SECTION */
 1162 	return;
 1163 
 1164 }
 1165 #endif
 1166 
  167 /* LDV_COMMENT_END_MAIN */

    1 /*
    2  * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
    3  * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
    4  *
    5  * This software is available to you under a choice of one of two
    6  * licenses.  You may choose to be licensed under the terms of the GNU
    7  * General Public License (GPL) Version 2, available from the file
    8  * COPYING in the main directory of this source tree, or the
    9  * OpenIB.org BSD license below:
   10  *
   11  *	   Redistribution and use in source and binary forms, with or
   12  *	   without modification, are permitted provided that the following
   13  *	   conditions are met:
   14  *
   15  *		- Redistributions of source code must retain the above
   16  *		  copyright notice, this list of conditions and the following
   17  *		  disclaimer.
   18  *
   19  *		- Redistributions in binary form must reproduce the above
   20  *		  copyright notice, this list of conditions and the following
   21  *		  disclaimer in the documentation and/or other materials
   22  *		  provided with the distribution.
   23  *
   24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
   25  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
   26  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
   27  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
   28  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
   29  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
   30  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
   31  * SOFTWARE.
   32  */
   33 
   34 #include "rxe.h"
   35 #include "rxe_loc.h"
   36 
   37 /* info about object pools
   38  * note that mr and mw share a single index space
   39  * so that one can map an lkey to the correct type of object
   40  */
   41 struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
   42 	[RXE_TYPE_UC] = {
   43 		.name		= "rxe-uc",
   44 		.size		= sizeof(struct rxe_ucontext),
   45 	},
   46 	[RXE_TYPE_PD] = {
   47 		.name		= "rxe-pd",
   48 		.size		= sizeof(struct rxe_pd),
   49 	},
   50 	[RXE_TYPE_AH] = {
   51 		.name		= "rxe-ah",
   52 		.size		= sizeof(struct rxe_ah),
   53 		.flags		= RXE_POOL_ATOMIC,
   54 	},
   55 	[RXE_TYPE_SRQ] = {
   56 		.name		= "rxe-srq",
   57 		.size		= sizeof(struct rxe_srq),
   58 		.flags		= RXE_POOL_INDEX,
   59 		.min_index	= RXE_MIN_SRQ_INDEX,
   60 		.max_index	= RXE_MAX_SRQ_INDEX,
   61 	},
   62 	[RXE_TYPE_QP] = {
   63 		.name		= "rxe-qp",
   64 		.size		= sizeof(struct rxe_qp),
   65 		.cleanup	= rxe_qp_cleanup,
   66 		.flags		= RXE_POOL_INDEX,
   67 		.min_index	= RXE_MIN_QP_INDEX,
   68 		.max_index	= RXE_MAX_QP_INDEX,
   69 	},
   70 	[RXE_TYPE_CQ] = {
   71 		.name		= "rxe-cq",
   72 		.size		= sizeof(struct rxe_cq),
   73 		.cleanup	= rxe_cq_cleanup,
   74 	},
   75 	[RXE_TYPE_MR] = {
   76 		.name		= "rxe-mr",
   77 		.size		= sizeof(struct rxe_mem),
   78 		.cleanup	= rxe_mem_cleanup,
   79 		.flags		= RXE_POOL_INDEX,
   80 		.max_index	= RXE_MAX_MR_INDEX,
   81 		.min_index	= RXE_MIN_MR_INDEX,
   82 	},
   83 	[RXE_TYPE_MW] = {
   84 		.name		= "rxe-mw",
   85 		.size		= sizeof(struct rxe_mem),
   86 		.flags		= RXE_POOL_INDEX,
   87 		.max_index	= RXE_MAX_MW_INDEX,
   88 		.min_index	= RXE_MIN_MW_INDEX,
   89 	},
   90 	[RXE_TYPE_MC_GRP] = {
   91 		.name		= "rxe-mc_grp",
   92 		.size		= sizeof(struct rxe_mc_grp),
   93 		.cleanup	= rxe_mc_cleanup,
   94 		.flags		= RXE_POOL_KEY,
   95 		.key_offset	= offsetof(struct rxe_mc_grp, mgid),
   96 		.key_size	= sizeof(union ib_gid),
   97 	},
   98 	[RXE_TYPE_MC_ELEM] = {
   99 		.name		= "rxe-mc_elem",
  100 		.size		= sizeof(struct rxe_mc_elem),
  101 		.flags		= RXE_POOL_ATOMIC,
  102 	},
  103 };
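/*
 * Editor's note: a minimal sketch of how a pool from the table above is
 * used, pieced together from the functions in this file ("max_qps" is a
 * hypothetical limit; error handling elided):
 *
 *	rxe_pool_init(rxe, &rxe->qp_pool, RXE_TYPE_QP, max_qps);
 *	qp = rxe_alloc(&rxe->qp_pool);	// takes pool and device refs
 *	rxe_add_index(qp);		// make it visible to lookups
 *	...
 *	rxe_drop_index(qp);
 *	rxe_drop_ref(qp);		// rxe_elem_release() frees it
 *	rxe_pool_cleanup(&rxe->qp_pool);
 */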
  104 
  105 static inline char *pool_name(struct rxe_pool *pool)
  106 {
  107 	return rxe_type_info[pool->type].name;
  108 }
  109 
  110 static inline struct kmem_cache *pool_cache(struct rxe_pool *pool)
  111 {
  112 	return rxe_type_info[pool->type].cache;
  113 }
  114 
  115 static inline enum rxe_elem_type rxe_type(void *arg)
  116 {
  117 	struct rxe_pool_entry *elem = arg;
  118 
  119 	return elem->pool->type;
  120 }
  121 
  122 int rxe_cache_init(void)
  123 {
  124 	int err;
  125 	int i;
  126 	size_t size;
  127 	struct rxe_type_info *type;
  128 
  129 	for (i = 0; i < RXE_NUM_TYPES; i++) {
  130 		type = &rxe_type_info[i];
  131 		size = ALIGN(type->size, RXE_POOL_ALIGN);
  132 		type->cache = kmem_cache_create(type->name, size,
  133 				RXE_POOL_ALIGN,
  134 				RXE_POOL_CACHE_FLAGS, NULL);
  135 		if (!type->cache) {
  136 			pr_err("Unable to init kmem cache for %s\n",
  137 			       type->name);
  138 			err = -ENOMEM;
  139 			goto err1;
  140 		}
  141 	}
  142 
  143 	return 0;
  144 
  145 err1:
  146 	while (--i >= 0) {
  147 		kmem_cache_destroy(type->cache);
  148 		type->cache = NULL;
  149 	}
  150 
  151 	return err;
  152 }
  153 
  154 void rxe_cache_exit(void)
  155 {
  156 	int i;
  157 	struct rxe_type_info *type;
  158 
  159 	for (i = 0; i < RXE_NUM_TYPES; i++) {
  160 		type = &rxe_type_info[i];
  161 		kmem_cache_destroy(type->cache);
  162 		type->cache = NULL;
  163 	}
  164 }
  165 
  166 static int rxe_pool_init_index(struct rxe_pool *pool, u32 max, u32 min)
  167 {
  168 	int err = 0;
  169 	size_t size;
  170 
  171 	if ((max - min + 1) < pool->max_elem) {
  172 		pr_warn("not enough indices for max_elem\n");
  173 		err = -EINVAL;
  174 		goto out;
  175 	}
  176 
  177 	pool->max_index = max;
  178 	pool->min_index = min;
  179 
  180 	size = BITS_TO_LONGS(max - min + 1) * sizeof(long);
  181 	pool->table = kmalloc(size, GFP_KERNEL);
  182 	if (!pool->table) {
  183 		pr_warn("no memory for bit table\n");
  184 		err = -ENOMEM;
  185 		goto out;
  186 	}
  187 
  188 	pool->table_size = size;
  189 	bitmap_zero(pool->table, max - min + 1);
  190 
  191 out:
  192 	return err;
  193 }
  194 
  195 int rxe_pool_init(
  196 	struct rxe_dev		*rxe,
  197 	struct rxe_pool		*pool,
  198 	enum rxe_elem_type	type,
  199 	unsigned		max_elem)
  200 {
  201 	int			err = 0;
  202 	size_t			size = rxe_type_info[type].size;
  203 
  204 	memset(pool, 0, sizeof(*pool));
  205 
  206 	pool->rxe		= rxe;
  207 	pool->type		= type;
  208 	pool->max_elem		= max_elem;
  209 	pool->elem_size		= ALIGN(size, RXE_POOL_ALIGN);
  210 	pool->flags		= rxe_type_info[type].flags;
  211 	pool->tree		= RB_ROOT;
  212 	pool->cleanup		= rxe_type_info[type].cleanup;
  213 
  214 	atomic_set(&pool->num_elem, 0);
  215 
  216 	kref_init(&pool->ref_cnt);
  217 
  218 	spin_lock_init(&pool->pool_lock);
  219 
  220 	if (rxe_type_info[type].flags & RXE_POOL_INDEX) {
  221 		err = rxe_pool_init_index(pool,
  222 					  rxe_type_info[type].max_index,
  223 					  rxe_type_info[type].min_index);
  224 		if (err)
  225 			goto out;
  226 	}
  227 
  228 	if (rxe_type_info[type].flags & RXE_POOL_KEY) {
  229 		pool->key_offset = rxe_type_info[type].key_offset;
  230 		pool->key_size = rxe_type_info[type].key_size;
  231 	}
  232 
  233 	pool->state = rxe_pool_valid;
  234 
  235 out:
  236 	return err;
  237 }
  238 
  239 static void rxe_pool_release(struct kref *kref)
  240 {
  241 	struct rxe_pool *pool = container_of(kref, struct rxe_pool, ref_cnt);
  242 
  243 	pool->state = rxe_pool_invalid;
  244 	kfree(pool->table);
  245 }
  246 
  247 static void rxe_pool_put(struct rxe_pool *pool)
  248 {
  249 	kref_put(&pool->ref_cnt, rxe_pool_release);
  250 }
  251 
  252 int rxe_pool_cleanup(struct rxe_pool *pool)
  253 {
  254 	unsigned long flags;
  255 
  256 	spin_lock_irqsave(&pool->pool_lock, flags);
  257 	pool->state = rxe_pool_invalid;
  258 	if (atomic_read(&pool->num_elem) > 0)
  259 		pr_warn("%s pool destroyed with unfreed elem\n",
  260 			pool_name(pool));
  261 	spin_unlock_irqrestore(&pool->pool_lock, flags);
  262 
  263 	rxe_pool_put(pool);
  264 
  265 	return 0;
  266 }
  267 
  268 static u32 alloc_index(struct rxe_pool *pool)
  269 {
  270 	u32 index;
  271 	u32 range = pool->max_index - pool->min_index + 1;
  272 
  273 	index = find_next_zero_bit(pool->table, range, pool->last);
  274 	if (index >= range)
  275 		index = find_first_zero_bit(pool->table, range);
  276 
  277 	set_bit(index, pool->table);
  278 	pool->last = index;
  279 	return index + pool->min_index;
  280 }
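/*
 * Editor's note: a worked example of the round-robin scan above. With
 * min_index = 1, max_index = 4 (range = 4), bits {0,1} set and last = 1,
 * find_next_zero_bit() returns bit 2, i.e. index 3. Only once every bit
 * is set would both searches fail; rxe_alloc() prevents that by bounding
 * num_elem with max_elem before an index is ever requested.
 */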
  281 
  282 static void insert_index(struct rxe_pool *pool, struct rxe_pool_entry *new)
  283 {
  284 	struct rb_node **link = &pool->tree.rb_node;
  285 	struct rb_node *parent = NULL;
  286 	struct rxe_pool_entry *elem;
  287 
  288 	while (*link) {
  289 		parent = *link;
  290 		elem = rb_entry(parent, struct rxe_pool_entry, node);
  291 
  292 		if (elem->index == new->index) {
  293 			pr_warn("element already exists!\n");
  294 			goto out;
  295 		}
  296 
  297 		if (elem->index > new->index)
  298 			link = &(*link)->rb_left;
  299 		else
  300 			link = &(*link)->rb_right;
  301 	}
  302 
  303 	rb_link_node(&new->node, parent, link);
  304 	rb_insert_color(&new->node, &pool->tree);
  305 out:
  306 	return;
  307 }
  308 
  309 static void insert_key(struct rxe_pool *pool, struct rxe_pool_entry *new)
  310 {
  311 	struct rb_node **link = &pool->tree.rb_node;
  312 	struct rb_node *parent = NULL;
  313 	struct rxe_pool_entry *elem;
  314 	int cmp;
  315 
  316 	while (*link) {
  317 		parent = *link;
  318 		elem = rb_entry(parent, struct rxe_pool_entry, node);
  319 
  320 		cmp = memcmp((u8 *)elem + pool->key_offset,
  321 			     (u8 *)new + pool->key_offset, pool->key_size);
  322 
  323 		if (cmp == 0) {
  324 			pr_warn("key already exists!\n");
  325 			goto out;
  326 		}
  327 
  328 		if (cmp > 0)
  329 			link = &(*link)->rb_left;
  330 		else
  331 			link = &(*link)->rb_right;
  332 	}
  333 
  334 	rb_link_node(&new->node, parent, link);
  335 	rb_insert_color(&new->node, &pool->tree);
  336 out:
  337 	return;
  338 }
  339 
  340 void rxe_add_key(void *arg, void *key)
  341 {
  342 	struct rxe_pool_entry *elem = arg;
  343 	struct rxe_pool *pool = elem->pool;
  344 	unsigned long flags;
  345 
  346 	spin_lock_irqsave(&pool->pool_lock, flags);
  347 	memcpy((u8 *)elem + pool->key_offset, key, pool->key_size);
  348 	insert_key(pool, elem);
  349 	spin_unlock_irqrestore(&pool->pool_lock, flags);
  350 }
  351 
  352 void rxe_drop_key(void *arg)
  353 {
  354 	struct rxe_pool_entry *elem = arg;
  355 	struct rxe_pool *pool = elem->pool;
  356 	unsigned long flags;
  357 
  358 	spin_lock_irqsave(&pool->pool_lock, flags);
  359 	rb_erase(&elem->node, &pool->tree);
  360 	spin_unlock_irqrestore(&pool->pool_lock, flags);
  361 }
  362 
  363 void rxe_add_index(void *arg)
  364 {
  365 	struct rxe_pool_entry *elem = arg;
  366 	struct rxe_pool *pool = elem->pool;
  367 	unsigned long flags;
  368 
  369 	spin_lock_irqsave(&pool->pool_lock, flags);
  370 	elem->index = alloc_index(pool);
  371 	insert_index(pool, elem);
  372 	spin_unlock_irqrestore(&pool->pool_lock, flags);
  373 }
  374 
  375 void rxe_drop_index(void *arg)
  376 {
  377 	struct rxe_pool_entry *elem = arg;
  378 	struct rxe_pool *pool = elem->pool;
  379 	unsigned long flags;
  380 
  381 	spin_lock_irqsave(&pool->pool_lock, flags);
  382 	clear_bit(elem->index - pool->min_index, pool->table);
  383 	rb_erase(&elem->node, &pool->tree);
  384 	spin_unlock_irqrestore(&pool->pool_lock, flags);
  385 }
  386 
  387 void *rxe_alloc(struct rxe_pool *pool)
  388 {
  389 	struct rxe_pool_entry *elem;
  390 	unsigned long flags;
  391 
  392 	might_sleep_if(!(pool->flags & RXE_POOL_ATOMIC));
  393 
  394 	spin_lock_irqsave(&pool->pool_lock, flags);
  395 	if (pool->state != rxe_pool_valid) {
  396 		spin_unlock_irqrestore(&pool->pool_lock, flags);
  397 		return NULL;
  398 	}
  399 	kref_get(&pool->ref_cnt);
  400 	spin_unlock_irqrestore(&pool->pool_lock, flags);
  401 
  402 	kref_get(&pool->rxe->ref_cnt);
  403 
  404 	if (atomic_inc_return(&pool->num_elem) > pool->max_elem) {
  405 		atomic_dec(&pool->num_elem);
  406 		rxe_dev_put(pool->rxe);
  407 		rxe_pool_put(pool);
  408 		return NULL;
  409 	}
  410 
  411 	elem = kmem_cache_zalloc(pool_cache(pool),
  412 				 (pool->flags & RXE_POOL_ATOMIC) ?
  413 				 GFP_ATOMIC : GFP_KERNEL);
	if (!elem) {
		/* undo the count and references taken above */
		atomic_dec(&pool->num_elem);
		rxe_dev_put(pool->rxe);
		rxe_pool_put(pool);
		return NULL;
	}
  414 
  415 	elem->pool = pool;
  416 	kref_init(&elem->ref_cnt);
  417 
  418 	return elem;
  419 }
  420 
  421 void rxe_elem_release(struct kref *kref)
  422 {
  423 	struct rxe_pool_entry *elem =
  424 		container_of(kref, struct rxe_pool_entry, ref_cnt);
  425 	struct rxe_pool *pool = elem->pool;
  426 
  427 	if (pool->cleanup)
  428 		pool->cleanup(elem);
  429 
  430 	kmem_cache_free(pool_cache(pool), elem);
  431 	atomic_dec(&pool->num_elem);
  432 	rxe_dev_put(pool->rxe);
  433 	rxe_pool_put(pool);
  434 }
  435 
  436 void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
  437 {
  438 	struct rb_node *node = NULL;
  439 	struct rxe_pool_entry *elem = NULL;
  440 	unsigned long flags;
  441 
  442 	spin_lock_irqsave(&pool->pool_lock, flags);
  443 
  444 	if (pool->state != rxe_pool_valid)
  445 		goto out;
  446 
  447 	node = pool->tree.rb_node;
  448 
  449 	while (node) {
  450 		elem = rb_entry(node, struct rxe_pool_entry, node);
  451 
  452 		if (elem->index > index)
  453 			node = node->rb_left;
  454 		else if (elem->index < index)
  455 			node = node->rb_right;
  456 		else
  457 			break;
  458 	}
  459 
  460 	if (node)
  461 		kref_get(&elem->ref_cnt);
  462 
  463 out:
  464 	spin_unlock_irqrestore(&pool->pool_lock, flags);
  465 	return node ? (void *)elem : NULL;
  466 }
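/*
 * Editor's note: the lookup above returns with a reference held, so the
 * caller pattern (mirroring hdr_check() in rxe_recv.c later in this
 * trace) is:
 *
 *	qp = rxe_pool_get_index(&rxe->qp_pool, index);
 *	if (!qp)
 *		return -EINVAL;
 *	...
 *	rxe_drop_ref(qp);	// pairs with the kref_get() above
 */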
  467 
  468 void *rxe_pool_get_key(struct rxe_pool *pool, void *key)
  469 {
  470 	struct rb_node *node = NULL;
  471 	struct rxe_pool_entry *elem = NULL;
  472 	int cmp;
  473 	unsigned long flags;
  474 
  475 	spin_lock_irqsave(&pool->pool_lock, flags);
  476 
  477 	if (pool->state != rxe_pool_valid)
  478 		goto out;
  479 
  480 	node = pool->tree.rb_node;
  481 
  482 	while (node) {
  483 		elem = rb_entry(node, struct rxe_pool_entry, node);
  484 
  485 		cmp = memcmp((u8 *)elem + pool->key_offset,
  486 			     key, pool->key_size);
  487 
  488 		if (cmp > 0)
  489 			node = node->rb_left;
  490 		else if (cmp < 0)
  491 			node = node->rb_right;
  492 		else
  493 			break;
  494 	}
  495 
  496 	if (node)
  497 		kref_get(&elem->ref_cnt);
  498 
  499 out:
  500 	spin_unlock_irqrestore(&pool->pool_lock, flags);
  501 	return node ? ((void *)elem) : NULL;
  502 }

    1 /*
    2  * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
    3  * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
    4  *
    5  * This software is available to you under a choice of one of two
    6  * licenses.  You may choose to be licensed under the terms of the GNU
    7  * General Public License (GPL) Version 2, available from the file
    8  * COPYING in the main directory of this source tree, or the
    9  * OpenIB.org BSD license below:
   10  *
   11  *     Redistribution and use in source and binary forms, with or
   12  *     without modification, are permitted provided that the following
   13  *     conditions are met:
   14  *
   15  *	- Redistributions of source code must retain the above
   16  *	  copyright notice, this list of conditions and the following
   17  *	  disclaimer.
   18  *
   19  *	- Redistributions in binary form must reproduce the above
   20  *	  copyright notice, this list of conditions and the following
   21  *	  disclaimer in the documentation and/or other materials
   22  *	  provided with the distribution.
   23  *
   24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
   25  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
   26  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
   27  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
   28  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
   29  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
   30  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
   31  * SOFTWARE.
   32  */
   33 
   34 #include <linux/skbuff.h>
   35 
   36 #include "rxe.h"
   37 #include "rxe_loc.h"
   38 
   39 static int check_type_state(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
   40 			    struct rxe_qp *qp)
   41 {
   42 	if (unlikely(!qp->valid))
   43 		goto err1;
   44 
   45 	switch (qp_type(qp)) {
   46 	case IB_QPT_RC:
   47 		if (unlikely((pkt->opcode & IB_OPCODE_RC) != 0)) {
   48 			pr_warn_ratelimited("bad qp type\n");
   49 			goto err1;
   50 		}
   51 		break;
   52 	case IB_QPT_UC:
   53 		if (unlikely(!(pkt->opcode & IB_OPCODE_UC))) {
   54 			pr_warn_ratelimited("bad qp type\n");
   55 			goto err1;
   56 		}
   57 		break;
   58 	case IB_QPT_UD:
   59 	case IB_QPT_SMI:
   60 	case IB_QPT_GSI:
   61 		if (unlikely(!(pkt->opcode & IB_OPCODE_UD))) {
   62 			pr_warn_ratelimited("bad qp type\n");
   63 			goto err1;
   64 		}
   65 		break;
   66 	default:
   67 		pr_warn_ratelimited("unsupported qp type\n");
   68 		goto err1;
   69 	}
   70 
   71 	if (pkt->mask & RXE_REQ_MASK) {
   72 		if (unlikely(qp->resp.state != QP_STATE_READY))
   73 			goto err1;
   74 	} else if (unlikely(qp->req.state < QP_STATE_READY ||
   75 				qp->req.state > QP_STATE_DRAINED)) {
   76 		goto err1;
   77 	}
   78 
   79 	return 0;
   80 
   81 err1:
   82 	return -EINVAL;
   83 }
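/*
 * Editor's note: the opcode tests above lean on the IBTA opcode layout,
 * in which the top three bits select the transport group:
 * RC = 0x00-0x1f, UC = 0x20-0x3f, RD = 0x40-0x5f, UD = 0x60-0x7f.
 * Hence (opcode & IB_OPCODE_UC) is a cheap, if loose, group-membership
 * test, and the RC case expects those bits to be clear.
 */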
   84 
   85 static void set_bad_pkey_cntr(struct rxe_port *port)
   86 {
   87 	spin_lock_bh(&port->port_lock);
   88 	port->attr.bad_pkey_cntr = min((u32)0xffff,
   89 				       port->attr.bad_pkey_cntr + 1);
   90 	spin_unlock_bh(&port->port_lock);
   91 }
   92 
   93 static void set_qkey_viol_cntr(struct rxe_port *port)
   94 {
   95 	spin_lock_bh(&port->port_lock);
   96 	port->attr.qkey_viol_cntr = min((u32)0xffff,
   97 					port->attr.qkey_viol_cntr + 1);
   98 	spin_unlock_bh(&port->port_lock);
   99 }
  100 
  101 static int check_keys(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
  102 		      u32 qpn, struct rxe_qp *qp)
  103 {
  104 	int i;
  105 	int found_pkey = 0;
  106 	struct rxe_port *port = &rxe->port;
  107 	u16 pkey = bth_pkey(pkt);
  108 
  109 	pkt->pkey_index = 0;
  110 
  111 	if (qpn == 1) {
  112 		for (i = 0; i < port->attr.pkey_tbl_len; i++) {
  113 			if (pkey_match(pkey, port->pkey_tbl[i])) {
  114 				pkt->pkey_index = i;
  115 				found_pkey = 1;
  116 				break;
  117 			}
  118 		}
  119 
  120 		if (!found_pkey) {
  121 			pr_warn_ratelimited("bad pkey = 0x%x\n", pkey);
  122 			set_bad_pkey_cntr(port);
  123 			goto err1;
  124 		}
  125 	} else if (qpn != 0) {
  126 		if (unlikely(!pkey_match(pkey,
  127 					 port->pkey_tbl[qp->attr.pkey_index]
  128 					))) {
  129 			pr_warn_ratelimited("bad pkey = 0x%x\n", pkey);
  130 			set_bad_pkey_cntr(port);
  131 			goto err1;
  132 		}
  133 		pkt->pkey_index = qp->attr.pkey_index;
  134 	}
  135 
  136 	if ((qp_type(qp) == IB_QPT_UD || qp_type(qp) == IB_QPT_GSI) &&
  137 	    qpn != 0 && pkt->mask) {
  138 		u32 qkey = (qpn == 1) ? GSI_QKEY : qp->attr.qkey;
  139 
  140 		if (unlikely(deth_qkey(pkt) != qkey)) {
  141 			pr_warn_ratelimited("bad qkey, got 0x%x expected 0x%x for qpn 0x%x\n",
  142 					    deth_qkey(pkt), qkey, qpn);
  143 			set_qkey_viol_cntr(port);
  144 			goto err1;
  145 		}
  146 	}
  147 
  148 	return 0;
  149 
  150 err1:
  151 	return -EINVAL;
  152 }
  153 
  154 static int check_addr(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
  155 		      struct rxe_qp *qp)
  156 {
  157 	struct sk_buff *skb = PKT_TO_SKB(pkt);
  158 
  159 	if (qp_type(qp) != IB_QPT_RC && qp_type(qp) != IB_QPT_UC)
  160 		goto done;
  161 
  162 	if (unlikely(pkt->port_num != qp->attr.port_num)) {
  163 		pr_warn_ratelimited("port %d != qp port %d\n",
  164 				    pkt->port_num, qp->attr.port_num);
  165 		goto err1;
  166 	}
  167 
  168 	if (skb->protocol == htons(ETH_P_IP)) {
  169 		struct in_addr *saddr =
  170 			&qp->pri_av.sgid_addr._sockaddr_in.sin_addr;
  171 		struct in_addr *daddr =
  172 			&qp->pri_av.dgid_addr._sockaddr_in.sin_addr;
  173 
  174 		if (ip_hdr(skb)->daddr != saddr->s_addr) {
  175 			pr_warn_ratelimited("dst addr %pI4 != qp source addr %pI4\n",
  176 					    &ip_hdr(skb)->daddr,
  177 					    &saddr->s_addr);
  178 			goto err1;
  179 		}
  180 
  181 		if (ip_hdr(skb)->saddr != daddr->s_addr) {
  182 			pr_warn_ratelimited("source addr %pI4 != qp dst addr %pI4\n",
  183 					    &ip_hdr(skb)->saddr,
  184 					    &daddr->s_addr);
  185 			goto err1;
  186 		}
  187 
  188 	} else if (skb->protocol == htons(ETH_P_IPV6)) {
  189 		struct in6_addr *saddr =
  190 			&qp->pri_av.sgid_addr._sockaddr_in6.sin6_addr;
  191 		struct in6_addr *daddr =
  192 			&qp->pri_av.dgid_addr._sockaddr_in6.sin6_addr;
  193 
  194 		if (memcmp(&ipv6_hdr(skb)->daddr, saddr, sizeof(*saddr))) {
  195 			pr_warn_ratelimited("dst addr %pI6 != qp source addr %pI6\n",
  196 					    &ipv6_hdr(skb)->daddr, saddr);
  197 			goto err1;
  198 		}
  199 
  200 		if (memcmp(&ipv6_hdr(skb)->saddr, daddr, sizeof(*daddr))) {
  201 			pr_warn_ratelimited("source addr %pI6 != qp dst addr %pI6\n",
  202 					    &ipv6_hdr(skb)->saddr, daddr);
  203 			goto err1;
  204 		}
  205 	}
  206 
  207 done:
  208 	return 0;
  209 
  210 err1:
  211 	return -EINVAL;
  212 }
  213 
  214 static int hdr_check(struct rxe_pkt_info *pkt)
  215 {
  216 	struct rxe_dev *rxe = pkt->rxe;
  217 	struct rxe_port *port = &rxe->port;
  218 	struct rxe_qp *qp = NULL;
  219 	u32 qpn = bth_qpn(pkt);
  220 	int index;
  221 	int err;
  222 
  223 	if (unlikely(bth_tver(pkt) != BTH_TVER)) {
  224 		pr_warn_ratelimited("bad tver\n");
  225 		goto err1;
  226 	}
  227 
  228 	if (qpn != IB_MULTICAST_QPN) {
  229 		index = (qpn == 0) ? port->qp_smi_index :
  230 			((qpn == 1) ? port->qp_gsi_index : qpn);
  231 		qp = rxe_pool_get_index(&rxe->qp_pool, index);
  232 		if (unlikely(!qp)) {
  233 			pr_warn_ratelimited("no qp matches qpn 0x%x\n", qpn);
  234 			goto err1;
  235 		}
  236 
  237 		err = check_type_state(rxe, pkt, qp);
  238 		if (unlikely(err))
  239 			goto err2;
  240 
  241 		err = check_addr(rxe, pkt, qp);
  242 		if (unlikely(err))
  243 			goto err2;
  244 
  245 		err = check_keys(rxe, pkt, qpn, qp);
  246 		if (unlikely(err))
  247 			goto err2;
  248 	} else {
  249 		if (unlikely((pkt->mask & RXE_GRH_MASK) == 0)) {
  250 			pr_warn_ratelimited("no grh for mcast qpn\n");
  251 			goto err1;
  252 		}
  253 	}
  254 
  255 	pkt->qp = qp;
  256 	return 0;
  257 
  258 err2:
  259 	if (qp)
  260 		rxe_drop_ref(qp);
  261 err1:
  262 	return -EINVAL;
  263 }
  264 
  265 static inline void rxe_rcv_pkt(struct rxe_dev *rxe,
  266 			       struct rxe_pkt_info *pkt,
  267 			       struct sk_buff *skb)
  268 {
  269 	if (pkt->mask & RXE_REQ_MASK)
  270 		rxe_resp_queue_pkt(rxe, pkt->qp, skb);
  271 	else
  272 		rxe_comp_queue_pkt(rxe, pkt->qp, skb);
  273 }
  274 
  275 static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
  276 {
  277 	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
  278 	struct rxe_mc_grp *mcg;
  279 	struct sk_buff *skb_copy;
  280 	struct rxe_mc_elem *mce;
  281 	struct rxe_qp *qp;
  282 	union ib_gid dgid;
  283 	int err;
  284 
  285 	if (skb->protocol == htons(ETH_P_IP))
  286 		ipv6_addr_set_v4mapped(ip_hdr(skb)->daddr,
  287 				       (struct in6_addr *)&dgid);
  288 	else if (skb->protocol == htons(ETH_P_IPV6))
  289 		memcpy(&dgid, &ipv6_hdr(skb)->daddr, sizeof(dgid));
  290 
  291 	/* lookup mcast group corresponding to mgid, takes a ref */
  292 	mcg = rxe_pool_get_key(&rxe->mc_grp_pool, &dgid);
  293 	if (!mcg)
  294 		goto err1;	/* mcast group not registered */
  295 
  296 	spin_lock_bh(&mcg->mcg_lock);
  297 
  298 	list_for_each_entry(mce, &mcg->qp_list, qp_list) {
  299 		qp = mce->qp;
  300 		pkt = SKB_TO_PKT(skb);
  301 
  302 		/* validate qp for incoming packet */
  303 		err = check_type_state(rxe, pkt, qp);
  304 		if (err)
  305 			continue;
  306 
  307 		err = check_keys(rxe, pkt, bth_qpn(pkt), qp);
  308 		if (err)
  309 			continue;
  310 
  311 		/* if *not* the last qp in the list
  312 		 * make a copy of the skb to post to the next qp
		 * (mcg_lock is held, so the clone must be atomic)
  313 		 */
  314 		skb_copy = (mce->qp_list.next != &mcg->qp_list) ?
  315 				skb_clone(skb, GFP_ATOMIC) : NULL;
  316 
  317 		pkt->qp = qp;
  318 		rxe_add_ref(qp);
  319 		rxe_rcv_pkt(rxe, pkt, skb);
  320 
  321 		skb = skb_copy;
  322 		if (!skb)
  323 			break;
  324 	}
  325 
  326 	spin_unlock_bh(&mcg->mcg_lock);
  327 
  328 	rxe_drop_ref(mcg);	/* drop ref from rxe_pool_get_key. */
  329 
  330 err1:
  331 	if (skb)
  332 		kfree_skb(skb);
  333 }
  334 
  335 static int rxe_match_dgid(struct rxe_dev *rxe, struct sk_buff *skb)
  336 {
  337 	union ib_gid dgid;
  338 	union ib_gid *pdgid;
  339 	u16 index;
  340 
  341 	if (skb->protocol == htons(ETH_P_IP)) {
  342 		ipv6_addr_set_v4mapped(ip_hdr(skb)->daddr,
  343 				       (struct in6_addr *)&dgid);
  344 		pdgid = &dgid;
  345 	} else {
  346 		pdgid = (union ib_gid *)&ipv6_hdr(skb)->daddr;
  347 	}
  348 
  349 	return ib_find_cached_gid_by_port(&rxe->ib_dev, pdgid,
  350 					  IB_GID_TYPE_ROCE_UDP_ENCAP,
  351 					  1, rxe->ndev, &index);
  352 }
  353 
  354 /* rxe_rcv is called from the interface driver */
  355 int rxe_rcv(struct sk_buff *skb)
  356 {
  357 	int err;
  358 	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
  359 	struct rxe_dev *rxe = pkt->rxe;
  360 	__be32 *icrcp;
  361 	u32 calc_icrc, pack_icrc;
  362 
  363 	pkt->offset = 0;
  364 
  365 	if (unlikely(skb->len < pkt->offset + RXE_BTH_BYTES))
  366 		goto drop;
  367 
  368 	if (unlikely(rxe_match_dgid(rxe, skb) < 0)) {
  369 		pr_warn_ratelimited("failed matching dgid\n");
  370 		goto drop;
  371 	}
  372 
  373 	pkt->opcode = bth_opcode(pkt);
  374 	pkt->psn = bth_psn(pkt);
  375 	pkt->qp = NULL;
  376 	pkt->mask |= rxe_opcode[pkt->opcode].mask;
  377 
  378 	if (unlikely(skb->len < header_size(pkt)))
  379 		goto drop;
  380 
  381 	err = hdr_check(pkt);
  382 	if (unlikely(err))
  383 		goto drop;
  384 
  385 	/* Verify ICRC */
  386 	icrcp = (__be32 *)(pkt->hdr + pkt->paylen - RXE_ICRC_SIZE);
  387 	pack_icrc = be32_to_cpu(*icrcp);
  388 
  389 	calc_icrc = rxe_icrc_hdr(pkt, skb);
  390 	calc_icrc = crc32_le(calc_icrc, (u8 *)payload_addr(pkt), payload_size(pkt));
  391 	calc_icrc = cpu_to_be32(~calc_icrc);
  392 	if (unlikely(calc_icrc != pack_icrc)) {
  393 		char saddr[INET6_ADDRSTRLEN];	/* "%pI6" expands to ~40 chars, more than sizeof(struct in6_addr) */
  394 
  395 		if (skb->protocol == htons(ETH_P_IPV6))
  396 			sprintf(saddr, "%pI6", &ipv6_hdr(skb)->saddr);
  397 		else if (skb->protocol == htons(ETH_P_IP))
  398 			sprintf(saddr, "%pI4", &ip_hdr(skb)->saddr);
  399 		else
  400 			sprintf(saddr, "unknown");
  401 
  402 		pr_warn_ratelimited("bad ICRC from %s\n", saddr);
  403 		goto drop;
  404 	}
  405 
  406 	if (unlikely(bth_qpn(pkt) == IB_MULTICAST_QPN))
  407 		rxe_rcv_mcast_pkt(rxe, skb);
  408 	else
  409 		rxe_rcv_pkt(rxe, pkt, skb);
  410 
  411 	return 0;
  412 
  413 drop:
  414 	if (pkt->qp)
  415 		rxe_drop_ref(pkt->qp);
  416 
  417 	kfree_skb(skb);
  418 	return 0;
  419 }
  420 EXPORT_SYMBOL(rxe_rcv);

    1 
    2 #include <linux/kernel.h>
    3 bool ldv_is_err(const void *ptr);
    4 bool ldv_is_err_or_null(const void *ptr);
    5 void* ldv_err_ptr(long error);
    6 long ldv_ptr_err(const void *ptr);
    7 
    8 extern void ldv_spin_lock(void);
    9 extern void ldv_spin_unlock(void);
   10 extern int ldv_spin_trylock(void);
   11 
   12 #include <linux/kernel.h>
   13 #include <verifier/rcv.h>
   14 #include <linux/module.h>
   15 #include <linux/slab.h>
   16 
   17 extern void *ldv_undefined_pointer(void);
   18 extern void ldv_check_alloc_flags(gfp_t flags);
   19 extern void ldv_check_alloc_nonatomic(void);
   20 /* Returns an arbitrary page in addition to checking flags */
   21 extern struct page *ldv_check_alloc_flags_and_return_some_page(gfp_t flags);
   22 #line 1 "/home/ubuntu/launches/work/current--X--drivers--X--defaultlinux-4.8-rc1.tar.xz--X--43_1a--X--cpachecker/linux-4.8-rc1.tar.xz/csd_deg_dscv/9940/dscv_tempdir/dscv/ri/43_1a/drivers/infiniband/sw/rxe/rxe_recv.c"
   23 /*
   24  * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
   25  * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
   26  *
   27  * This software is available to you under a choice of one of two
   28  * licenses.  You may choose to be licensed under the terms of the GNU
   29  * General Public License (GPL) Version 2, available from the file
   30  * COPYING in the main directory of this source tree, or the
   31  * OpenIB.org BSD license below:
   32  *
   33  *     Redistribution and use in source and binary forms, with or
   34  *     without modification, are permitted provided that the following
   35  *     conditions are met:
   36  *
   37  *	- Redistributions of source code must retain the above
   38  *	  copyright notice, this list of conditions and the following
   39  *	  disclaimer.
   40  *
   41  *	- Redistributions in binary form must reproduce the above
   42  *	  copyright notice, this list of conditions and the following
   43  *	  disclaimer in the documentation and/or other materials
   44  *	  provided with the distribution.
   45  *
   46  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
   47  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
   48  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
   49  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
   50  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
   51  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
   52  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
   53  * SOFTWARE.
   54  */
   55 
   56 #include <linux/skbuff.h>
   57 
   58 #include "rxe.h"
   59 #include "rxe_loc.h"
   60 
   61 static int check_type_state(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
   62 			    struct rxe_qp *qp)
   63 {
   64 	if (unlikely(!qp->valid))
   65 		goto err1;
   66 
   67 	switch (qp_type(qp)) {
   68 	case IB_QPT_RC:
   69 		if (unlikely((pkt->opcode & IB_OPCODE_RC) != 0)) {
   70 			pr_warn_ratelimited("bad qp type\n");
   71 			goto err1;
   72 		}
   73 		break;
   74 	case IB_QPT_UC:
   75 		if (unlikely(!(pkt->opcode & IB_OPCODE_UC))) {
   76 			pr_warn_ratelimited("bad qp type\n");
   77 			goto err1;
   78 		}
   79 		break;
   80 	case IB_QPT_UD:
   81 	case IB_QPT_SMI:
   82 	case IB_QPT_GSI:
   83 		if (unlikely(!(pkt->opcode & IB_OPCODE_UD))) {
   84 			pr_warn_ratelimited("bad qp type\n");
   85 			goto err1;
   86 		}
   87 		break;
   88 	default:
   89 		pr_warn_ratelimited("unsupported qp type\n");
   90 		goto err1;
   91 	}
   92 
   93 	if (pkt->mask & RXE_REQ_MASK) {
   94 		if (unlikely(qp->resp.state != QP_STATE_READY))
   95 			goto err1;
   96 	} else if (unlikely(qp->req.state < QP_STATE_READY ||
   97 				qp->req.state > QP_STATE_DRAINED)) {
   98 		goto err1;
   99 	}
  100 
  101 	return 0;
  102 
  103 err1:
  104 	return -EINVAL;
  105 }
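
A note on check_type_state() above: in the IBTA BTH encoding the transport class occupies bits 7:5 of the 8-bit opcode (RC = 0x00, UC = 0x20, UD = 0x60, as in the kernel's ib_pack.h), so `pkt->opcode & IB_OPCODE_RC` masks by zero and the RC branch as listed can never reject a packet; a real transport-class test has to mask with 0xe0 and compare against the class constant, which later kernels rework this check to do. A minimal standalone sketch of such a test, with the constants restated here as assumptions and a hypothetical helper name:

/* Sketch: classify a BTH opcode by transport class, assuming the
 * standard IBTA encoding (class in bits 7:5 of the opcode).  The
 * EX_* constants restate the usual ib_pack.h values.
 */
#include <stdio.h>

#define EX_OPCODE_RC 0x00
#define EX_OPCODE_UC 0x20
#define EX_OPCODE_UD 0x60

static unsigned int transport_class(unsigned int opcode)
{
	return opcode & 0xe0;		/* keep only the transport bits */
}

int main(void)
{
	unsigned int opcode = 0x04;	/* RC SEND Only: class 0x00, op 0x04 */

	printf("RC? %d  UC? %d\n",
	       transport_class(opcode) == EX_OPCODE_RC,
	       transport_class(opcode) == EX_OPCODE_UC);
	return 0;
}
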
  106 
  107 static void set_bad_pkey_cntr(struct rxe_port *port)
  108 {
  109 	spin_lock_bh(&port->port_lock);
  110 	port->attr.bad_pkey_cntr = min((u32)0xffff,
  111 				       port->attr.bad_pkey_cntr + 1);
  112 	spin_unlock_bh(&port->port_lock);
  113 }
  114 
  115 static void set_qkey_viol_cntr(struct rxe_port *port)
  116 {
  117 	spin_lock_bh(&port->port_lock);
  118 	port->attr.qkey_viol_cntr = min((u32)0xffff,
  119 					port->attr.qkey_viol_cntr + 1);
  120 	spin_unlock_bh(&port->port_lock);
  121 }
  122 
  123 static int check_keys(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
  124 		      u32 qpn, struct rxe_qp *qp)
  125 {
  126 	int i;
  127 	int found_pkey = 0;
  128 	struct rxe_port *port = &rxe->port;
  129 	u16 pkey = bth_pkey(pkt);
  130 
  131 	pkt->pkey_index = 0;
  132 
  133 	if (qpn == 1) {
  134 		for (i = 0; i < port->attr.pkey_tbl_len; i++) {
  135 			if (pkey_match(pkey, port->pkey_tbl[i])) {
  136 				pkt->pkey_index = i;
  137 				found_pkey = 1;
  138 				break;
  139 			}
  140 		}
  141 
  142 		if (!found_pkey) {
  143 			pr_warn_ratelimited("bad pkey = 0x%x\n", pkey);
  144 			set_bad_pkey_cntr(port);
  145 			goto err1;
  146 		}
  147 	} else if (qpn != 0) {
  148 		if (unlikely(!pkey_match(pkey,
  149 					 port->pkey_tbl[qp->attr.pkey_index]
  150 					))) {
  151 			pr_warn_ratelimited("bad pkey = 0x%0x\n", pkey);
  152 			set_bad_pkey_cntr(port);
  153 			goto err1;
  154 		}
  155 		pkt->pkey_index = qp->attr.pkey_index;
  156 	}
  157 
  158 	if ((qp_type(qp) == IB_QPT_UD || qp_type(qp) == IB_QPT_GSI) &&
  159 	    qpn != 0 && pkt->mask) {
  160 		u32 qkey = (qpn == 1) ? GSI_QKEY : qp->attr.qkey;
  161 
  162 		if (unlikely(deth_qkey(pkt) != qkey)) {
  163 			pr_warn_ratelimited("bad qkey, got 0x%x expected 0x%x for qpn 0x%x\n",
  164 					    deth_qkey(pkt), qkey, qpn);
  165 			set_qkey_viol_cntr(port);
  166 			goto err1;
  167 		}
  168 	}
  169 
  170 	return 0;
  171 
  172 err1:
  173 	return -EINVAL;
  174 }
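
check_keys() defers the actual comparison to pkey_match(), which is defined elsewhere in the rxe sources. A hedged sketch of the conventional InfiniBand rule it follows: the low 15 bits carry the partition number, the high bit marks full (1) versus limited (0) membership, and two pkeys match when the partition numbers agree and at least one side is a full member. The helper name below is hypothetical:

/* Sketch of conventional IB pkey matching; an assumption about what
 * pkey_match() checks, not a copy of the kernel implementation.
 */
static int example_pkey_match(unsigned short key_a, unsigned short key_b)
{
	if ((key_a & 0x7fff) != (key_b & 0x7fff))
		return 0;	/* different partitions never match */

	/* at least one side must hold full membership */
	return (key_a & 0x8000) || (key_b & 0x8000);
}

Under this rule the default full-member pkey 0xffff matches its limited counterpart 0x7fff, while two limited-member keys never match each other.
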
  175 
  176 static int check_addr(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
  177 		      struct rxe_qp *qp)
  178 {
  179 	struct sk_buff *skb = PKT_TO_SKB(pkt);
  180 
  181 	if (qp_type(qp) != IB_QPT_RC && qp_type(qp) != IB_QPT_UC)
  182 		goto done;
  183 
  184 	if (unlikely(pkt->port_num != qp->attr.port_num)) {
  185 		pr_warn_ratelimited("port %d != qp port %d\n",
  186 				    pkt->port_num, qp->attr.port_num);
  187 		goto err1;
  188 	}
  189 
  190 	if (skb->protocol == htons(ETH_P_IP)) {
  191 		struct in_addr *saddr =
  192 			&qp->pri_av.sgid_addr._sockaddr_in.sin_addr;
  193 		struct in_addr *daddr =
  194 			&qp->pri_av.dgid_addr._sockaddr_in.sin_addr;
  195 
  196 		if (ip_hdr(skb)->daddr != saddr->s_addr) {
  197 			pr_warn_ratelimited("dst addr %pI4 != qp source addr %pI4\n",
  198 					    &ip_hdr(skb)->daddr,
  199 					    &saddr->s_addr);
  200 			goto err1;
  201 		}
  202 
  203 		if (ip_hdr(skb)->saddr != daddr->s_addr) {
  204 			pr_warn_ratelimited("source addr %pI4 != qp dst addr %pI4\n",
  205 					    &ip_hdr(skb)->saddr,
  206 					    &daddr->s_addr);
  207 			goto err1;
  208 		}
  209 
  210 	} else if (skb->protocol == htons(ETH_P_IPV6)) {
  211 		struct in6_addr *saddr =
  212 			&qp->pri_av.sgid_addr._sockaddr_in6.sin6_addr;
  213 		struct in6_addr *daddr =
  214 			&qp->pri_av.dgid_addr._sockaddr_in6.sin6_addr;
  215 
  216 		if (memcmp(&ipv6_hdr(skb)->daddr, saddr, sizeof(*saddr))) {
  217 			pr_warn_ratelimited("dst addr %pI6 != qp source addr %pI6\n",
  218 					    &ipv6_hdr(skb)->daddr, saddr);
  219 			goto err1;
  220 		}
  221 
  222 		if (memcmp(&ipv6_hdr(skb)->saddr, daddr, sizeof(*daddr))) {
  223 			pr_warn_ratelimited("source addr %pI6 != qp dst addr %pI6\n",
  224 					    &ipv6_hdr(skb)->saddr, daddr);
  225 			goto err1;
  226 		}
  227 	}
  228 
  229 done:
  230 	return 0;
  231 
  232 err1:
  233 	return -EINVAL;
  234 }
  235 
  236 static int hdr_check(struct rxe_pkt_info *pkt)
  237 {
  238 	struct rxe_dev *rxe = pkt->rxe;
  239 	struct rxe_port *port = &rxe->port;
  240 	struct rxe_qp *qp = NULL;
  241 	u32 qpn = bth_qpn(pkt);
  242 	int index;
  243 	int err;
  244 
  245 	if (unlikely(bth_tver(pkt) != BTH_TVER)) {
  246 		pr_warn_ratelimited("bad tver\n");
  247 		goto err1;
  248 	}
  249 
  250 	if (qpn != IB_MULTICAST_QPN) {
  251 		index = (qpn == 0) ? port->qp_smi_index :
  252 			((qpn == 1) ? port->qp_gsi_index : qpn);
  253 		qp = rxe_pool_get_index(&rxe->qp_pool, index);
  254 		if (unlikely(!qp)) {
  255 			pr_warn_ratelimited("no qp matches qpn 0x%x\n", qpn);
  256 			goto err1;
  257 		}
  258 
  259 		err = check_type_state(rxe, pkt, qp);
  260 		if (unlikely(err))
  261 			goto err2;
  262 
  263 		err = check_addr(rxe, pkt, qp);
  264 		if (unlikely(err))
  265 			goto err2;
  266 
  267 		err = check_keys(rxe, pkt, qpn, qp);
  268 		if (unlikely(err))
  269 			goto err2;
  270 	} else {
  271 		if (unlikely((pkt->mask & RXE_GRH_MASK) == 0)) {
  272 			pr_warn_ratelimited("no grh for mcast qpn\n");
  273 			goto err1;
  274 		}
  275 	}
  276 
  277 	pkt->qp = qp;
  278 	return 0;
  279 
  280 err2:
  281 	if (qp)
  282 		rxe_drop_ref(qp);
  283 err1:
  284 	return -EINVAL;
  285 }
  286 
  287 static inline void rxe_rcv_pkt(struct rxe_dev *rxe,
  288 			       struct rxe_pkt_info *pkt,
  289 			       struct sk_buff *skb)
  290 {
  291 	if (pkt->mask & RXE_REQ_MASK)
  292 		rxe_resp_queue_pkt(rxe, pkt->qp, skb);
  293 	else
  294 		rxe_comp_queue_pkt(rxe, pkt->qp, skb);
  295 }
  296 
  297 static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
  298 {
  299 	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
  300 	struct rxe_mc_grp *mcg;
  301 	struct sk_buff *skb_copy;
  302 	struct rxe_mc_elem *mce;
  303 	struct rxe_qp *qp;
  304 	union ib_gid dgid;
  305 	int err;
  306 
  307 	if (skb->protocol == htons(ETH_P_IP))
  308 		ipv6_addr_set_v4mapped(ip_hdr(skb)->daddr,
  309 				       (struct in6_addr *)&dgid);
  310 	else if (skb->protocol == htons(ETH_P_IPV6))
  311 		memcpy(&dgid, &ipv6_hdr(skb)->daddr, sizeof(dgid));
  312 
  313 	/* lookup mcast group corresponding to mgid, takes a ref */
  314 	mcg = rxe_pool_get_key(&rxe->mc_grp_pool, &dgid);
  315 	if (!mcg)
  316 		goto err1;	/* mcast group not registered */
  317 
  318 	spin_lock_bh(&mcg->mcg_lock);
  319 
  320 	list_for_each_entry(mce, &mcg->qp_list, qp_list) {
  321 		qp = mce->qp;
  322 		pkt = SKB_TO_PKT(skb);
  323 
  324 		/* validate qp for incoming packet */
  325 		err = check_type_state(rxe, pkt, qp);
  326 		if (err)
  327 			continue;
  328 
  329 		err = check_keys(rxe, pkt, bth_qpn(pkt), qp);
  330 		if (err)
  331 			continue;
  332 
  333 		/* if *not* the last qp in the list
  334 		 * make a copy of the skb to post to the next qp
  335 		 */
  336 		skb_copy = (mce->qp_list.next != &mcg->qp_list) ?
  337 				skb_clone(skb, GFP_KERNEL) : NULL;
  338 
  339 		pkt->qp = qp;
  340 		rxe_add_ref(qp);
  341 		rxe_rcv_pkt(rxe, pkt, skb);
  342 
  343 		skb = skb_copy;
  344 		if (!skb)
  345 			break;
  346 	}
  347 
  348 	spin_unlock_bh(&mcg->mcg_lock);
  349 
  350 	rxe_drop_ref(mcg);	/* drop ref from rxe_pool_get_key. */
  351 
  352 err1:
  353 	if (skb)
  354 		kfree_skb(skb);
  355 }
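
rxe_rcv_mcast_pkt() above fans one packet out to every QP attached to the multicast group: each QP except the last receives a clone of the skb and the last one consumes the original, so exactly one buffer reference reaches each consumer. Note that skb_clone(skb, GFP_KERNEL) is called while mcg_lock is held via spin_lock_bh(); a possibly-sleeping allocation under a spin lock is exactly the pattern the LDV model at the end of this trace asserts against. A standalone sketch of the fan-out shape, with strdup() standing in for skb_clone():

/* Sketch: clone-per-consumer fan-out.  Every consumer but the last
 * gets a clone; the last gets the original, so each buffer is
 * consumed exactly once.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char *buf = strdup("packet");	/* stands in for the original skb */
	int nconsumers = 3;
	int i;

	for (i = 0; i < nconsumers && buf; i++) {
		/* clone unless this is the last consumer */
		char *next = (i + 1 < nconsumers) ? strdup(buf) : NULL;

		printf("deliver buffer to consumer %d: %s\n", i, buf);
		free(buf);		/* the consumer uses up its buffer */

		buf = next;
	}
	return 0;
}
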
  356 
  357 static int rxe_match_dgid(struct rxe_dev *rxe, struct sk_buff *skb)
  358 {
  359 	union ib_gid dgid;
  360 	union ib_gid *pdgid;
  361 	u16 index;
  362 
  363 	if (skb->protocol == htons(ETH_P_IP)) {
  364 		ipv6_addr_set_v4mapped(ip_hdr(skb)->daddr,
  365 				       (struct in6_addr *)&dgid);
  366 		pdgid = &dgid;
  367 	} else {
  368 		pdgid = (union ib_gid *)&ipv6_hdr(skb)->daddr;
  369 	}
  370 
  371 	return ib_find_cached_gid_by_port(&rxe->ib_dev, pdgid,
  372 					  IB_GID_TYPE_ROCE_UDP_ENCAP,
  373 					  1, rxe->ndev, &index);
  374 }
  375 
  376 /* rxe_rcv is called from the interface driver */
  377 int rxe_rcv(struct sk_buff *skb)
  378 {
  379 	int err;
  380 	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
  381 	struct rxe_dev *rxe = pkt->rxe;
  382 	__be32 *icrcp;
  383 	u32 calc_icrc, pack_icrc;
  384 
  385 	pkt->offset = 0;
  386 
  387 	if (unlikely(skb->len < pkt->offset + RXE_BTH_BYTES))
  388 		goto drop;
  389 
  390 	if (unlikely(rxe_match_dgid(rxe, skb) < 0)) {
  391 		pr_warn_ratelimited("failed matching dgid\n");
  392 		goto drop;
  393 	}
  394 
  395 	pkt->opcode = bth_opcode(pkt);
  396 	pkt->psn = bth_psn(pkt);
  397 	pkt->qp = NULL;
  398 	pkt->mask |= rxe_opcode[pkt->opcode].mask;
  399 
  400 	if (unlikely(skb->len < header_size(pkt)))
  401 		goto drop;
  402 
  403 	err = hdr_check(pkt);
  404 	if (unlikely(err))
  405 		goto drop;
  406 
  407 	/* Verify ICRC */
  408 	icrcp = (__be32 *)(pkt->hdr + pkt->paylen - RXE_ICRC_SIZE);
  409 	pack_icrc = be32_to_cpu(*icrcp);
  410 
  411 	calc_icrc = rxe_icrc_hdr(pkt, skb);
  412 	calc_icrc = crc32_le(calc_icrc, (u8 *)payload_addr(pkt), payload_size(pkt));
  413 	calc_icrc = cpu_to_be32(~calc_icrc);
  414 	if (unlikely(calc_icrc != pack_icrc)) {
  415 		char saddr[sizeof(struct in6_addr)];
  416 
  417 		if (skb->protocol == htons(ETH_P_IPV6))
  418 			sprintf(saddr, "%pI6", &ipv6_hdr(skb)->saddr);
  419 		else if (skb->protocol == htons(ETH_P_IP))
  420 			sprintf(saddr, "%pI4", &ip_hdr(skb)->saddr);
  421 		else
  422 			sprintf(saddr, "unknown");
  423 
  424 		pr_warn_ratelimited("bad ICRC from %s\n", saddr);
  425 		goto drop;
  426 	}
  427 
  428 	if (unlikely(bth_qpn(pkt) == IB_MULTICAST_QPN))
  429 		rxe_rcv_mcast_pkt(rxe, skb);
  430 	else
  431 		rxe_rcv_pkt(rxe, pkt, skb);
  432 
  433 	return 0;
  434 
  435 drop:
  436 	if (pkt->qp)
  437 		rxe_drop_ref(pkt->qp);
  438 
  439 	kfree_skb(skb);
  440 	return 0;
  441 }
  442 EXPORT_SYMBOL(rxe_rcv);
  443 
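
One sizing detail in rxe_rcv() above: saddr is declared with sizeof(struct in6_addr), i.e. 16 bytes, while "%pI6" can expand to 39 characters plus the terminating NUL, so the sprintf() on the IPv6 branch can overrun the stack buffer. A small userspace sketch of the safer shape, assuming the conventional INET6_ADDRSTRLEN bound (46) with snprintf() truncation as a backstop; plain "%s" stands in for the kernel-only "%pI6" specifier:

/* Sketch: size the scratch buffer for IPv6 address text and bound the
 * write with snprintf().  INET6_ADDRSTRLEN comes from <netinet/in.h>
 * in userspace; the kernel headers define an equivalent constant.
 */
#include <stdio.h>
#include <netinet/in.h>

int main(void)
{
	char saddr[INET6_ADDRSTRLEN];	/* 46: worst-case text plus NUL */

	snprintf(saddr, sizeof(saddr), "%s",
		 "2001:0db8:0000:0000:0000:0000:0000:0001");
	printf("bad ICRC from %s\n", saddr);
	return 0;
}
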
  444 #line 22 "/home/ubuntu/launches/work/current--X--drivers--X--defaultlinux-4.8-rc1.tar.xz--X--43_1a--X--cpachecker/linux-4.8-rc1.tar.xz/csd_deg_dscv/9940/dscv_tempdir/dscv/ri/43_1a/drivers/infiniband/sw/rxe/rxe_recv.o.c.prepared"
    1 
    2 #include <linux/kernel.h>
    3 bool ldv_is_err(const void *ptr);
    4 bool ldv_is_err_or_null(const void *ptr);
    5 void* ldv_err_ptr(long error);
    6 long ldv_ptr_err(const void *ptr);
    7 
    8 extern void ldv_spin_lock(void);
    9 extern void ldv_spin_unlock(void);
   10 extern int ldv_spin_trylock(void);
   11 
   12 #include <linux/kernel.h>
   13 #include <verifier/rcv.h>
   14 #include <linux/module.h>
   15 #include <linux/slab.h>
   16 
   17 extern void *ldv_undefined_pointer(void);
   18 extern void ldv_check_alloc_flags(gfp_t flags);
   19 extern void ldv_check_alloc_nonatomic(void);
   20 /* Returns an arbitrary page in addition to checking flags */
   21 extern struct page *ldv_check_alloc_flags_and_return_some_page(gfp_t flags);
   22 #line 1 "/home/ubuntu/launches/work/current--X--drivers--X--defaultlinux-4.8-rc1.tar.xz--X--43_1a--X--cpachecker/linux-4.8-rc1.tar.xz/csd_deg_dscv/9940/dscv_tempdir/dscv/ri/43_1a/drivers/infiniband/sw/rxe/rxe_resp.c"
   23 /*
   24  * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
   25  * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
   26  *
   27  * This software is available to you under a choice of one of two
   28  * licenses.  You may choose to be licensed under the terms of the GNU
   29  * General Public License (GPL) Version 2, available from the file
   30  * COPYING in the main directory of this source tree, or the
   31  * OpenIB.org BSD license below:
   32  *
   33  *     Redistribution and use in source and binary forms, with or
   34  *     without modification, are permitted provided that the following
   35  *     conditions are met:
   36  *
   37  *	- Redistributions of source code must retain the above
   38  *	  copyright notice, this list of conditions and the following
   39  *	  disclaimer.
   40  *
   41  *	- Redistributions in binary form must reproduce the above
   42  *	  copyright notice, this list of conditions and the following
   43  *	  disclaimer in the documentation and/or other materials
   44  *	  provided with the distribution.
   45  *
   46  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
   47  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
   48  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
   49  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
   50  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
   51  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
   52  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
   53  * SOFTWARE.
   54  */
   55 
   56 #include <linux/skbuff.h>
   57 
   58 #include "rxe.h"
   59 #include "rxe_loc.h"
   60 #include "rxe_queue.h"
   61 
   62 enum resp_states {
   63 	RESPST_NONE,
   64 	RESPST_GET_REQ,
   65 	RESPST_CHK_PSN,
   66 	RESPST_CHK_OP_SEQ,
   67 	RESPST_CHK_OP_VALID,
   68 	RESPST_CHK_RESOURCE,
   69 	RESPST_CHK_LENGTH,
   70 	RESPST_CHK_RKEY,
   71 	RESPST_EXECUTE,
   72 	RESPST_READ_REPLY,
   73 	RESPST_COMPLETE,
   74 	RESPST_ACKNOWLEDGE,
   75 	RESPST_CLEANUP,
   76 	RESPST_DUPLICATE_REQUEST,
   77 	RESPST_ERR_MALFORMED_WQE,
   78 	RESPST_ERR_UNSUPPORTED_OPCODE,
   79 	RESPST_ERR_MISALIGNED_ATOMIC,
   80 	RESPST_ERR_PSN_OUT_OF_SEQ,
   81 	RESPST_ERR_MISSING_OPCODE_FIRST,
   82 	RESPST_ERR_MISSING_OPCODE_LAST_C,
   83 	RESPST_ERR_MISSING_OPCODE_LAST_D1E,
   84 	RESPST_ERR_TOO_MANY_RDMA_ATM_REQ,
   85 	RESPST_ERR_RNR,
   86 	RESPST_ERR_RKEY_VIOLATION,
   87 	RESPST_ERR_LENGTH,
   88 	RESPST_ERR_CQ_OVERFLOW,
   89 	RESPST_ERROR,
   90 	RESPST_RESET,
   91 	RESPST_DONE,
   92 	RESPST_EXIT,
   93 };
   94 
   95 static char *resp_state_name[] = {
   96 	[RESPST_NONE]				= "NONE",
   97 	[RESPST_GET_REQ]			= "GET_REQ",
   98 	[RESPST_CHK_PSN]			= "CHK_PSN",
   99 	[RESPST_CHK_OP_SEQ]			= "CHK_OP_SEQ",
  100 	[RESPST_CHK_OP_VALID]			= "CHK_OP_VALID",
  101 	[RESPST_CHK_RESOURCE]			= "CHK_RESOURCE",
  102 	[RESPST_CHK_LENGTH]			= "CHK_LENGTH",
  103 	[RESPST_CHK_RKEY]			= "CHK_RKEY",
  104 	[RESPST_EXECUTE]			= "EXECUTE",
  105 	[RESPST_READ_REPLY]			= "READ_REPLY",
  106 	[RESPST_COMPLETE]			= "COMPLETE",
  107 	[RESPST_ACKNOWLEDGE]			= "ACKNOWLEDGE",
  108 	[RESPST_CLEANUP]			= "CLEANUP",
  109 	[RESPST_DUPLICATE_REQUEST]		= "DUPLICATE_REQUEST",
  110 	[RESPST_ERR_MALFORMED_WQE]		= "ERR_MALFORMED_WQE",
  111 	[RESPST_ERR_UNSUPPORTED_OPCODE]		= "ERR_UNSUPPORTED_OPCODE",
  112 	[RESPST_ERR_MISALIGNED_ATOMIC]		= "ERR_MISALIGNED_ATOMIC",
  113 	[RESPST_ERR_PSN_OUT_OF_SEQ]		= "ERR_PSN_OUT_OF_SEQ",
  114 	[RESPST_ERR_MISSING_OPCODE_FIRST]	= "ERR_MISSING_OPCODE_FIRST",
  115 	[RESPST_ERR_MISSING_OPCODE_LAST_C]	= "ERR_MISSING_OPCODE_LAST_C",
  116 	[RESPST_ERR_MISSING_OPCODE_LAST_D1E]	= "ERR_MISSING_OPCODE_LAST_D1E",
  117 	[RESPST_ERR_TOO_MANY_RDMA_ATM_REQ]	= "ERR_TOO_MANY_RDMA_ATM_REQ",
  118 	[RESPST_ERR_RNR]			= "ERR_RNR",
  119 	[RESPST_ERR_RKEY_VIOLATION]		= "ERR_RKEY_VIOLATION",
  120 	[RESPST_ERR_LENGTH]			= "ERR_LENGTH",
  121 	[RESPST_ERR_CQ_OVERFLOW]		= "ERR_CQ_OVERFLOW",
  122 	[RESPST_ERROR]				= "ERROR",
  123 	[RESPST_RESET]				= "RESET",
  124 	[RESPST_DONE]				= "DONE",
  125 	[RESPST_EXIT]				= "EXIT",
  126 };
  127 
  128 /* rxe_recv calls here to add a request packet to the input queue */
  129 void rxe_resp_queue_pkt(struct rxe_dev *rxe, struct rxe_qp *qp,
  130 			struct sk_buff *skb)
  131 {
  132 	int must_sched;
  133 	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
  134 
  135 	skb_queue_tail(&qp->req_pkts, skb);
  136 
  137 	must_sched = (pkt->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST) ||
  138 			(skb_queue_len(&qp->req_pkts) > 1);
  139 
  140 	rxe_run_task(&qp->resp.task, must_sched);
  141 }
  142 
  143 static inline enum resp_states get_req(struct rxe_qp *qp,
  144 				       struct rxe_pkt_info **pkt_p)
  145 {
  146 	struct sk_buff *skb;
  147 
  148 	if (qp->resp.state == QP_STATE_ERROR) {
  149 		skb = skb_dequeue(&qp->req_pkts);
  150 		if (skb) {
  151 			/* drain request packet queue */
  152 			rxe_drop_ref(qp);
  153 			kfree_skb(skb);
  154 			return RESPST_GET_REQ;
  155 		}
  156 
  157 		/* go drain recv wr queue */
  158 		return RESPST_CHK_RESOURCE;
  159 	}
  160 
  161 	skb = skb_peek(&qp->req_pkts);
  162 	if (!skb)
  163 		return RESPST_EXIT;
  164 
  165 	*pkt_p = SKB_TO_PKT(skb);
  166 
  167 	return (qp->resp.res) ? RESPST_READ_REPLY : RESPST_CHK_PSN;
  168 }
  169 
  170 static enum resp_states check_psn(struct rxe_qp *qp,
  171 				  struct rxe_pkt_info *pkt)
  172 {
  173 	int diff = psn_compare(pkt->psn, qp->resp.psn);
  174 
  175 	switch (qp_type(qp)) {
  176 	case IB_QPT_RC:
  177 		if (diff > 0) {
  178 			if (qp->resp.sent_psn_nak)
  179 				return RESPST_CLEANUP;
  180 
  181 			qp->resp.sent_psn_nak = 1;
  182 			return RESPST_ERR_PSN_OUT_OF_SEQ;
  183 
  184 		} else if (diff < 0) {
  185 			return RESPST_DUPLICATE_REQUEST;
  186 		}
  187 
  188 		if (qp->resp.sent_psn_nak)
  189 			qp->resp.sent_psn_nak = 0;
  190 
  191 		break;
  192 
  193 	case IB_QPT_UC:
  194 		if (qp->resp.drop_msg || diff != 0) {
  195 			if (pkt->mask & RXE_START_MASK) {
  196 				qp->resp.drop_msg = 0;
  197 				return RESPST_CHK_OP_SEQ;
  198 			}
  199 
  200 			qp->resp.drop_msg = 1;
  201 			return RESPST_CLEANUP;
  202 		}
  203 		break;
  204 	default:
  205 		break;
  206 	}
  207 
  208 	return RESPST_CHK_OP_SEQ;
  209 }
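
check_psn() relies on psn_compare(), defined elsewhere in the rxe headers. PSNs are 24-bit sequence numbers that wrap at 0xffffff, so plain subtraction cannot order them across the wrap; the usual trick, sketched below as an assumption about the helper, shifts the masked difference into the sign bit of a 32-bit signed value:

/* Sketch of 24-bit serial-number comparison in the spirit of
 * psn_compare(): shift the difference of two 24-bit PSNs up by 8 so
 * that the sign of the 32-bit result orders them even across the
 * 0xffffff -> 0x000000 wrap.
 */
#include <stdio.h>

static int example_psn_compare(unsigned int psn_a, unsigned int psn_b)
{
	return (int)((psn_a - psn_b) << 8);	/* >0: a after b */
}

int main(void)
{
	/* 0x000001 is two steps after 0xffffff despite being smaller */
	printf("%d\n", example_psn_compare(0x000001, 0xffffff) > 0);
	return 0;
}
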
  210 
  211 static enum resp_states check_op_seq(struct rxe_qp *qp,
  212 				     struct rxe_pkt_info *pkt)
  213 {
  214 	switch (qp_type(qp)) {
  215 	case IB_QPT_RC:
  216 		switch (qp->resp.opcode) {
  217 		case IB_OPCODE_RC_SEND_FIRST:
  218 		case IB_OPCODE_RC_SEND_MIDDLE:
  219 			switch (pkt->opcode) {
  220 			case IB_OPCODE_RC_SEND_MIDDLE:
  221 			case IB_OPCODE_RC_SEND_LAST:
  222 			case IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE:
  223 			case IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE:
  224 				return RESPST_CHK_OP_VALID;
  225 			default:
  226 				return RESPST_ERR_MISSING_OPCODE_LAST_C;
  227 			}
  228 
  229 		case IB_OPCODE_RC_RDMA_WRITE_FIRST:
  230 		case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
  231 			switch (pkt->opcode) {
  232 			case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
  233 			case IB_OPCODE_RC_RDMA_WRITE_LAST:
  234 			case IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
  235 				return RESPST_CHK_OP_VALID;
  236 			default:
  237 				return RESPST_ERR_MISSING_OPCODE_LAST_C;
  238 			}
  239 
  240 		default:
  241 			switch (pkt->opcode) {
  242 			case IB_OPCODE_RC_SEND_MIDDLE:
  243 			case IB_OPCODE_RC_SEND_LAST:
  244 			case IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE:
  245 			case IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE:
  246 			case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
  247 			case IB_OPCODE_RC_RDMA_WRITE_LAST:
  248 			case IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
  249 				return RESPST_ERR_MISSING_OPCODE_FIRST;
  250 			default:
  251 				return RESPST_CHK_OP_VALID;
  252 			}
  253 		}
  254 		break;
  255 
  256 	case IB_QPT_UC:
  257 		switch (qp->resp.opcode) {
  258 		case IB_OPCODE_UC_SEND_FIRST:
  259 		case IB_OPCODE_UC_SEND_MIDDLE:
  260 			switch (pkt->opcode) {
  261 			case IB_OPCODE_UC_SEND_MIDDLE:
  262 			case IB_OPCODE_UC_SEND_LAST:
  263 			case IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE:
  264 				return RESPST_CHK_OP_VALID;
  265 			default:
  266 				return RESPST_ERR_MISSING_OPCODE_LAST_D1E;
  267 			}
  268 
  269 		case IB_OPCODE_UC_RDMA_WRITE_FIRST:
  270 		case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
  271 			switch (pkt->opcode) {
  272 			case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
  273 			case IB_OPCODE_UC_RDMA_WRITE_LAST:
  274 			case IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
  275 				return RESPST_CHK_OP_VALID;
  276 			default:
  277 				return RESPST_ERR_MISSING_OPCODE_LAST_D1E;
  278 			}
  279 
  280 		default:
  281 			switch (pkt->opcode) {
  282 			case IB_OPCODE_UC_SEND_MIDDLE:
  283 			case IB_OPCODE_UC_SEND_LAST:
  284 			case IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE:
  285 			case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
  286 			case IB_OPCODE_UC_RDMA_WRITE_LAST:
  287 			case IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
  288 				qp->resp.drop_msg = 1;
  289 				return RESPST_CLEANUP;
  290 			default:
  291 				return RESPST_CHK_OP_VALID;
  292 			}
  293 		}
  294 		break;
  295 
  296 	default:
  297 		return RESPST_CHK_OP_VALID;
  298 	}
  299 }
  300 
  301 static enum resp_states check_op_valid(struct rxe_qp *qp,
  302 				       struct rxe_pkt_info *pkt)
  303 {
  304 	switch (qp_type(qp)) {
  305 	case IB_QPT_RC:
  306 		if (((pkt->mask & RXE_READ_MASK) &&
  307 		     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_READ)) ||
  308 		    ((pkt->mask & RXE_WRITE_MASK) &&
  309 		     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_WRITE)) ||
  310 		    ((pkt->mask & RXE_ATOMIC_MASK) &&
  311 		     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_ATOMIC))) {
  312 			return RESPST_ERR_UNSUPPORTED_OPCODE;
  313 		}
  314 
  315 		break;
  316 
  317 	case IB_QPT_UC:
  318 		if ((pkt->mask & RXE_WRITE_MASK) &&
  319 		    !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_WRITE)) {
  320 			qp->resp.drop_msg = 1;
  321 			return RESPST_CLEANUP;
  322 		}
  323 
  324 		break;
  325 
  326 	case IB_QPT_UD:
  327 	case IB_QPT_SMI:
  328 	case IB_QPT_GSI:
  329 		break;
  330 
  331 	default:
  332 		WARN_ON(1);
  333 		break;
  334 	}
  335 
  336 	return RESPST_CHK_RESOURCE;
  337 }
  338 
  339 static enum resp_states get_srq_wqe(struct rxe_qp *qp)
  340 {
  341 	struct rxe_srq *srq = qp->srq;
  342 	struct rxe_queue *q = srq->rq.queue;
  343 	struct rxe_recv_wqe *wqe;
  344 	struct ib_event ev;
  345 
  346 	if (srq->error)
  347 		return RESPST_ERR_RNR;
  348 
  349 	spin_lock_bh(&srq->rq.consumer_lock);
  350 
  351 	wqe = queue_head(q);
  352 	if (!wqe) {
  353 		spin_unlock_bh(&srq->rq.consumer_lock);
  354 		return RESPST_ERR_RNR;
  355 	}
  356 
  357 	/* note that kernel and user space recv wqes have the same size */
  358 	memcpy(&qp->resp.srq_wqe, wqe, sizeof(qp->resp.srq_wqe));
  359 
  360 	qp->resp.wqe = &qp->resp.srq_wqe.wqe;
  361 	advance_consumer(q);
  362 
  363 	if (srq->limit && srq->ibsrq.event_handler &&
  364 	    (queue_count(q) < srq->limit)) {
  365 		srq->limit = 0;
  366 		goto event;
  367 	}
  368 
  369 	spin_unlock_bh(&srq->rq.consumer_lock);
  370 	return RESPST_CHK_LENGTH;
  371 
  372 event:
  373 	spin_unlock_bh(&srq->rq.consumer_lock);
  374 	ev.device = qp->ibqp.device;
  375 	ev.element.srq = qp->ibqp.srq;
  376 	ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
  377 	srq->ibsrq.event_handler(&ev, srq->ibsrq.srq_context);
  378 	return RESPST_CHK_LENGTH;
  379 }
  380 
  381 static enum resp_states check_resource(struct rxe_qp *qp,
  382 				       struct rxe_pkt_info *pkt)
  383 {
  384 	struct rxe_srq *srq = qp->srq;
  385 
  386 	if (qp->resp.state == QP_STATE_ERROR) {
  387 		if (qp->resp.wqe) {
  388 			qp->resp.status = IB_WC_WR_FLUSH_ERR;
  389 			return RESPST_COMPLETE;
  390 		} else if (!srq) {
  391 			qp->resp.wqe = queue_head(qp->rq.queue);
  392 			if (qp->resp.wqe) {
  393 				qp->resp.status = IB_WC_WR_FLUSH_ERR;
  394 				return RESPST_COMPLETE;
  395 			} else {
  396 				return RESPST_EXIT;
  397 			}
  398 		} else {
  399 			return RESPST_EXIT;
  400 		}
  401 	}
  402 
  403 	if (pkt->mask & RXE_READ_OR_ATOMIC) {
  404 		/* it is the requester's job not to send
  405 		 * too many read/atomic ops; we just
  406 		 * recycle the responder resource queue
  407 		 */
  408 		if (likely(qp->attr.max_rd_atomic > 0))
  409 			return RESPST_CHK_LENGTH;
  410 		else
  411 			return RESPST_ERR_TOO_MANY_RDMA_ATM_REQ;
  412 	}
  413 
  414 	if (pkt->mask & RXE_RWR_MASK) {
  415 		if (srq)
  416 			return get_srq_wqe(qp);
  417 
  418 		qp->resp.wqe = queue_head(qp->rq.queue);
  419 		return (qp->resp.wqe) ? RESPST_CHK_LENGTH : RESPST_ERR_RNR;
  420 	}
  421 
  422 	return RESPST_CHK_LENGTH;
  423 }
  424 
  425 static enum resp_states check_length(struct rxe_qp *qp,
  426 				     struct rxe_pkt_info *pkt)
  427 {
  428 	switch (qp_type(qp)) {
  429 	case IB_QPT_RC:
  430 		return RESPST_CHK_RKEY;
  431 
  432 	case IB_QPT_UC:
  433 		return RESPST_CHK_RKEY;
  434 
  435 	default:
  436 		return RESPST_CHK_RKEY;
  437 	}
  438 }
  439 
  440 static enum resp_states check_rkey(struct rxe_qp *qp,
  441 				   struct rxe_pkt_info *pkt)
  442 {
  443 	struct rxe_mem *mem;
  444 	u64 va;
  445 	u32 rkey;
  446 	u32 resid;
  447 	u32 pktlen;
  448 	int mtu = qp->mtu;
  449 	enum resp_states state;
  450 	int access;
  451 
  452 	if (pkt->mask & (RXE_READ_MASK | RXE_WRITE_MASK)) {
  453 		if (pkt->mask & RXE_RETH_MASK) {
  454 			qp->resp.va = reth_va(pkt);
  455 			qp->resp.rkey = reth_rkey(pkt);
  456 			qp->resp.resid = reth_len(pkt);
  457 		}
  458 		access = (pkt->mask & RXE_READ_MASK) ? IB_ACCESS_REMOTE_READ
  459 						     : IB_ACCESS_REMOTE_WRITE;
  460 	} else if (pkt->mask & RXE_ATOMIC_MASK) {
  461 		qp->resp.va = atmeth_va(pkt);
  462 		qp->resp.rkey = atmeth_rkey(pkt);
  463 		qp->resp.resid = sizeof(u64);
  464 		access = IB_ACCESS_REMOTE_ATOMIC;
  465 	} else {
  466 		return RESPST_EXECUTE;
  467 	}
  468 
  469 	va	= qp->resp.va;
  470 	rkey	= qp->resp.rkey;
  471 	resid	= qp->resp.resid;
  472 	pktlen	= payload_size(pkt);
  473 
  474 	mem = lookup_mem(qp->pd, access, rkey, lookup_remote);
  475 	if (!mem) {
  476 		state = RESPST_ERR_RKEY_VIOLATION;
  477 		goto err1;
  478 	}
  479 
  480 	if (unlikely(mem->state == RXE_MEM_STATE_FREE)) {
  481 		state = RESPST_ERR_RKEY_VIOLATION;
  482 		goto err1;
  483 	}
  484 
  485 	if (mem_check_range(mem, va, resid)) {
  486 		state = RESPST_ERR_RKEY_VIOLATION;
  487 		goto err2;
  488 	}
  489 
  490 	if (pkt->mask & RXE_WRITE_MASK)	 {
  491 		if (resid > mtu) {
  492 			if (pktlen != mtu || bth_pad(pkt)) {
  493 				state = RESPST_ERR_LENGTH;
  494 				goto err2;
  495 			}
  496 
  497 			resid = mtu;
  498 		} else {
  499 			if (pktlen != resid) {
  500 				state = RESPST_ERR_LENGTH;
  501 				goto err2;
  502 			}
  503 			if ((bth_pad(pkt) != (0x3 & (-resid)))) {
  504 				/* This case may not match the spec exactly,
  505 				 * but nothing else fits.
  506 				 */
  507 				state = RESPST_ERR_LENGTH;
  508 				goto err2;
  509 			}
  510 		}
  511 	}
  512 
  513 	WARN_ON(qp->resp.mr);
  514 
  515 	qp->resp.mr = mem;
  516 	return RESPST_EXECUTE;
  517 
  518 err2:
  519 	rxe_drop_ref(mem);
  520 err1:
  521 	return state;
  522 }
  523 
  524 static enum resp_states send_data_in(struct rxe_qp *qp, void *data_addr,
  525 				     int data_len)
  526 {
  527 	int err;
  528 	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
  529 
  530 	err = copy_data(rxe, qp->pd, IB_ACCESS_LOCAL_WRITE, &qp->resp.wqe->dma,
  531 			data_addr, data_len, to_mem_obj, NULL);
  532 	if (unlikely(err))
  533 		return (err == -ENOSPC) ? RESPST_ERR_LENGTH
  534 					: RESPST_ERR_MALFORMED_WQE;
  535 
  536 	return RESPST_NONE;
  537 }
  538 
  539 static enum resp_states write_data_in(struct rxe_qp *qp,
  540 				      struct rxe_pkt_info *pkt)
  541 {
  542 	enum resp_states rc = RESPST_NONE;
  543 	int	err;
  544 	int data_len = payload_size(pkt);
  545 
  546 	err = rxe_mem_copy(qp->resp.mr, qp->resp.va, payload_addr(pkt),
  547 			   data_len, to_mem_obj, NULL);
  548 	if (err) {
  549 		rc = RESPST_ERR_RKEY_VIOLATION;
  550 		goto out;
  551 	}
  552 
  553 	qp->resp.va += data_len;
  554 	qp->resp.resid -= data_len;
  555 
  556 out:
  557 	return rc;
  558 }
  559 
  560 /* Guarantee atomicity of atomic operations at the machine level. */
  561 static DEFINE_SPINLOCK(atomic_ops_lock);
  562 
  563 static enum resp_states process_atomic(struct rxe_qp *qp,
  564 				       struct rxe_pkt_info *pkt)
  565 {
  566 	u64 iova = atmeth_va(pkt);
  567 	u64 *vaddr;
  568 	enum resp_states ret;
  569 	struct rxe_mem *mr = qp->resp.mr;
  570 
  571 	if (mr->state != RXE_MEM_STATE_VALID) {
  572 		ret = RESPST_ERR_RKEY_VIOLATION;
  573 		goto out;
  574 	}
  575 
  576 	vaddr = iova_to_vaddr(mr, iova, sizeof(u64));
  577 
  578 	/* check that vaddr is 8-byte aligned */
  579 	if (!vaddr || (uintptr_t)vaddr & 7) {
  580 		ret = RESPST_ERR_MISALIGNED_ATOMIC;
  581 		goto out;
  582 	}
  583 
  584 	spin_lock_bh(&atomic_ops_lock);
  585 
  586 	qp->resp.atomic_orig = *vaddr;
  587 
  588 	if (pkt->opcode == IB_OPCODE_RC_COMPARE_SWAP ||
  589 	    pkt->opcode == IB_OPCODE_RD_COMPARE_SWAP) {
  590 		if (*vaddr == atmeth_comp(pkt))
  591 			*vaddr = atmeth_swap_add(pkt);
  592 	} else {
  593 		*vaddr += atmeth_swap_add(pkt);
  594 	}
  595 
  596 	spin_unlock_bh(&atomic_ops_lock);
  597 
  598 	ret = RESPST_NONE;
  599 out:
  600 	return ret;
  601 }
  602 
  603 static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
  604 					  struct rxe_pkt_info *pkt,
  605 					  struct rxe_pkt_info *ack,
  606 					  int opcode,
  607 					  int payload,
  608 					  u32 psn,
  609 					  u8 syndrome,
  610 					  u32 *crcp)
  611 {
  612 	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
  613 	struct sk_buff *skb;
  614 	u32 crc = 0;
  615 	u32 *p;
  616 	int paylen;
  617 	int pad;
  618 	int err;
  619 
  620 	/*
  621 	 * allocate packet
  622 	 */
  623 	pad = (-payload) & 0x3;
  624 	paylen = rxe_opcode[opcode].length + payload + pad + RXE_ICRC_SIZE;
  625 
  626 	skb = rxe->ifc_ops->init_packet(rxe, &qp->pri_av, paylen, ack);
  627 	if (!skb)
  628 		return NULL;
  629 
  630 	ack->qp = qp;
  631 	ack->opcode = opcode;
  632 	ack->mask = rxe_opcode[opcode].mask;
  633 	ack->offset = pkt->offset;
  634 	ack->paylen = paylen;
  635 
  636 	/* fill in bth using the request packet headers */
  637 	memcpy(ack->hdr, pkt->hdr, pkt->offset + RXE_BTH_BYTES);
  638 
  639 	bth_set_opcode(ack, opcode);
  640 	bth_set_qpn(ack, qp->attr.dest_qp_num);
  641 	bth_set_pad(ack, pad);
  642 	bth_set_se(ack, 0);
  643 	bth_set_psn(ack, psn);
  644 	bth_set_ack(ack, 0);
  645 	ack->psn = psn;
  646 
  647 	if (ack->mask & RXE_AETH_MASK) {
  648 		aeth_set_syn(ack, syndrome);
  649 		aeth_set_msn(ack, qp->resp.msn);
  650 	}
  651 
  652 	if (ack->mask & RXE_ATMACK_MASK)
  653 		atmack_set_orig(ack, qp->resp.atomic_orig);
  654 
  655 	err = rxe->ifc_ops->prepare(rxe, ack, skb, &crc);
  656 	if (err) {
  657 		kfree_skb(skb);
  658 		return NULL;
  659 	}
  660 
  661 	if (crcp) {
  662 		/* CRC computation will be continued by the caller */
  663 		*crcp = crc;
  664 	} else {
  665 		p = payload_addr(ack) + payload + bth_pad(ack);
  666 		*p = ~crc;
  667 	}
  668 
  669 	return skb;
  670 }
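
prepare_ack_packet() rounds the payload up to a 4-byte boundary with pad = (-payload) & 0x3 and reserves RXE_ICRC_SIZE (4) further bytes for the trailing ICRC, which is also where rxe_rcv() located icrcp earlier in this trace. A worked sketch of that arithmetic; the header length is illustrative only:

/* Worked sketch of the pad/paylen arithmetic: payload rounded up to a
 * 4-byte boundary, then a 4-byte ICRC appended after the pad bytes.
 */
#include <stdio.h>

#define EX_ICRC_SIZE 4	/* restates RXE_ICRC_SIZE from the rxe headers */

int main(void)
{
	int hdr_len = 16;	/* hypothetical BTH + AETH length */
	int payload;

	for (payload = 0; payload <= 4; payload++) {
		int pad = (-payload) & 0x3;
		int paylen = hdr_len + payload + pad + EX_ICRC_SIZE;

		printf("payload %d -> pad %d, paylen %d, ICRC at %d\n",
		       payload, pad, paylen, paylen - EX_ICRC_SIZE);
	}
	return 0;
}
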
  671 
  672 /* RDMA read response. If res is not NULL, then we have a current RDMA request
  673  * being processed or replayed.
  674  */
  675 static enum resp_states read_reply(struct rxe_qp *qp,
  676 				   struct rxe_pkt_info *req_pkt)
  677 {
  678 	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
  679 	struct rxe_pkt_info ack_pkt;
  680 	struct sk_buff *skb;
  681 	int mtu = qp->mtu;
  682 	enum resp_states state;
  683 	int payload;
  684 	int opcode;
  685 	int err;
  686 	struct resp_res *res = qp->resp.res;
  687 	u32 icrc;
  688 	u32 *p;
  689 
  690 	if (!res) {
  691 		/* This is the first time we process that request. Get a
  692 		 * resource
  693 		 */
  694 		res = &qp->resp.resources[qp->resp.res_head];
  695 
  696 		free_rd_atomic_resource(qp, res);
  697 		rxe_advance_resp_resource(qp);
  698 
  699 		res->type		= RXE_READ_MASK;
  700 
  701 		res->read.va		= qp->resp.va;
  702 		res->read.va_org	= qp->resp.va;
  703 
  704 		res->first_psn		= req_pkt->psn;
  705 		res->last_psn		= req_pkt->psn +
  706 					  (reth_len(req_pkt) + mtu - 1) /
  707 					  mtu - 1;
  708 		res->cur_psn		= req_pkt->psn;
  709 
  710 		res->read.resid		= qp->resp.resid;
  711 		res->read.length	= qp->resp.resid;
  712 		res->read.rkey		= qp->resp.rkey;
  713 
  714 		/* note res inherits the reference to mr from qp */
  715 		res->read.mr		= qp->resp.mr;
  716 		qp->resp.mr		= NULL;
  717 
  718 		qp->resp.res		= res;
  719 		res->state		= rdatm_res_state_new;
  720 	}
  721 
  722 	if (res->state == rdatm_res_state_new) {
  723 		if (res->read.resid <= mtu)
  724 			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY;
  725 		else
  726 			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST;
  727 	} else {
  728 		if (res->read.resid > mtu)
  729 			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE;
  730 		else
  731 			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST;
  732 	}
  733 
  734 	res->state = rdatm_res_state_next;
  735 
  736 	payload = min_t(int, res->read.resid, mtu);
  737 
  738 	skb = prepare_ack_packet(qp, req_pkt, &ack_pkt, opcode, payload,
  739 				 res->cur_psn, AETH_ACK_UNLIMITED, &icrc);
  740 	if (!skb)
  741 		return RESPST_ERR_RNR;
  742 
  743 	err = rxe_mem_copy(res->read.mr, res->read.va, payload_addr(&ack_pkt),
  744 			   payload, from_mem_obj, &icrc);
  745 	if (err)
  746 		pr_err("Failed copying memory\n");
  747 
  748 	p = payload_addr(&ack_pkt) + payload + bth_pad(&ack_pkt);
  749 	*p = ~icrc;
  750 
  751 	err = rxe_xmit_packet(rxe, qp, &ack_pkt, skb);
  752 	if (err) {
  753 		pr_err("Failed sending RDMA reply.\n");
  754 		kfree_skb(skb);
  755 		return RESPST_ERR_RNR;
  756 	}
  757 
  758 	res->read.va += payload;
  759 	res->read.resid -= payload;
  760 	res->cur_psn = (res->cur_psn + 1) & BTH_PSN_MASK;
  761 
  762 	if (res->read.resid > 0) {
  763 		state = RESPST_DONE;
  764 	} else {
  765 		qp->resp.res = NULL;
  766 		qp->resp.opcode = -1;
  767 		qp->resp.psn = res->cur_psn;
  768 		state = RESPST_CLEANUP;
  769 	}
  770 
  771 	return state;
  772 }
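
The resource setup in read_reply() derives the reply's PSN range from the request: a read of reth_len bytes at path MTU mtu produces ceil(len / mtu) response packets, so last_psn = first_psn + (len + mtu - 1) / mtu - 1, before 24-bit masking. A worked sketch with illustrative numbers:

/* Worked sketch of the response PSN-range arithmetic used above. */
#include <stdio.h>

int main(void)
{
	unsigned int first_psn = 100;
	unsigned int len = 10000, mtu = 4096;
	unsigned int npkts = (len + mtu - 1) / mtu;	/* ceil: 3 packets */

	printf("responses %u, last_psn %u\n", npkts, first_psn + npkts - 1);
	return 0;
}
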
  773 
  774 /* Executes a new request. A retried request never reaches this function (sends
  775  * and writes are discarded, and reads and atomics are retried elsewhere).
  776  */
  777 static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
  778 {
  779 	enum resp_states err;
  780 
  781 	if (pkt->mask & RXE_SEND_MASK) {
  782 		if (qp_type(qp) == IB_QPT_UD ||
  783 		    qp_type(qp) == IB_QPT_SMI ||
  784 		    qp_type(qp) == IB_QPT_GSI) {
  785 			union rdma_network_hdr hdr;
  786 			struct sk_buff *skb = PKT_TO_SKB(pkt);
  787 
  788 			memset(&hdr, 0, sizeof(hdr));
  789 			if (skb->protocol == htons(ETH_P_IP))
  790 				memcpy(&hdr.roce4grh, ip_hdr(skb), sizeof(hdr.roce4grh));
  791 			else if (skb->protocol == htons(ETH_P_IPV6))
  792 				memcpy(&hdr.ibgrh, ipv6_hdr(skb), sizeof(hdr.ibgrh));
  793 
  794 			err = send_data_in(qp, &hdr, sizeof(hdr));
  795 			if (err)
  796 				return err;
  797 		}
  798 		err = send_data_in(qp, payload_addr(pkt), payload_size(pkt));
  799 		if (err)
  800 			return err;
  801 	} else if (pkt->mask & RXE_WRITE_MASK) {
  802 		err = write_data_in(qp, pkt);
  803 		if (err)
  804 			return err;
  805 	} else if (pkt->mask & RXE_READ_MASK) {
  806 		/* For RDMA Read we can increment the msn now. See C9-148. */
  807 		qp->resp.msn++;
  808 		return RESPST_READ_REPLY;
  809 	} else if (pkt->mask & RXE_ATOMIC_MASK) {
  810 		err = process_atomic(qp, pkt);
  811 		if (err)
  812 			return err;
  813 	} else
  814 		/* Unreachable */
  815 		WARN_ON(1);
  816 
  817 	/* We successfully processed this new request. */
  818 	qp->resp.msn++;
  819 
  820 	/* next expected psn, read handles this separately */
  821 	qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
  822 
  823 	qp->resp.opcode = pkt->opcode;
  824 	qp->resp.status = IB_WC_SUCCESS;
  825 
  826 	if (pkt->mask & RXE_COMP_MASK)
  827 		return RESPST_COMPLETE;
  828 	else if (qp_type(qp) == IB_QPT_RC)
  829 		return RESPST_ACKNOWLEDGE;
  830 	else
  831 		return RESPST_CLEANUP;
  832 }
  833 
  834 static enum resp_states do_complete(struct rxe_qp *qp,
  835 				    struct rxe_pkt_info *pkt)
  836 {
  837 	struct rxe_cqe cqe;
  838 	struct ib_wc *wc = &cqe.ibwc;
  839 	struct ib_uverbs_wc *uwc = &cqe.uibwc;
  840 	struct rxe_recv_wqe *wqe = qp->resp.wqe;
  841 
  842 	if (unlikely(!wqe))
  843 		return RESPST_CLEANUP;
  844 
  845 	memset(&cqe, 0, sizeof(cqe));
  846 
  847 	wc->wr_id		= wqe->wr_id;
  848 	wc->status		= qp->resp.status;
  849 	wc->qp			= &qp->ibqp;
  850 
  851 	/* fields after status are not required for errors */
  852 	if (wc->status == IB_WC_SUCCESS) {
  853 		wc->opcode = (pkt->mask & RXE_IMMDT_MASK &&
  854 				pkt->mask & RXE_WRITE_MASK) ?
  855 					IB_WC_RECV_RDMA_WITH_IMM : IB_WC_RECV;
  856 		wc->vendor_err = 0;
  857 		wc->byte_len = wqe->dma.length - wqe->dma.resid;
  858 
  859 		/* fields after byte_len are different between kernel and user
  860 		 * space
  861 		 */
  862 		if (qp->rcq->is_user) {
  863 			uwc->wc_flags = IB_WC_GRH;
  864 
  865 			if (pkt->mask & RXE_IMMDT_MASK) {
  866 				uwc->wc_flags |= IB_WC_WITH_IMM;
  867 				uwc->ex.imm_data =
  868 					(__u32 __force)immdt_imm(pkt);
  869 			}
  870 
  871 			if (pkt->mask & RXE_IETH_MASK) {
  872 				uwc->wc_flags |= IB_WC_WITH_INVALIDATE;
  873 				uwc->ex.invalidate_rkey = ieth_rkey(pkt);
  874 			}
  875 
  876 			uwc->qp_num		= qp->ibqp.qp_num;
  877 
  878 			if (pkt->mask & RXE_DETH_MASK)
  879 				uwc->src_qp = deth_sqp(pkt);
  880 
  881 			uwc->port_num		= qp->attr.port_num;
  882 		} else {
  883 			struct sk_buff *skb = PKT_TO_SKB(pkt);
  884 
  885 			wc->wc_flags = IB_WC_GRH | IB_WC_WITH_NETWORK_HDR_TYPE;
  886 			if (skb->protocol == htons(ETH_P_IP))
  887 				wc->network_hdr_type = RDMA_NETWORK_IPV4;
  888 			else
  889 				wc->network_hdr_type = RDMA_NETWORK_IPV6;
  890 
  891 			if (pkt->mask & RXE_IMMDT_MASK) {
  892 				wc->wc_flags |= IB_WC_WITH_IMM;
  893 				wc->ex.imm_data = immdt_imm(pkt);
  894 			}
  895 
  896 			if (pkt->mask & RXE_IETH_MASK) {
  897 				struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
  898 				struct rxe_mem *rmr;
  899 
  900 				wc->wc_flags |= IB_WC_WITH_INVALIDATE;
  901 				wc->ex.invalidate_rkey = ieth_rkey(pkt);
  902 
  903 				rmr = rxe_pool_get_index(&rxe->mr_pool,
  904 							 wc->ex.invalidate_rkey >> 8);
  905 				if (unlikely(!rmr)) {
  906 					pr_err("Bad rkey %#x invalidation\n", wc->ex.invalidate_rkey);
  907 					return RESPST_ERROR;
  908 				}
  909 				rmr->state = RXE_MEM_STATE_FREE;
  910 			}
  911 
  912 			wc->qp			= &qp->ibqp;
  913 
  914 			if (pkt->mask & RXE_DETH_MASK)
  915 				wc->src_qp = deth_sqp(pkt);
  916 
  917 			wc->port_num		= qp->attr.port_num;
  918 		}
  919 	}
  920 
  921 	/* the srq path already holds a copy of the wqe; the !srq path still references the queue entry, so advance the consumer now */
  922 	if (!qp->srq)
  923 		advance_consumer(qp->rq.queue);
  924 
  925 	qp->resp.wqe = NULL;
  926 
  927 	if (rxe_cq_post(qp->rcq, &cqe, pkt ? bth_se(pkt) : 1))
  928 		return RESPST_ERR_CQ_OVERFLOW;
  929 
  930 	if (qp->resp.state == QP_STATE_ERROR)
  931 		return RESPST_CHK_RESOURCE;
  932 
  933 	if (!pkt)
  934 		return RESPST_DONE;
  935 	else if (qp_type(qp) == IB_QPT_RC)
  936 		return RESPST_ACKNOWLEDGE;
  937 	else
  938 		return RESPST_CLEANUP;
  939 }
  940 
  941 static int send_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
  942 		    u8 syndrome, u32 psn)
  943 {
  944 	int err = 0;
  945 	struct rxe_pkt_info ack_pkt;
  946 	struct sk_buff *skb;
  947 	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
  948 
  949 	skb = prepare_ack_packet(qp, pkt, &ack_pkt, IB_OPCODE_RC_ACKNOWLEDGE,
  950 				 0, psn, syndrome, NULL);
  951 	if (!skb) {
  952 		err = -ENOMEM;
  953 		goto err1;
  954 	}
  955 
  956 	err = rxe_xmit_packet(rxe, qp, &ack_pkt, skb);
  957 	if (err) {
  958 		pr_err_ratelimited("Failed sending ack\n");
  959 		kfree_skb(skb);
  960 	}
  961 
  962 err1:
  963 	return err;
  964 }
  965 
  966 static int send_atomic_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
  967 			   u8 syndrome)
  968 {
  969 	int rc = 0;
  970 	struct rxe_pkt_info ack_pkt;
  971 	struct sk_buff *skb;
  972 	struct sk_buff *skb_copy;
  973 	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
  974 	struct resp_res *res;
  975 
  976 	skb = prepare_ack_packet(qp, pkt, &ack_pkt,
  977 				 IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE, 0, pkt->psn,
  978 				 syndrome, NULL);
  979 	if (!skb) {
  980 		rc = -ENOMEM;
  981 		goto out;
  982 	}
  983 
  984 	skb_copy = skb_clone(skb, GFP_ATOMIC);
  985 	if (skb_copy)
  986 		rxe_add_ref(qp); /* for the new SKB */
  987 	else {
  988 		pr_warn("Could not clone atomic response\n");
  989 		rc = -ENOMEM;
  990 		goto out;
  991 	}
  992 
  993 	res = &qp->resp.resources[qp->resp.res_head];
  994 	free_rd_atomic_resource(qp, res);
  995 	rxe_advance_resp_resource(qp);
  996 
  997 	res->type = RXE_ATOMIC_MASK;
  998 	res->atomic.skb = skb;
  999 	res->first_psn = qp->resp.psn;
 1000 	res->last_psn = qp->resp.psn;
 1001 	res->cur_psn = qp->resp.psn;
 1002 
 1003 	rc = rxe_xmit_packet(rxe, qp, &ack_pkt, skb_copy);
 1004 	if (rc) {
 1005 		pr_err_ratelimited("Failed sending ack\n");
 1006 		rxe_drop_ref(qp);
 1007 		kfree_skb(skb_copy);
 1008 	}
 1009 
 1010 out:
 1011 	return rc;
 1012 }
 1013 
 1014 static enum resp_states acknowledge(struct rxe_qp *qp,
 1015 				    struct rxe_pkt_info *pkt)
 1016 {
 1017 	if (qp_type(qp) != IB_QPT_RC)
 1018 		return RESPST_CLEANUP;
 1019 
 1020 	if (qp->resp.aeth_syndrome != AETH_ACK_UNLIMITED)
 1021 		send_ack(qp, pkt, qp->resp.aeth_syndrome, pkt->psn);
 1022 	else if (pkt->mask & RXE_ATOMIC_MASK)
 1023 		send_atomic_ack(qp, pkt, AETH_ACK_UNLIMITED);
 1024 	else if (bth_ack(pkt))
 1025 		send_ack(qp, pkt, AETH_ACK_UNLIMITED, pkt->psn);
 1026 
 1027 	return RESPST_CLEANUP;
 1028 }
 1029 
 1030 static enum resp_states cleanup(struct rxe_qp *qp,
 1031 				struct rxe_pkt_info *pkt)
 1032 {
 1033 	struct sk_buff *skb;
 1034 
 1035 	if (pkt) {
 1036 		skb = skb_dequeue(&qp->req_pkts);
 1037 		rxe_drop_ref(qp);
 1038 		kfree_skb(skb);
 1039 	}
 1040 
 1041 	if (qp->resp.mr) {
 1042 		rxe_drop_ref(qp->resp.mr);
 1043 		qp->resp.mr = NULL;
 1044 	}
 1045 
 1046 	return RESPST_DONE;
 1047 }
 1048 
 1049 static struct resp_res *find_resource(struct rxe_qp *qp, u32 psn)
 1050 {
 1051 	int i;
 1052 
 1053 	for (i = 0; i < qp->attr.max_rd_atomic; i++) {
 1054 		struct resp_res *res = &qp->resp.resources[i];
 1055 
 1056 		if (res->type == 0)
 1057 			continue;
 1058 
 1059 		if (psn_compare(psn, res->first_psn) >= 0 &&
 1060 		    psn_compare(psn, res->last_psn) <= 0) {
 1061 			return res;
 1062 		}
 1063 	}
 1064 
 1065 	return NULL;
 1066 }
 1067 
 1068 static enum resp_states duplicate_request(struct rxe_qp *qp,
 1069 					  struct rxe_pkt_info *pkt)
 1070 {
 1071 	enum resp_states rc;
 1072 
 1073 	if (pkt->mask & RXE_SEND_MASK ||
 1074 	    pkt->mask & RXE_WRITE_MASK) {
 1075 		/* SEND. Ack again and cleanup. C9-105. */
 1076 		if (bth_ack(pkt))
 1077 			send_ack(qp, pkt, AETH_ACK_UNLIMITED, qp->resp.psn - 1);
 1078 		rc = RESPST_CLEANUP;
 1079 		goto out;
 1080 	} else if (pkt->mask & RXE_READ_MASK) {
 1081 		struct resp_res *res;
 1082 
 1083 		res = find_resource(qp, pkt->psn);
 1084 		if (!res) {
 1085 			/* Resource not found. Class D error.  Drop the
 1086 			 * request.
 1087 			 */
 1088 			rc = RESPST_CLEANUP;
 1089 			goto out;
 1090 		} else {
 1091 			/* Ensure this new request is the same as the previous
 1092 			 * one or a subset of it.
 1093 			 */
 1094 			u64 iova = reth_va(pkt);
 1095 			u32 resid = reth_len(pkt);
 1096 
 1097 			if (iova < res->read.va_org ||
 1098 			    resid > res->read.length ||
 1099 			    (iova + resid) > (res->read.va_org +
 1100 					      res->read.length)) {
 1101 				rc = RESPST_CLEANUP;
 1102 				goto out;
 1103 			}
 1104 
 1105 			if (reth_rkey(pkt) != res->read.rkey) {
 1106 				rc = RESPST_CLEANUP;
 1107 				goto out;
 1108 			}
 1109 
 1110 			res->cur_psn = pkt->psn;
 1111 			res->state = (pkt->psn == res->first_psn) ?
 1112 					rdatm_res_state_new :
 1113 					rdatm_res_state_replay;
 1114 
 1115 			/* Reset the resource, except length. */
 1116 			res->read.va_org = iova;
 1117 			res->read.va = iova;
 1118 			res->read.resid = resid;
 1119 
 1120 			/* Replay the RDMA read reply. */
 1121 			qp->resp.res = res;
 1122 			rc = RESPST_READ_REPLY;
 1123 			goto out;
 1124 		}
 1125 	} else {
 1126 		struct resp_res *res;
 1127 
 1128 		/* Find the operation in our list of responder resources. */
 1129 		res = find_resource(qp, pkt->psn);
 1130 		if (res) {
 1131 			struct sk_buff *skb_copy;
 1132 
 1133 			skb_copy = skb_clone(res->atomic.skb, GFP_ATOMIC);
 1134 			if (skb_copy) {
 1135 				rxe_add_ref(qp); /* for the new SKB */
 1136 			} else {
 1137 				pr_warn("Couldn't clone atomic resp\n");
 1138 				rc = RESPST_CLEANUP;
 1139 				goto out;
 1140 			}
 1141 			bth_set_psn(SKB_TO_PKT(skb_copy),
 1142 				    qp->resp.psn - 1);
 1143 			/* Resend the result. */
 1144 			rc = rxe_xmit_packet(to_rdev(qp->ibqp.device), qp,
 1145 					     pkt, skb_copy);
 1146 			if (rc) {
 1147 				pr_err("Failed resending result. This flow is not handled - skb ignored\n");
 1148 				kfree_skb(skb_copy);
 1149 				rc = RESPST_CLEANUP;
 1150 				goto out;
 1151 			}
 1152 		}
 1153 
 1154 		/* Resource not found. Class D error. Drop the request. */
 1155 		rc = RESPST_CLEANUP;
 1156 		goto out;
 1157 	}
 1158 out:
 1159 	return rc;
 1160 }
 1161 
 1162 /* Process a class A or C error. Both are treated the same in this implementation. */
 1163 static void do_class_ac_error(struct rxe_qp *qp, u8 syndrome,
 1164 			      enum ib_wc_status status)
 1165 {
 1166 	qp->resp.aeth_syndrome	= syndrome;
 1167 	qp->resp.status		= status;
 1168 
 1169 	/* indicate that we should go through the ERROR state */
 1170 	qp->resp.goto_error	= 1;
 1171 }
 1172 
 1173 static enum resp_states do_class_d1e_error(struct rxe_qp *qp)
 1174 {
 1175 	/* UC */
 1176 	if (qp->srq) {
 1177 		/* Class E */
 1178 		qp->resp.drop_msg = 1;
 1179 		if (qp->resp.wqe) {
 1180 			qp->resp.status = IB_WC_REM_INV_REQ_ERR;
 1181 			return RESPST_COMPLETE;
 1182 		} else {
 1183 			return RESPST_CLEANUP;
 1184 		}
 1185 	} else {
 1186 		/* Class D1. This packet may be the start of a
 1187 		 * new message and could be valid. The previous
 1188 		 * message is invalid and ignored. Reset the
 1189 		 * recv wr to its original state.
 1190 		 */
 1191 		if (qp->resp.wqe) {
 1192 			qp->resp.wqe->dma.resid = qp->resp.wqe->dma.length;
 1193 			qp->resp.wqe->dma.cur_sge = 0;
 1194 			qp->resp.wqe->dma.sge_offset = 0;
 1195 			qp->resp.opcode = -1;
 1196 		}
 1197 
 1198 		if (qp->resp.mr) {
 1199 			rxe_drop_ref(qp->resp.mr);
 1200 			qp->resp.mr = NULL;
 1201 		}
 1202 
 1203 		return RESPST_CLEANUP;
 1204 	}
 1205 }
 1206 
 1207 int rxe_responder(void *arg)
 1208 {
 1209 	struct rxe_qp *qp = (struct rxe_qp *)arg;
 1210 	enum resp_states state;
 1211 	struct rxe_pkt_info *pkt = NULL;
 1212 	int ret = 0;
 1213 
 1214 	qp->resp.aeth_syndrome = AETH_ACK_UNLIMITED;
 1215 
 1216 	if (!qp->valid) {
 1217 		ret = -EINVAL;
 1218 		goto done;
 1219 	}
 1220 
 1221 	switch (qp->resp.state) {
 1222 	case QP_STATE_RESET:
 1223 		state = RESPST_RESET;
 1224 		break;
 1225 
 1226 	default:
 1227 		state = RESPST_GET_REQ;
 1228 		break;
 1229 	}
 1230 
 1231 	while (1) {
 1232 		pr_debug("state = %s\n", resp_state_name[state]);
 1233 		switch (state) {
 1234 		case RESPST_GET_REQ:
 1235 			state = get_req(qp, &pkt);
 1236 			break;
 1237 		case RESPST_CHK_PSN:
 1238 			state = check_psn(qp, pkt);
 1239 			break;
 1240 		case RESPST_CHK_OP_SEQ:
 1241 			state = check_op_seq(qp, pkt);
 1242 			break;
 1243 		case RESPST_CHK_OP_VALID:
 1244 			state = check_op_valid(qp, pkt);
 1245 			break;
 1246 		case RESPST_CHK_RESOURCE:
 1247 			state = check_resource(qp, pkt);
 1248 			break;
 1249 		case RESPST_CHK_LENGTH:
 1250 			state = check_length(qp, pkt);
 1251 			break;
 1252 		case RESPST_CHK_RKEY:
 1253 			state = check_rkey(qp, pkt);
 1254 			break;
 1255 		case RESPST_EXECUTE:
 1256 			state = execute(qp, pkt);
 1257 			break;
 1258 		case RESPST_COMPLETE:
 1259 			state = do_complete(qp, pkt);
 1260 			break;
 1261 		case RESPST_READ_REPLY:
 1262 			state = read_reply(qp, pkt);
 1263 			break;
 1264 		case RESPST_ACKNOWLEDGE:
 1265 			state = acknowledge(qp, pkt);
 1266 			break;
 1267 		case RESPST_CLEANUP:
 1268 			state = cleanup(qp, pkt);
 1269 			break;
 1270 		case RESPST_DUPLICATE_REQUEST:
 1271 			state = duplicate_request(qp, pkt);
 1272 			break;
 1273 		case RESPST_ERR_PSN_OUT_OF_SEQ:
 1274 			/* RC only - Class B. Drop packet. */
 1275 			send_ack(qp, pkt, AETH_NAK_PSN_SEQ_ERROR, qp->resp.psn);
 1276 			state = RESPST_CLEANUP;
 1277 			break;
 1278 
 1279 		case RESPST_ERR_TOO_MANY_RDMA_ATM_REQ:
 1280 		case RESPST_ERR_MISSING_OPCODE_FIRST:
 1281 		case RESPST_ERR_MISSING_OPCODE_LAST_C:
 1282 		case RESPST_ERR_UNSUPPORTED_OPCODE:
 1283 		case RESPST_ERR_MISALIGNED_ATOMIC:
 1284 			/* RC Only - Class C. */
 1285 			do_class_ac_error(qp, AETH_NAK_INVALID_REQ,
 1286 					  IB_WC_REM_INV_REQ_ERR);
 1287 			state = RESPST_COMPLETE;
 1288 			break;
 1289 
 1290 		case RESPST_ERR_MISSING_OPCODE_LAST_D1E:
 1291 			state = do_class_d1e_error(qp);
 1292 			break;
 1293 		case RESPST_ERR_RNR:
 1294 			if (qp_type(qp) == IB_QPT_RC) {
 1295 				/* RC - class B */
 1296 				send_ack(qp, pkt, AETH_RNR_NAK |
 1297 					 (~AETH_TYPE_MASK &
 1298 					 qp->attr.min_rnr_timer),
 1299 					 pkt->psn);
 1300 			} else {
 1301 				/* UD/UC - class D */
 1302 				qp->resp.drop_msg = 1;
 1303 			}
 1304 			state = RESPST_CLEANUP;
 1305 			break;
 1306 
 1307 		case RESPST_ERR_RKEY_VIOLATION:
 1308 			if (qp_type(qp) == IB_QPT_RC) {
 1309 				/* Class C */
 1310 				do_class_ac_error(qp, AETH_NAK_REM_ACC_ERR,
 1311 						  IB_WC_REM_ACCESS_ERR);
 1312 				state = RESPST_COMPLETE;
 1313 			} else {
 1314 				qp->resp.drop_msg = 1;
 1315 				if (qp->srq) {
 1316 					/* UC/SRQ Class D */
 1317 					qp->resp.status = IB_WC_REM_ACCESS_ERR;
 1318 					state = RESPST_COMPLETE;
 1319 				} else {
 1320 					/* UC/non-SRQ Class E. */
 1321 					state = RESPST_CLEANUP;
 1322 				}
 1323 			}
 1324 			break;
 1325 
 1326 		case RESPST_ERR_LENGTH:
 1327 			if (qp_type(qp) == IB_QPT_RC) {
 1328 				/* Class C */
 1329 				do_class_ac_error(qp, AETH_NAK_INVALID_REQ,
 1330 						  IB_WC_REM_INV_REQ_ERR);
 1331 				state = RESPST_COMPLETE;
 1332 			} else if (qp->srq) {
 1333 				/* UC/UD - class E */
 1334 				qp->resp.status = IB_WC_REM_INV_REQ_ERR;
 1335 				state = RESPST_COMPLETE;
 1336 			} else {
 1337 				/* UC/UD - class D */
 1338 				qp->resp.drop_msg = 1;
 1339 				state = RESPST_CLEANUP;
 1340 			}
 1341 			break;
 1342 
 1343 		case RESPST_ERR_MALFORMED_WQE:
 1344 			/* All, Class A. */
 1345 			do_class_ac_error(qp, AETH_NAK_REM_OP_ERR,
 1346 					  IB_WC_LOC_QP_OP_ERR);
 1347 			state = RESPST_COMPLETE;
 1348 			break;
 1349 
 1350 		case RESPST_ERR_CQ_OVERFLOW:
 1351 			/* All - Class G */
 1352 			state = RESPST_ERROR;
 1353 			break;
 1354 
 1355 		case RESPST_DONE:
 1356 			if (qp->resp.goto_error) {
 1357 				state = RESPST_ERROR;
 1358 				break;
 1359 			}
 1360 
 1361 			goto done;
 1362 
 1363 		case RESPST_EXIT:
 1364 			if (qp->resp.goto_error) {
 1365 				state = RESPST_ERROR;
 1366 				break;
 1367 			}
 1368 
 1369 			goto exit;
 1370 
 1371 		case RESPST_RESET: {
 1372 			struct sk_buff *skb;
 1373 
 1374 			while ((skb = skb_dequeue(&qp->req_pkts))) {
 1375 				rxe_drop_ref(qp);
 1376 				kfree_skb(skb);
 1377 			}
 1378 
 1379 			while (!qp->srq && qp->rq.queue &&
 1380 			       queue_head(qp->rq.queue))
 1381 				advance_consumer(qp->rq.queue);
 1382 
 1383 			qp->resp.wqe = NULL;
 1384 			goto exit;
 1385 		}
 1386 
 1387 		case RESPST_ERROR:
 1388 			qp->resp.goto_error = 0;
 1389 			pr_warn("qp#%d moved to error state\n", qp_num(qp));
 1390 			rxe_qp_error(qp);
 1391 			goto exit;
 1392 
 1393 		default:
 1394 			WARN_ON(1);
 1395 		}
 1396 	}
 1397 
 1398 exit:
 1399 	ret = -EAGAIN;
 1400 done:
 1401 	return ret;
 1402 }
 1403 
 1404 #line 22 "/home/ubuntu/launches/work/current--X--drivers--X--defaultlinux-4.8-rc1.tar.xz--X--43_1a--X--cpachecker/linux-4.8-rc1.tar.xz/csd_deg_dscv/9940/dscv_tempdir/dscv/ri/43_1a/drivers/infiniband/sw/rxe/rxe_resp.o.c.prepared"
    1 
    2 
    3 /* Here is the definition of the CHECK_WAIT_FLAGS(flags) macro. */
    4 #include "include/gfp.h"
    5 #include <linux/gfp.h>
    6 #include <verifier/rcv.h>
    7 #include <kernel-model/ERR.inc>
    8 
    9 #define LDV_ZERO_STATE 0
   10 
   11 
   12 /* There are 2 possible states of spin lock. */
   13 enum {
   14   LDV_SPIN_UNLOCKED = LDV_ZERO_STATE, /* Spin isn't locked. */
   15   LDV_SPIN_LOCKED /* Spin is locked. */
   16 };
   17 
   18 
   19 /* Spin isn't locked at the beginning. */
   20 int ldv_spin = LDV_SPIN_UNLOCKED;
   21 
   22 
   23 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_check_alloc_flags') Check that a memory allocating function was called with a correct value of flags in spin locking. */
   24 void ldv_check_alloc_flags(gfp_t flags)
   25 {
   26   /* LDV_COMMENT_ASSERT If spin is locked (ldv_spin != LDV_SPIN_UNLOCKED) then a memory allocating function should be called with __GFP_WAIT flag unset (GFP_ATOMIC or GFP_NOWAIT). */
   27   ldv_assert(ldv_spin == LDV_SPIN_UNLOCKED || CHECK_WAIT_FLAGS(flags));
   28 }
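
For verification, the driver sources are linked against this model: spin lock call sites are rerouted to ldv_spin_lock()/ldv_spin_unlock() and every allocation's gfp flags pass through ldv_check_alloc_flags(). A hedged sketch of an instrumented sequence, assuming the kernel's gfp constants: a non-sleeping GFP_ATOMIC allocation under the lock satisfies the assertion, while GFP_KERNEL in the same position (such as the skb_clone() under mcg_lock in rxe_rcv_mcast_pkt() earlier in this trace) reaches the error label:

/* Sketch: how instrumented driver code exercises the model above.
 * GFP_ATOMIC does not allow sleeping, so CHECK_WAIT_FLAGS() accepts it
 * under the lock; GFP_KERNEL would make ldv_assert() fail.
 */
static void instrumented_example(void)
{
	ldv_spin_lock();			/* was: spin_lock_bh(&lock) */
	ldv_check_alloc_flags(GFP_ATOMIC);	/* passes while locked */
	/* ldv_check_alloc_flags(GFP_KERNEL); would reach LDV_ERROR here */
	ldv_spin_unlock();			/* was: spin_unlock_bh(&lock) */
}
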
   29 
   30 extern struct page *ldv_some_page(void);
   31 
   32 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_check_alloc_flags_and_return_some_page') Check that a memory allocating function was called with a correct value of flags in spin locking. */
   33 struct page *ldv_check_alloc_flags_and_return_some_page(gfp_t flags)
   34 {
   35   /* LDV_COMMENT_ASSERT If spin is locked (ldv_spin != LDV_SPIN_UNLOCKED) then a memory allocating function should be called with __GFP_WAIT flag unset (GFP_ATOMIC or GFP_NOWAIT). */
   36   ldv_assert(ldv_spin == LDV_SPIN_UNLOCKED || CHECK_WAIT_FLAGS(flags));
   37   /* LDV_COMMENT_RETURN Return a page pointer (maybe NULL). */
   38   return ldv_some_page();
   39 }
   40 
   41 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_check_alloc_nonatomic') Check that a memory allocating function was not called in spin locking. */
   42 void ldv_check_alloc_nonatomic(void)
   43 {
   44   /* LDV_COMMENT_ASSERT If spin is locked (ldv_spin != LDV_SPIN_UNLOCKED) then the memory allocating function must not be called, because it implicitly uses the GFP_KERNEL flag. */
   45   ldv_assert(ldv_spin == LDV_SPIN_UNLOCKED);
   46 }
   47 
   48 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_lock') Lock spin. */
   49 void ldv_spin_lock(void)
   50 {
   51   /* LDV_COMMENT_CHANGE_STATE Lock spin. */
   52   ldv_spin = LDV_SPIN_LOCKED;
   53 }
   54 
   55 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock') Unlock spin. */
   56 void ldv_spin_unlock(void)
   57 {
   58   /* LDV_COMMENT_CHANGE_STATE Unlock spin. */
   59   ldv_spin = LDV_SPIN_UNLOCKED;
   60 }
   61 
   62 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_trylock') Try to lock spin. It returns 0 if the spin wasn't acquired and 1 if it was. */
   63 int ldv_spin_trylock(void)
   64 {
   65   int is_lock;
   66 
   67   /* LDV_COMMENT_OTHER Do this to make nondetermined choice. */
   68   is_lock = ldv_undef_int();
   69 
   70   if (is_lock)
   71   {
   72     /* LDV_COMMENT_RETURN Don't lock spin and return 0. */
   73     return 0;
   74   }
   75   else
   76   {
   77     /* LDV_COMMENT_CHANGE_STATE Lock spin. */
   78     ldv_spin = LDV_SPIN_LOCKED;
   79     /* LDV_COMMENT_RETURN Return 1 since spin was locked. */
   80     return 1;
   81   }
   82 }
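/* Illustrative sketch (added for clarity, not part of the original model):
 * a driver's conditional locking is explored along both outcomes of the
 * nondeterministic choice above:
 *
 *   if (ldv_spin_trylock()) {                    // models spin_trylock()
 *           ldv_check_alloc_flags(GFP_ATOMIC);   // checked with lock held
 *           ldv_spin_unlock();
 *   }
 *
 * The verifier follows both the branch where the lock was taken (return
 * value 1) and the branch where it was not (return value 0).
 */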
    1 #ifndef _LDV_RCV_H_
    2 #define _LDV_RCV_H_
    3 
    4 /* If expr evaluates to zero, ldv_assert() causes a program to reach the error
    5    label like the standard assert(). */
    6 #define ldv_assert(expr) ((expr) ? 0 : ldv_error())
    7 
    8 /* The error label wrapper. It is used because some static verifiers (like
    9    BLAST) don't accept multiple error labels throughout a program. */
   10 static inline void ldv_error(void)
   11 {
   12   LDV_ERROR: goto LDV_ERROR;
   13 }
   14 
   15 /* If expr evaluates to zero, ldv_assume() causes an infinite loop that is
   16    avoided by verifiers. */
   17 #define ldv_assume(expr) ((expr) ? 0 : ldv_stop())
   18 
   19 /* Infinite loop that causes verifiers to skip such paths. */
   20 static inline void ldv_stop(void) {
   21   LDV_STOP: goto LDV_STOP;
   22 }
   23 
   24 /* Special nondeterministic functions. */
   25 int ldv_undef_int(void);
   26 void *ldv_undef_ptr(void);
   27 unsigned long ldv_undef_ulong(void);
   28 long ldv_undef_long(void);
   29 /* Return nondeterministic negative integer number. */
   30 static inline int ldv_undef_int_negative(void)
   31 {
   32   int ret = ldv_undef_int();
   33 
   34   ldv_assume(ret < 0);
   35 
   36   return ret;
   37 }
   38 /* Return nondeterministic nonpositive integer number. */
   39 static inline int ldv_undef_int_nonpositive(void)
   40 {
   41   int ret = ldv_undef_int();
   42 
   43   ldv_assume(ret <= 0);
   44 
   45   return ret;
   46 }
   47 
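/* Illustrative usage sketch (a hypothetical helper, not part of this
   header): an environment event such as "did the hardware report an
   error?" can be modelled by branching on ldv_undef_int(), so that a
   verifier explores both outcomes of the condition. */
static inline int ldv_hw_reported_error(void)
{
  /* Nondeterministic boolean choice. */
  return ldv_undef_int() ? 1 : 0;
}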
   48 /* Add an explicit model for the __builtin_expect GCC builtin. Without the
   49    model a return value will be treated as nondeterministic by verifiers. */
   50 static inline long __builtin_expect(long exp, long c)
   51 {
   52   return exp;
   53 }
   54 
   55 /* This function causes the program to exit abnormally. GCC implements this
   56 function by using a target-dependent mechanism (such as intentionally executing
   57 an illegal instruction) or by calling abort. The mechanism used may vary from
   58 release to release so you should not rely on any particular implementation.
   59 http://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html */
   60 static inline void __builtin_trap(void)
   61 {
   62   ldv_assert(0);
   63 }
   64 
   65 /* The constant is for simulating an error of ldv_undef_ptr() function. */
   66 #define LDV_PTR_MAX 2012
   67 
   68 #endif /* _LDV_RCV_H_ */
    1 #ifndef _IPV6_H
    2 #define _IPV6_H
    3 
    4 #include <uapi/linux/ipv6.h>
    5 
    6 #define ipv6_optlen(p)  (((p)->hdrlen+1) << 3)
    7 #define ipv6_authlen(p) (((p)->hdrlen+2) << 2)
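/* Worked example (added for clarity): for a Hop-by-Hop or Destination
 * Options header, hdrlen counts 8-octet units beyond the first, so
 * hdrlen == 1 gives ipv6_optlen == (1 + 1) << 3 == 16 bytes. For an
 * Authentication Header, hdrlen counts 4-octet units minus two, so
 * hdrlen == 4 gives ipv6_authlen == (4 + 2) << 2 == 24 bytes. */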
    8 /*
    9  * This structure contains configuration options per IPv6 link.
   10  */
   11 struct ipv6_devconf {
   12 	__s32		forwarding;
   13 	__s32		hop_limit;
   14 	__s32		mtu6;
   15 	__s32		accept_ra;
   16 	__s32		accept_redirects;
   17 	__s32		autoconf;
   18 	__s32		dad_transmits;
   19 	__s32		rtr_solicits;
   20 	__s32		rtr_solicit_interval;
   21 	__s32		rtr_solicit_delay;
   22 	__s32		force_mld_version;
   23 	__s32		mldv1_unsolicited_report_interval;
   24 	__s32		mldv2_unsolicited_report_interval;
   25 	__s32		use_tempaddr;
   26 	__s32		temp_valid_lft;
   27 	__s32		temp_prefered_lft;
   28 	__s32		regen_max_retry;
   29 	__s32		max_desync_factor;
   30 	__s32		max_addresses;
   31 	__s32		accept_ra_defrtr;
   32 	__s32		accept_ra_min_hop_limit;
   33 	__s32		accept_ra_pinfo;
   34 	__s32		ignore_routes_with_linkdown;
   35 #ifdef CONFIG_IPV6_ROUTER_PREF
   36 	__s32		accept_ra_rtr_pref;
   37 	__s32		rtr_probe_interval;
   38 #ifdef CONFIG_IPV6_ROUTE_INFO
   39 	__s32		accept_ra_rt_info_max_plen;
   40 #endif
   41 #endif
   42 	__s32		proxy_ndp;
   43 	__s32		accept_source_route;
   44 	__s32		accept_ra_from_local;
   45 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
   46 	__s32		optimistic_dad;
   47 	__s32		use_optimistic;
   48 #endif
   49 #ifdef CONFIG_IPV6_MROUTE
   50 	__s32		mc_forwarding;
   51 #endif
   52 	__s32		disable_ipv6;
   53 	__s32		drop_unicast_in_l2_multicast;
   54 	__s32		accept_dad;
   55 	__s32		force_tllao;
   56 	__s32           ndisc_notify;
   57 	__s32		suppress_frag_ndisc;
   58 	__s32		accept_ra_mtu;
   59 	__s32		drop_unsolicited_na;
   60 	struct ipv6_stable_secret {
   61 		bool initialized;
   62 		struct in6_addr secret;
   63 	} stable_secret;
   64 	__s32		use_oif_addrs_only;
   65 	__s32		keep_addr_on_down;
   66 
   67 	struct ctl_table_header *sysctl_header;
   68 };
   69 
   70 struct ipv6_params {
   71 	__s32 disable_ipv6;
   72 	__s32 autoconf;
   73 };
   74 extern struct ipv6_params ipv6_defaults;
   75 #include <linux/icmpv6.h>
   76 #include <linux/tcp.h>
   77 #include <linux/udp.h>
   78 
   79 #include <net/inet_sock.h>
   80 
   81 static inline struct ipv6hdr *ipv6_hdr(const struct sk_buff *skb)
   82 {
   83 	return (struct ipv6hdr *)skb_network_header(skb);
   84 }
   85 
   86 static inline struct ipv6hdr *inner_ipv6_hdr(const struct sk_buff *skb)
   87 {
   88 	return (struct ipv6hdr *)skb_inner_network_header(skb);
   89 }
   90 
   91 static inline struct ipv6hdr *ipipv6_hdr(const struct sk_buff *skb)
   92 {
   93 	return (struct ipv6hdr *)skb_transport_header(skb);
   94 }
   95 
   96 /* 
   97    This structure contains results of exthdrs parsing
   98    as offsets from skb->nh.
   99  */
  100 
  101 struct inet6_skb_parm {
  102 	int			iif;
  103 	__be16			ra;
  104 	__u16			dst0;
  105 	__u16			srcrt;
  106 	__u16			dst1;
  107 	__u16			lastopt;
  108 	__u16			nhoff;
  109 	__u16			flags;
  110 #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
  111 	__u16			dsthao;
  112 #endif
  113 	__u16			frag_max_size;
  114 
  115 #define IP6SKB_XFRM_TRANSFORMED	1
  116 #define IP6SKB_FORWARDED	2
  117 #define IP6SKB_REROUTED		4
  118 #define IP6SKB_ROUTERALERT	8
  119 #define IP6SKB_FRAGMENTED      16
  120 #define IP6SKB_HOPBYHOP        32
  121 #define IP6SKB_L3SLAVE         64
  122 };
  123 
  124 #if defined(CONFIG_NET_L3_MASTER_DEV)
  125 static inline bool skb_l3mdev_slave(__u16 flags)
  126 {
  127 	return flags & IP6SKB_L3SLAVE;
  128 }
  129 #else
  130 static inline bool skb_l3mdev_slave(__u16 flags)
  131 {
  132 	return false;
  133 }
  134 #endif
  135 
  136 #define IP6CB(skb)	((struct inet6_skb_parm*)((skb)->cb))
  137 #define IP6CBMTU(skb)	((struct ip6_mtuinfo *)((skb)->cb))
  138 
  139 static inline int inet6_iif(const struct sk_buff *skb)
  140 {
  141 	bool l3_slave = skb_l3mdev_slave(IP6CB(skb)->flags);
  142 
  143 	return l3_slave ? skb->skb_iif : IP6CB(skb)->iif;
  144 }
  145 
  146 struct tcp6_request_sock {
  147 	struct tcp_request_sock	  tcp6rsk_tcp;
  148 };
  149 
  150 struct ipv6_mc_socklist;
  151 struct ipv6_ac_socklist;
  152 struct ipv6_fl_socklist;
  153 
  154 struct inet6_cork {
  155 	struct ipv6_txoptions *opt;
  156 	u8 hop_limit;
  157 	u8 tclass;
  158 };
  159 
  160 /**
  161  * struct ipv6_pinfo - ipv6 private area
  162  *
  163  * In the struct sock hierarchy (tcp6_sock, udp6_sock, etc)
  164  * this _must_ be the last member, so that inet6_sk_generic
  165  * is able to calculate its offset from the base struct sock
  166  * by using the struct proto->slab_obj_size member. -acme
  167  */
  168 struct ipv6_pinfo {
  169 	struct in6_addr 	saddr;
  170 	struct in6_pktinfo	sticky_pktinfo;
  171 	const struct in6_addr		*daddr_cache;
  172 #ifdef CONFIG_IPV6_SUBTREES
  173 	const struct in6_addr		*saddr_cache;
  174 #endif
  175 
  176 	__be32			flow_label;
  177 	__u32			frag_size;
  178 
  179 	/*
  180 	 * Packed in 16bits.
  181 	 * Omit one shift by putting the signed field at MSB.
  182 	 */
  183 #if defined(__BIG_ENDIAN_BITFIELD)
  184 	__s16			hop_limit:9;
  185 	__u16			__unused_1:7;
  186 #else
  187 	__u16			__unused_1:7;
  188 	__s16			hop_limit:9;
  189 #endif
  190 
  191 #if defined(__BIG_ENDIAN_BITFIELD)
  192 	/* Packed in 16bits. */
  193 	__s16			mcast_hops:9;
  194 	__u16			__unused_2:6,
  195 				mc_loop:1;
  196 #else
  197 	__u16			mc_loop:1,
  198 				__unused_2:6;
  199 	__s16			mcast_hops:9;
  200 #endif
  201 	int			ucast_oif;
  202 	int			mcast_oif;
  203 
  204 	/* pktoption flags */
  205 	union {
  206 		struct {
  207 			__u16	srcrt:1,
  208 				osrcrt:1,
  209 			        rxinfo:1,
  210 			        rxoinfo:1,
  211 				rxhlim:1,
  212 				rxohlim:1,
  213 				hopopts:1,
  214 				ohopopts:1,
  215 				dstopts:1,
  216 				odstopts:1,
  217                                 rxflow:1,
  218 				rxtclass:1,
  219 				rxpmtu:1,
  220 				rxorigdstaddr:1;
  221 				/* 2 bits hole */
  222 		} bits;
  223 		__u16		all;
  224 	} rxopt;
  225 
  226 	/* sockopt flags */
  227 	__u16			recverr:1,
  228 	                        sndflow:1,
  229 				repflow:1,
  230 				pmtudisc:3,
  231 				padding:1,	/* 1 bit hole */
  232 				srcprefs:3,	/* 001: prefer temporary address
  233 						 * 010: prefer public address
  234 						 * 100: prefer care-of address
  235 						 */
  236 				dontfrag:1,
  237 				autoflowlabel:1;
  238 	__u8			min_hopcount;
  239 	__u8			tclass;
  240 	__be32			rcv_flowinfo;
  241 
  242 	__u32			dst_cookie;
  243 	__u32			rx_dst_cookie;
  244 
  245 	struct ipv6_mc_socklist	__rcu *ipv6_mc_list;
  246 	struct ipv6_ac_socklist	*ipv6_ac_list;
  247 	struct ipv6_fl_socklist __rcu *ipv6_fl_list;
  248 
  249 	struct ipv6_txoptions __rcu	*opt;
  250 	struct sk_buff		*pktoptions;
  251 	struct sk_buff		*rxpmtu;
  252 	struct inet6_cork	cork;
  253 };
  254 
  255 /* WARNING: don't change the layout of the members in {raw,udp,tcp}6_sock! */
  256 struct raw6_sock {
  257 	/* inet_sock has to be the first member of raw6_sock */
  258 	struct inet_sock	inet;
  259 	__u32			checksum;	/* perform checksum */
  260 	__u32			offset;		/* checksum offset  */
  261 	struct icmp6_filter	filter;
  262 	__u32			ip6mr_table;
  263 	/* ipv6_pinfo has to be the last member of raw6_sock, see inet6_sk_generic */
  264 	struct ipv6_pinfo	inet6;
  265 };
  266 
  267 struct udp6_sock {
  268 	struct udp_sock	  udp;
  269 	/* ipv6_pinfo has to be the last member of udp6_sock, see inet6_sk_generic */
  270 	struct ipv6_pinfo inet6;
  271 };
  272 
  273 struct tcp6_sock {
  274 	struct tcp_sock	  tcp;
  275 	/* ipv6_pinfo has to be the last member of tcp6_sock, see inet6_sk_generic */
  276 	struct ipv6_pinfo inet6;
  277 };
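/* Illustrative sketch (an assumption about inet6_sk_generic, which is
 * defined elsewhere): because ipv6_pinfo is the last member of each of
 * the {raw,udp,tcp}6_sock structures above, its address can be recovered
 * from the slab object size alone, roughly:
 *
 *   struct ipv6_pinfo *pinet6 = (struct ipv6_pinfo *)
 *           (((u8 *)sk) + sk->sk_prot->obj_size - sizeof(struct ipv6_pinfo));
 *
 * This is why the comments above insist on ipv6_pinfo being last.
 */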
  278 
  279 extern int inet6_sk_rebuild_header(struct sock *sk);
  280 
  281 struct tcp6_timewait_sock {
  282 	struct tcp_timewait_sock   tcp6tw_tcp;
  283 };
  284 
  285 #if IS_ENABLED(CONFIG_IPV6)
  286 bool ipv6_mod_enabled(void);
  287 
  288 static inline struct ipv6_pinfo *inet6_sk(const struct sock *__sk)
  289 {
  290 	return sk_fullsock(__sk) ? inet_sk(__sk)->pinet6 : NULL;
  291 }
  292 
  293 static inline struct raw6_sock *raw6_sk(const struct sock *sk)
  294 {
  295 	return (struct raw6_sock *)sk;
  296 }
  297 
  298 static inline void inet_sk_copy_descendant(struct sock *sk_to,
  299 					   const struct sock *sk_from)
  300 {
  301 	int ancestor_size = sizeof(struct inet_sock);
  302 
  303 	if (sk_from->sk_family == PF_INET6)
  304 		ancestor_size += sizeof(struct ipv6_pinfo);
  305 
  306 	__inet_sk_copy_descendant(sk_to, sk_from, ancestor_size);
  307 }
  308 
  309 #define __ipv6_only_sock(sk)	(sk->sk_ipv6only)
  310 #define ipv6_only_sock(sk)	(__ipv6_only_sock(sk))
  311 #define ipv6_sk_rxinfo(sk)	((sk)->sk_family == PF_INET6 && \
  312 				 inet6_sk(sk)->rxopt.bits.rxinfo)
  313 
  314 static inline const struct in6_addr *inet6_rcv_saddr(const struct sock *sk)
  315 {
  316 	if (sk->sk_family == AF_INET6)
  317 		return &sk->sk_v6_rcv_saddr;
  318 	return NULL;
  319 }
  320 
  321 static inline int inet_v6_ipv6only(const struct sock *sk)
  322 {
  323 	/* ipv6only field is at same position for timewait and other sockets */
  324 	return ipv6_only_sock(sk);
  325 }
  326 #else
  327 #define __ipv6_only_sock(sk)	0
  328 #define ipv6_only_sock(sk)	0
  329 #define ipv6_sk_rxinfo(sk)	0
  330 
  331 static inline bool ipv6_mod_enabled(void)
  332 {
  333 	return false;
  334 }
  335 
  336 static inline struct ipv6_pinfo * inet6_sk(const struct sock *__sk)
  337 {
  338 	return NULL;
  339 }
  340 
  341 static inline struct inet6_request_sock *
  342 			inet6_rsk(const struct request_sock *rsk)
  343 {
  344 	return NULL;
  345 }
  346 
  347 static inline struct raw6_sock *raw6_sk(const struct sock *sk)
  348 {
  349 	return NULL;
  350 }
  351 
  352 #define inet6_rcv_saddr(__sk)	NULL
  353 #define tcp_twsk_ipv6only(__sk)		0
  354 #define inet_v6_ipv6only(__sk)		0
  355 #endif /* IS_ENABLED(CONFIG_IPV6) */
  356 #endif /* _IPV6_H */
    1 /*
    2  * kref.h - library routines for handling generic reference counted objects
    3  *
    4  * Copyright (C) 2004 Greg Kroah-Hartman <greg@kroah.com>
    5  * Copyright (C) 2004 IBM Corp.
    6  *
    7  * based on kobject.h which was:
    8  * Copyright (C) 2002-2003 Patrick Mochel <mochel@osdl.org>
    9  * Copyright (C) 2002-2003 Open Source Development Labs
   10  *
   11  * This file is released under the GPLv2.
   12  *
   13  */
   14 
   15 #ifndef _KREF_H_
   16 #define _KREF_H_
   17 
   18 #include <linux/bug.h>
   19 #include <linux/atomic.h>
   20 #include <linux/kernel.h>
   21 #include <linux/mutex.h>
   22 
   23 struct kref {
   24 	atomic_t refcount;
   25 };
   26 
   27 /**
   28  * kref_init - initialize object.
   29  * @kref: object in question.
   30  */
   31 static inline void kref_init(struct kref *kref)
   32 {
   33 	atomic_set(&kref->refcount, 1);
   34 }
   35 
   36 /**
   37  * kref_get - increment refcount for object.
   38  * @kref: object.
   39  */
   40 static inline void kref_get(struct kref *kref)
   41 {
   42 	/* If refcount was 0 before incrementing then we have a race
   43 	 * condition: this kref is being freed by some other thread right now.
   44 	 * In this case one should use kref_get_unless_zero()
   45 	 */
   46 	WARN_ON_ONCE(atomic_inc_return(&kref->refcount) < 2);
   47 }
   48 
   49 /**
   50  * kref_sub - subtract a number of refcounts for object.
   51  * @kref: object.
   52  * @count: Number of refcounts to subtract.
   53  * @release: pointer to the function that will clean up the object when the
   54  *	     last reference to the object is released.
   55  *	     This pointer is required, and it is not acceptable to pass kfree
   56  *	     in as this function.  If the caller does pass kfree to this
   57  *	     function, you will be publicly mocked mercilessly by the kref
   58  *	     maintainer, and anyone else who happens to notice it.  You have
   59  *	     been warned.
   60  *
   61  * Subtract @count from the refcount, and if 0, call release().
   62  * Return 1 if the object was removed, otherwise return 0.  Beware, if this
   63  * function returns 0, you still can not count on the kref from remaining in
   64  * memory.  Only use the return value if you want to see if the kref is now
   65  * gone, not present.
   66  */
   67 static inline int kref_sub(struct kref *kref, unsigned int count,
   68 	     void (*release)(struct kref *kref))
   69 {
   70 	WARN_ON(release == NULL);
   71 
   72 	if (atomic_sub_and_test((int) count, &kref->refcount)) {
   73 		release(kref);
   74 		return 1;
   75 	}
   76 	return 0;
   77 }
   78 
   79 /**
   80  * kref_put - decrement refcount for object.
   81  * @kref: object.
   82  * @release: pointer to the function that will clean up the object when the
   83  *	     last reference to the object is released.
   84  *	     This pointer is required, and it is not acceptable to pass kfree
   85  *	     in as this function.  If the caller does pass kfree to this
   86  *	     function, you will be publicly mocked mercilessly by the kref
   87  *	     maintainer, and anyone else who happens to notice it.  You have
   88  *	     been warned.
   89  *
   90  * Decrement the refcount, and if 0, call release().
   91  * Return 1 if the object was removed, otherwise return 0.  Beware, if this
   92  * function returns 0, you still cannot count on the kref remaining in
   93  * memory.  Only use the return value if you want to see whether the kref
   94  * is now gone, not whether it is still present.
   95  */
   96 static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref))
   97 {
   98 	return kref_sub(kref, 1, release);
   99 }
  100 
  101 static inline int kref_put_mutex(struct kref *kref,
  102 				 void (*release)(struct kref *kref),
  103 				 struct mutex *lock)
  104 {
  105 	WARN_ON(release == NULL);
  106 	if (unlikely(!atomic_add_unless(&kref->refcount, -1, 1))) {
  107 		mutex_lock(lock);
  108 		if (unlikely(!atomic_dec_and_test(&kref->refcount))) {
  109 			mutex_unlock(lock);
  110 			return 0;
  111 		}
  112 		release(kref);
  113 		return 1;
  114 	}
  115 	return 0;
  116 }
  117 
  118 /**
  119  * kref_get_unless_zero - Increment refcount for object unless it is zero.
  120  * @kref: object.
  121  *
  122  * Return non-zero if the increment succeeded. Otherwise return 0.
  123  *
  124  * This function is intended to simplify locking around refcounting for
  125  * objects that can be looked up from a lookup structure, and which are
  126  * removed from that lookup structure in the object destructor.
  127  * Operations on such objects require at least a read lock around
  128  * lookup + kref_get, and a write lock around kref_put + remove from lookup
  129  * structure. Furthermore, RCU implementations become extremely tricky.
  130  * With a lookup followed by a kref_get_unless_zero *with return value check*
  131  * locking in the kref_put path can be deferred to the actual removal from
  132  * the lookup structure and RCU lookups become trivial.
  133  */
  134 static inline int __must_check kref_get_unless_zero(struct kref *kref)
  135 {
  136 	return atomic_add_unless(&kref->refcount, 1, 0);
  137 }
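/* Usage sketch (a hypothetical example, added for clarity): a lookup
 * table protected by a lock, where objects are removed from the table in
 * their release function. The return value check on kref_get_unless_zero()
 * is what makes the lookup safe against an object whose refcount has
 * already dropped to zero:
 *
 *   struct foo *foo_lookup(unsigned long id)
 *   {
 *           struct foo *f;
 *
 *           spin_lock(&foo_table_lock);         // hypothetical lock
 *           f = foo_table_find(id);             // hypothetical helper
 *           if (f && !kref_get_unless_zero(&f->ref))
 *                   f = NULL;                   // object is being freed
 *           spin_unlock(&foo_table_lock);
 *           return f;
 *   }
 */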
  138 #endif /* _KREF_H_ */
    1 /*
    2  *	Definitions for the 'struct sk_buff' memory handlers.
    3  *
    4  *	Authors:
    5  *		Alan Cox, <gw4pts@gw4pts.ampr.org>
    6  *		Florian La Roche, <rzsfl@rz.uni-sb.de>
    7  *
    8  *	This program is free software; you can redistribute it and/or
    9  *	modify it under the terms of the GNU General Public License
   10  *	as published by the Free Software Foundation; either version
   11  *	2 of the License, or (at your option) any later version.
   12  */
   13 
   14 #ifndef _LINUX_SKBUFF_H
   15 #define _LINUX_SKBUFF_H
   16 
   17 #include <linux/kernel.h>
   18 #include <linux/kmemcheck.h>
   19 #include <linux/compiler.h>
   20 #include <linux/time.h>
   21 #include <linux/bug.h>
   22 #include <linux/cache.h>
   23 #include <linux/rbtree.h>
   24 #include <linux/socket.h>
   25 
   26 #include <linux/atomic.h>
   27 #include <asm/types.h>
   28 #include <linux/spinlock.h>
   29 #include <linux/net.h>
   30 #include <linux/textsearch.h>
   31 #include <net/checksum.h>
   32 #include <linux/rcupdate.h>
   33 #include <linux/hrtimer.h>
   34 #include <linux/dma-mapping.h>
   35 #include <linux/netdev_features.h>
   36 #include <linux/sched.h>
   37 #include <net/flow_dissector.h>
   38 #include <linux/splice.h>
   39 #include <linux/in6.h>
   40 #include <linux/if_packet.h>
   41 #include <net/flow.h>
   42 
   43 /* The interface for checksum offload between the stack and networking drivers
   44  * is as follows...
   45  *
   46  * A. IP checksum related features
   47  *
   48  * Drivers advertise checksum offload capabilities in the features of a device.
   49  * From the stack's point of view these are capabilities offered by the driver,
   50  * a driver typically only advertises features that it is capable of offloading
   51  * to its device.
   52  *
   53  * The checksum related features are:
   54  *
   55  *	NETIF_F_HW_CSUM	- The driver (or its device) is able to compute one
   56  *			  IP (one's complement) checksum for any combination
   57  *			  of protocols or protocol layering. The checksum is
   58  *			  computed and set in a packet per the CHECKSUM_PARTIAL
   59  *			  interface (see below).
   60  *
   61  *	NETIF_F_IP_CSUM - Driver (device) is only able to checksum plain
   62  *			  TCP or UDP packets over IPv4. These are specifically
   63  *			  unencapsulated packets of the form IPv4|TCP or
   64  *			  IPv4|UDP where the Protocol field in the IPv4 header
   65  *			  is TCP or UDP. The IPv4 header may contain IP options
   66  *			  This feature cannot be set in features for a device
   67  *			  with NETIF_F_HW_CSUM also set. This feature is being
   68  *			  DEPRECATED (see below).
   69  *
   70  *	NETIF_F_IPV6_CSUM - Driver (device) is only able to checksum plain
   71  *			  TCP or UDP packets over IPv6. These are specifically
   72  *			  unencapsulated packets of the form IPv6|TCP or
   73  *			  IPv4|UDP where the Next Header field in the IPv6
   74  *			  header is either TCP or UDP. IPv6 extension headers
   75  *			  are not supported with this feature. This feature
   76  *			  cannot be set in features for a device with
   77  *			  NETIF_F_HW_CSUM also set. This feature is being
   78  *			  DEPRECATED (see below).
   79  *
   80  *	NETIF_F_RXCSUM - Driver (device) performs receive checksum offload.
   81  *			 This flag is only used to disable the RX checksum
   82  *			 feature for a device. The stack will accept receive
   83  *			 checksum indication in packets received on a device
   84  *			 regardless of whether NETIF_F_RXCSUM is set.
   85  *
   86  * B. Checksumming of received packets by device. Indication of checksum
   87  *    verification is in set skb->ip_summed. Possible values are:
   88  *
   89  * CHECKSUM_NONE:
   90  *
   91  *   Device did not checksum this packet e.g. due to lack of capabilities.
   92  *   The packet contains a full (though not verified) checksum, but it is
   93  *   not stored in skb->csum. Thus, skb->csum is undefined in this case.
   94  *
   95  * CHECKSUM_UNNECESSARY:
   96  *
   97  *   The hardware you're dealing with doesn't calculate the full checksum
   98  *   (as in CHECKSUM_COMPLETE), but it does parse headers and verify checksums
   99  *   for specific protocols. For such packets it will set CHECKSUM_UNNECESSARY
  100  *   if their checksums are okay. skb->csum is still undefined in this case
  101  *   though. A driver or device must never modify the checksum field in the
  102  *   packet even if checksum is verified.
  103  *
  104  *   CHECKSUM_UNNECESSARY is applicable to following protocols:
  105  *     TCP: IPv6 and IPv4.
  106  *     UDP: IPv4 and IPv6. A device may apply CHECKSUM_UNNECESSARY to a
  107  *       zero UDP checksum for either IPv4 or IPv6, the networking stack
  108  *       may perform further validation in this case.
  109  *     GRE: only if the checksum is present in the header.
  110  *     SCTP: indicates the CRC in SCTP header has been validated.
  111  *
  112  *   skb->csum_level indicates the number of consecutive checksums found in
  113  *   the packet minus one that have been verified as CHECKSUM_UNNECESSARY.
  114  *   For instance if a device receives an IPv6->UDP->GRE->IPv4->TCP packet
  115  *   and a device is able to verify the checksums for UDP (possibly zero),
  116  *   GRE (checksum flag is set), and TCP-- skb->csum_level would be set to
  117  *   two. If the device were only able to verify the UDP checksum and not
  118  *   GRE, either because it doesn't support GRE checksum or because the GRE
  119  *   checksum is bad, skb->csum_level would be set to zero (TCP checksum is
  120  *   not considered in this case).
  121  *
  122  * CHECKSUM_COMPLETE:
  123  *
  124  *   This is the most generic way. The device supplied the checksum of the
  125  *   _whole_ packet as seen by netif_rx() and stored it in skb->csum. Meaning, the
  126  *   hardware doesn't need to parse L3/L4 headers to implement this.
  127  *
  128  *   Note: Even if device supports only some protocols, but is able to produce
  129  *   skb->csum, it MUST use CHECKSUM_COMPLETE, not CHECKSUM_UNNECESSARY.
  130  *
  131  * CHECKSUM_PARTIAL:
  132  *
  133  *   A checksum is set up to be offloaded to a device as described in the
  134  *   output description for CHECKSUM_PARTIAL. This may occur on a packet
  135  *   received directly from another Linux OS, e.g., a virtualized Linux kernel
  136  *   on the same host, or it may be set in the input path in GRO or remote
  137  *   checksum offload. For the purposes of checksum verification, the checksum
  138  *   referred to by skb->csum_start + skb->csum_offset and any preceding
  139  *   checksums in the packet are considered verified. Any checksums in the
  140  *   packet that are after the checksum being offloaded are not considered to
  141  *   be verified.
  142  *
  143  * C. Checksumming on transmit for non-GSO. The stack requests checksum offload
  144  *    in the skb->ip_summed for a packet. Values are:
  145  *
  146  * CHECKSUM_PARTIAL:
  147  *
  148  *   The driver is required to checksum the packet as seen by hard_start_xmit()
  149  *   from skb->csum_start up to the end, and to record/write the checksum at
  150  *   offset skb->csum_start + skb->csum_offset. A driver may verify that the
  151  *   csum_start and csum_offset values are valid values given the length and
  152  *   offset of the packet, however they should not attempt to validate that the
  153  *   checksum refers to a legitimate transport layer checksum-- it is the
  154  *   purview of the stack to validate that csum_start and csum_offset are set
  155  *   correctly.
  156  *
  157  *   When the stack requests checksum offload for a packet, the driver MUST
  158  *   ensure that the checksum is set correctly. A driver can either offload the
  159  *   checksum calculation to the device, or call skb_checksum_help (in the case
  160  *   that the device does not support offload for a particular checksum).
  161  *
  162  *   NETIF_F_IP_CSUM and NETIF_F_IPV6_CSUM are being deprecated in favor of
  163  *   NETIF_F_HW_CSUM. New devices should use NETIF_F_HW_CSUM to indicate
  164  *   checksum offload capability. If a	device has limited checksum capabilities
  165  *   (for instance can only perform NETIF_F_IP_CSUM or NETIF_F_IPV6_CSUM as
  166  *   described above) a helper function can be called to resolve
  167  *   CHECKSUM_PARTIAL. The helper functions are skb_csum_off_chk*. The helper
  168  *   function takes a spec argument that describes the protocol layer that is
  169  *   supported for checksum offload and can be called for each packet. If a
  170  *   packet does not match the specification for offload, skb_checksum_help
  171  *   is called to resolve the checksum.
  172  *
  173  * CHECKSUM_NONE:
  174  *
  175  *   The skb was already checksummed by the protocol, or a checksum is not
  176  *   required.
  177  *
  178  * CHECKSUM_UNNECESSARY:
  179  *
  180  *   This has the same meaning as CHECKSUM_NONE for checksum offload on
  181  *   output.
  182  *
  183  * CHECKSUM_COMPLETE:
  184  *   Not used in checksum output. If a driver observes a packet with this value
  185  *   set in the skbuff, it should treat the packet as if CHECKSUM_NONE were set.
  186  *
  187  * D. Non-IP checksum (CRC) offloads
  188  *
  189  *   NETIF_F_SCTP_CRC - This feature indicates that a device is capable of
  190  *     offloading the SCTP CRC in a packet. To perform this offload the stack
  191  *     will set ip_summed to CHECKSUM_PARTIAL and set csum_start and csum_offset
  192  *     accordingly. Note that there is no indication in the skbuff that the
  193  *     CHECKSUM_PARTIAL refers to an SCTP checksum, a driver that supports
  194  *     both IP checksum offload and SCTP CRC offload must verify which offload
  195  *     is configured for a packet presumably by inspecting packet headers.
  196  *
  197  *   NETIF_F_FCOE_CRC - This feature indicates that a device is capable of
  198  *     offloading the FCOE CRC in a packet. To perform this offload the stack
  199  *     will set ip_summed to CHECKSUM_PARTIAL and set csum_start and csum_offset
  200  *     accordingly. Note that there is no indication in the skbuff that the
  201  *     CHECKSUM_PARTIAL refers to an FCOE checksum, a driver that supports
  202  *     both IP checksum offload and FCOE CRC offload must verify which offload
  203  *     is configured for a packet presumably by inspecting packet headers.
  204  *
  205  * E. Checksumming on output with GSO.
  206  *
  207  * In the case of a GSO packet (skb_is_gso(skb) is true), checksum offload
  208  * is implied by the SKB_GSO_* flags in gso_type. Most obviously, if the
  209  * gso_type is SKB_GSO_TCPV4 or SKB_GSO_TCPV6, TCP checksum offload as
  210  * part of the GSO operation is implied. If a checksum is being offloaded
  211  * with GSO then ip_summed is CHECKSUM_PARTIAL, csum_start and csum_offset
  212  * are set to refer to the outermost checksum being offloaded (two offloaded
  213  * checksums are possible with UDP encapsulation).
  214  */
  215 
  216 /* Don't change this without changing skb_csum_unnecessary! */
  217 #define CHECKSUM_NONE		0
  218 #define CHECKSUM_UNNECESSARY	1
  219 #define CHECKSUM_COMPLETE	2
  220 #define CHECKSUM_PARTIAL	3
  221 
  222 /* Maximum value in skb->csum_level */
  223 #define SKB_MAX_CSUM_LEVEL	3
  224 
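/* Illustrative driver sketch (hypothetical, based on the receive-side
 * interface described above): a receive path reporting hardware checksum
 * results to the stack; rx_desc_csum_ok() stands in for a device-specific
 * descriptor check.
 *
 *   if (rx_desc_csum_ok(rx_desc))
 *           skb->ip_summed = CHECKSUM_UNNECESSARY;  // verified by hardware
 *   else
 *           skb->ip_summed = CHECKSUM_NONE;         // stack must verify
 *   napi_gro_receive(napi, skb);
 */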
  225 #define SKB_DATA_ALIGN(X)	ALIGN(X, SMP_CACHE_BYTES)
  226 #define SKB_WITH_OVERHEAD(X)	\
  227 	((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
  228 #define SKB_MAX_ORDER(X, ORDER) \
  229 	SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
  230 #define SKB_MAX_HEAD(X)		(SKB_MAX_ORDER((X), 0))
  231 #define SKB_MAX_ALLOC		(SKB_MAX_ORDER(0, 2))
  232 
  233 /* return minimum truesize of one skb containing X bytes of data */
  234 #define SKB_TRUESIZE(X) ((X) +						\
  235 			 SKB_DATA_ALIGN(sizeof(struct sk_buff)) +	\
  236 			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
  237 
  238 struct net_device;
  239 struct scatterlist;
  240 struct pipe_inode_info;
  241 struct iov_iter;
  242 struct napi_struct;
  243 
  244 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
  245 struct nf_conntrack {
  246 	atomic_t use;
  247 };
  248 #endif
  249 
  250 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
  251 struct nf_bridge_info {
  252 	atomic_t		use;
  253 	enum {
  254 		BRNF_PROTO_UNCHANGED,
  255 		BRNF_PROTO_8021Q,
  256 		BRNF_PROTO_PPPOE
  257 	} orig_proto:8;
  258 	u8			pkt_otherhost:1;
  259 	u8			in_prerouting:1;
  260 	u8			bridged_dnat:1;
  261 	__u16			frag_max_size;
  262 	struct net_device	*physindev;
  263 
  264 	/* always valid & non-NULL from FORWARD on, for physdev match */
  265 	struct net_device	*physoutdev;
  266 	union {
  267 		/* prerouting: detect dnat in orig/reply direction */
  268 		__be32          ipv4_daddr;
  269 		struct in6_addr ipv6_daddr;
  270 
  271 		/* after prerouting + nat detected: store original source
  272 		 * mac since neigh resolution overwrites it, only used while
  273 		 * skb is out in neigh layer.
  274 		 */
  275 		char neigh_header[8];
  276 	};
  277 };
  278 #endif
  279 
  280 struct sk_buff_head {
  281 	/* These two members must be first. */
  282 	struct sk_buff	*next;
  283 	struct sk_buff	*prev;
  284 
  285 	__u32		qlen;
  286 	spinlock_t	lock;
  287 };
  288 
  289 struct sk_buff;
  290 
  291 /* To allow a 64K frame to be packed as a single skb without frag_list we
  292  * require 64K/PAGE_SIZE pages plus 1 additional page to allow for
  293  * buffers which do not start on a page boundary.
  294  *
  295  * Since GRO uses frags we allocate at least 16 regardless of page
  296  * size.
  297  */
  298 #if (65536/PAGE_SIZE + 1) < 16
  299 #define MAX_SKB_FRAGS 16UL
  300 #else
  301 #define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
  302 #endif
  303 extern int sysctl_max_skb_frags;
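/* Worked example (added for clarity): with 4 KiB pages,
 * 65536 / 4096 + 1 = 17, which is not less than 16, so MAX_SKB_FRAGS is 17;
 * with 64 KiB pages the formula gives 2, so the GRO minimum of 16UL is
 * used instead. */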
  304 
  305 /* Set skb_shinfo(skb)->gso_size to this in case you want skb_segment to
  306  * segment using its current segmentation instead.
  307  */
  308 #define GSO_BY_FRAGS	0xFFFF
  309 
  310 typedef struct skb_frag_struct skb_frag_t;
  311 
  312 struct skb_frag_struct {
  313 	struct {
  314 		struct page *p;
  315 	} page;
  316 #if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
  317 	__u32 page_offset;
  318 	__u32 size;
  319 #else
  320 	__u16 page_offset;
  321 	__u16 size;
  322 #endif
  323 };
  324 
  325 static inline unsigned int skb_frag_size(const skb_frag_t *frag)
  326 {
  327 	return frag->size;
  328 }
  329 
  330 static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
  331 {
  332 	frag->size = size;
  333 }
  334 
  335 static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
  336 {
  337 	frag->size += delta;
  338 }
  339 
  340 static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
  341 {
  342 	frag->size -= delta;
  343 }
  344 
  345 #define HAVE_HW_TIME_STAMP
  346 
  347 /**
  348  * struct skb_shared_hwtstamps - hardware time stamps
  349  * @hwtstamp:	hardware time stamp transformed into duration
  350  *		since arbitrary point in time
  351  *
  352  * Software time stamps generated by ktime_get_real() are stored in
  353  * skb->tstamp.
  354  *
  355  * hwtstamps can only be compared against other hwtstamps from
  356  * the same device.
  357  *
  358  * This structure is attached to packets as part of the
  359  * &skb_shared_info. Use skb_hwtstamps() to get a pointer.
  360  */
  361 struct skb_shared_hwtstamps {
  362 	ktime_t	hwtstamp;
  363 };
  364 
  365 /* Definitions for tx_flags in struct skb_shared_info */
  366 enum {
  367 	/* generate hardware time stamp */
  368 	SKBTX_HW_TSTAMP = 1 << 0,
  369 
  370 	/* generate software time stamp when queueing packet to NIC */
  371 	SKBTX_SW_TSTAMP = 1 << 1,
  372 
  373 	/* device driver is going to provide hardware time stamp */
  374 	SKBTX_IN_PROGRESS = 1 << 2,
  375 
  376 	/* device driver supports TX zero-copy buffers */
  377 	SKBTX_DEV_ZEROCOPY = 1 << 3,
  378 
  379 	/* generate wifi status information (where possible) */
  380 	SKBTX_WIFI_STATUS = 1 << 4,
  381 
  382 	/* This indicates at least one fragment might be overwritten
  383 	 * (as in vmsplice(), sendfile() ...)
  384 	 * If we need to compute a TX checksum, we'll need to copy
  385 	 * all frags to avoid possible bad checksum
  386 	 */
  387 	SKBTX_SHARED_FRAG = 1 << 5,
  388 
  389 	/* generate software time stamp when entering packet scheduling */
  390 	SKBTX_SCHED_TSTAMP = 1 << 6,
  391 };
  392 
  393 #define SKBTX_ANY_SW_TSTAMP	(SKBTX_SW_TSTAMP    | \
  394 				 SKBTX_SCHED_TSTAMP)
  395 #define SKBTX_ANY_TSTAMP	(SKBTX_HW_TSTAMP | SKBTX_ANY_SW_TSTAMP)
  396 
  397 /*
  398  * The callback notifies userspace to release buffers when skb DMA is done in
  399  * the lower device; the skb's last reference should be 0 when calling this.
  400  * The zerocopy_success argument is true if zero copy transmit occurred,
  401  * false on data copy or out of memory error caused by data copy attempt.
  402  * The ctx field is used to track device context.
  403  * The desc field is used to track userspace buffer index.
  404  */
  405 struct ubuf_info {
  406 	void (*callback)(struct ubuf_info *, bool zerocopy_success);
  407 	void *ctx;
  408 	unsigned long desc;
  409 };
  410 
  411 /* This data is invariant across clones and lives at
  412  * the end of the header data, ie. at skb->end.
  413  */
  414 struct skb_shared_info {
  415 	unsigned char	nr_frags;
  416 	__u8		tx_flags;
  417 	unsigned short	gso_size;
  418 	/* Warning: this field is not always filled in (UFO)! */
  419 	unsigned short	gso_segs;
  420 	unsigned short  gso_type;
  421 	struct sk_buff	*frag_list;
  422 	struct skb_shared_hwtstamps hwtstamps;
  423 	u32		tskey;
  424 	__be32          ip6_frag_id;
  425 
  426 	/*
  427 	 * Warning : all fields before dataref are cleared in __alloc_skb()
  428 	 */
  429 	atomic_t	dataref;
  430 
  431 	/* Intermediate layers must ensure that destructor_arg
  432 	 * remains valid until skb destructor */
  433 	void *		destructor_arg;
  434 
  435 	/* must be last field, see pskb_expand_head() */
  436 	skb_frag_t	frags[MAX_SKB_FRAGS];
  437 };
  438 
  439 /* We divide dataref into two halves.  The higher 16 bits hold references
  440  * to the payload part of skb->data.  The lower 16 bits hold references to
  441  * the entire skb->data.  A clone of a headerless skb holds the length of
  442  * the header in skb->hdr_len.
  443  *
  444  * All users must obey the rule that the skb->data reference count must be
  445  * greater than or equal to the payload reference count.
  446  *
  447  * Holding a reference to the payload part means that the user does not
  448  * care about modifications to the header part of skb->data.
  449  */
  450 #define SKB_DATAREF_SHIFT 16
  451 #define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)
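/* Illustrative sketch (added for clarity): decoding the two halves of
 * dataref described above.
 *
 *   int ref = atomic_read(&skb_shinfo(skb)->dataref);
 *   int payload_refs = ref >> SKB_DATAREF_SHIFT;  // payload-only holders
 *   int total_refs   = ref & SKB_DATAREF_MASK;    // all skb->data holders
 */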
  452 
  453 
  454 enum {
  455 	SKB_FCLONE_UNAVAILABLE,	/* skb has no fclone (from head_cache) */
  456 	SKB_FCLONE_ORIG,	/* orig skb (from fclone_cache) */
  457 	SKB_FCLONE_CLONE,	/* companion fclone skb (from fclone_cache) */
  458 };
  459 
  460 enum {
  461 	SKB_GSO_TCPV4 = 1 << 0,
  462 	SKB_GSO_UDP = 1 << 1,
  463 
  464 	/* This indicates the skb is from an untrusted source. */
  465 	SKB_GSO_DODGY = 1 << 2,
  466 
  467 	/* This indicates the tcp segment has CWR set. */
  468 	SKB_GSO_TCP_ECN = 1 << 3,
  469 
  470 	SKB_GSO_TCP_FIXEDID = 1 << 4,
  471 
  472 	SKB_GSO_TCPV6 = 1 << 5,
  473 
  474 	SKB_GSO_FCOE = 1 << 6,
  475 
  476 	SKB_GSO_GRE = 1 << 7,
  477 
  478 	SKB_GSO_GRE_CSUM = 1 << 8,
  479 
  480 	SKB_GSO_IPXIP4 = 1 << 9,
  481 
  482 	SKB_GSO_IPXIP6 = 1 << 10,
  483 
  484 	SKB_GSO_UDP_TUNNEL = 1 << 11,
  485 
  486 	SKB_GSO_UDP_TUNNEL_CSUM = 1 << 12,
  487 
  488 	SKB_GSO_PARTIAL = 1 << 13,
  489 
  490 	SKB_GSO_TUNNEL_REMCSUM = 1 << 14,
  491 
  492 	SKB_GSO_SCTP = 1 << 15,
  493 };
  494 
  495 #if BITS_PER_LONG > 32
  496 #define NET_SKBUFF_DATA_USES_OFFSET 1
  497 #endif
  498 
  499 #ifdef NET_SKBUFF_DATA_USES_OFFSET
  500 typedef unsigned int sk_buff_data_t;
  501 #else
  502 typedef unsigned char *sk_buff_data_t;
  503 #endif
  504 
  505 /**
  506  * struct skb_mstamp - multi resolution time stamps
  507  * @stamp_us: timestamp in us resolution
  508  * @stamp_jiffies: timestamp in jiffies
  509  */
  510 struct skb_mstamp {
  511 	union {
  512 		u64		v64;
  513 		struct {
  514 			u32	stamp_us;
  515 			u32	stamp_jiffies;
  516 		};
  517 	};
  518 };
  519 
  520 /**
  521  * skb_mstamp_get - get current timestamp
  522  * @cl: place to store timestamps
  523  */
  524 static inline void skb_mstamp_get(struct skb_mstamp *cl)
  525 {
  526 	u64 val = local_clock();
  527 
  528 	do_div(val, NSEC_PER_USEC);
  529 	cl->stamp_us = (u32)val;
  530 	cl->stamp_jiffies = (u32)jiffies;
  531 }
  532 
  533 /**
  534  * skb_mstamp_us_delta - compute the difference in usec between two skb_mstamp
  535  * @t1: pointer to newest sample
  536  * @t0: pointer to oldest sample
  537  */
  538 static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1,
  539 				      const struct skb_mstamp *t0)
  540 {
  541 	s32 delta_us = t1->stamp_us - t0->stamp_us;
  542 	u32 delta_jiffies = t1->stamp_jiffies - t0->stamp_jiffies;
  543 
  544 	/* If delta_us is negative, this might be because interval is too big,
  545 	 * or local_clock() drift is too big : fallback using jiffies.
  546 	 */
  547 	if (delta_us <= 0 ||
  548 	    delta_jiffies >= (INT_MAX / (USEC_PER_SEC / HZ)))
  549 
  550 		delta_us = jiffies_to_usecs(delta_jiffies);
  551 
  552 	return delta_us;
  553 }
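/* Usage sketch (hypothetical, added for clarity): measuring an elapsed
 * interval, e.g. a round-trip estimate, with the helpers above. Note the
 * argument order: newest sample first.
 *
 *   struct skb_mstamp sent, now;
 *
 *   skb_mstamp_get(&sent);
 *   // ... later, e.g. when an acknowledgment arrives ...
 *   skb_mstamp_get(&now);
 *   u32 rtt_us = skb_mstamp_us_delta(&now, &sent);
 */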
  554 
  555 static inline bool skb_mstamp_after(const struct skb_mstamp *t1,
  556 				    const struct skb_mstamp *t0)
  557 {
  558 	s32 diff = t1->stamp_jiffies - t0->stamp_jiffies;
  559 
  560 	if (!diff)
  561 		diff = t1->stamp_us - t0->stamp_us;
  562 	return diff > 0;
  563 }
  564 
  565 /** 
  566  *	struct sk_buff - socket buffer
  567  *	@next: Next buffer in list
  568  *	@prev: Previous buffer in list
  569  *	@tstamp: Time we arrived/left
  570  *	@rbnode: RB tree node, alternative to next/prev for netem/tcp
  571  *	@sk: Socket we are owned by
  572  *	@dev: Device we arrived on/are leaving by
  573  *	@cb: Control buffer. Free for use by every layer. Put private vars here
  574  *	@_skb_refdst: destination entry (with norefcount bit)
  575  *	@sp: the security path, used for xfrm
  576  *	@len: Length of actual data
  577  *	@data_len: Data length
  578  *	@mac_len: Length of link layer header
  579  *	@hdr_len: writable header length of cloned skb
  580  *	@csum: Checksum (must include start/offset pair)
  581  *	@csum_start: Offset from skb->head where checksumming should start
  582  *	@csum_offset: Offset from csum_start where checksum should be stored
  583  *	@priority: Packet queueing priority
  584  *	@ignore_df: allow local fragmentation
  585  *	@cloned: Head may be cloned (check refcnt to be sure)
  586  *	@ip_summed: Driver fed us an IP checksum
  587  *	@nohdr: Payload reference only, must not modify header
  588  *	@nfctinfo: Relationship of this skb to the connection
  589  *	@pkt_type: Packet class
  590  *	@fclone: skbuff clone status
  591  *	@ipvs_property: skbuff is owned by ipvs
  592  *	@peeked: this packet has been seen already, so stats have been
  593  *		done for it, don't do them again
  594  *	@nf_trace: netfilter packet trace flag
  595  *	@protocol: Packet protocol from driver
  596  *	@destructor: Destruct function
  597  *	@nfct: Associated connection, if any
  598  *	@nf_bridge: Saved data about a bridged frame - see br_netfilter.c
  599  *	@skb_iif: ifindex of device we arrived on
  600  *	@tc_index: Traffic control index
  601  *	@tc_verd: traffic control verdict
  602  *	@hash: the packet hash
  603  *	@queue_mapping: Queue mapping for multiqueue devices
  604  *	@xmit_more: More SKBs are pending for this queue
  605  *	@ndisc_nodetype: router type (from link layer)
  606  *	@ooo_okay: allow the mapping of a socket to a queue to be changed
  607  *	@l4_hash: indicate hash is a canonical 4-tuple hash over transport
  608  *		ports.
  609  *	@sw_hash: indicates hash was computed in software stack
  610  *	@wifi_acked_valid: wifi_acked was set
  611  *	@wifi_acked: whether frame was acked on wifi or not
  612  *	@no_fcs:  Request NIC to treat last 4 bytes as Ethernet FCS
  613  *	@napi_id: id of the NAPI struct this skb came from
  614  *	@secmark: security marking
  615  *	@offload_fwd_mark: fwding offload mark
  616  *	@mark: Generic packet mark
  617  *	@vlan_proto: vlan encapsulation protocol
  618  *	@vlan_tci: vlan tag control information
  619  *	@inner_protocol: Protocol (encapsulation)
  620  *	@inner_transport_header: Inner transport layer header (encapsulation)
  621  *	@inner_network_header: Network layer header (encapsulation)
  622  *	@inner_mac_header: Link layer header (encapsulation)
  623  *	@transport_header: Transport layer header
  624  *	@network_header: Network layer header
  625  *	@mac_header: Link layer header
  626  *	@tail: Tail pointer
  627  *	@end: End pointer
  628  *	@head: Head of buffer
  629  *	@data: Data head pointer
  630  *	@truesize: Buffer size
  631  *	@users: User count - see {datagram,tcp}.c
  632  */
  633 
  634 struct sk_buff {
  635 	union {
  636 		struct {
  637 			/* These two members must be first. */
  638 			struct sk_buff		*next;
  639 			struct sk_buff		*prev;
  640 
  641 			union {
  642 				ktime_t		tstamp;
  643 				struct skb_mstamp skb_mstamp;
  644 			};
  645 		};
  646 		struct rb_node	rbnode; /* used in netem & tcp stack */
  647 	};
  648 	struct sock		*sk;
  649 	struct net_device	*dev;
  650 
  651 	/*
  652 	 * This is the control buffer. It is free to use for every
  653 	 * layer. Please put your private variables there. If you
  654 	 * want to keep them across layers you have to do a skb_clone()
  655 	 * first. This is owned by whoever has the skb queued ATM.
  656 	 */
  657 	char			cb[48] __aligned(8);
  658 
  659 	unsigned long		_skb_refdst;
  660 	void			(*destructor)(struct sk_buff *skb);
  661 #ifdef CONFIG_XFRM
  662 	struct	sec_path	*sp;
  663 #endif
  664 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
  665 	struct nf_conntrack	*nfct;
  666 #endif
  667 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
  668 	struct nf_bridge_info	*nf_bridge;
  669 #endif
  670 	unsigned int		len,
  671 				data_len;
  672 	__u16			mac_len,
  673 				hdr_len;
  674 
  675 	/* Following fields are _not_ copied in __copy_skb_header()
  676 	 * Note that queue_mapping is here mostly to fill a hole.
  677 	 */
  678 	kmemcheck_bitfield_begin(flags1);
  679 	__u16			queue_mapping;
  680 	__u8			cloned:1,
  681 				nohdr:1,
  682 				fclone:2,
  683 				peeked:1,
  684 				head_frag:1,
  685 				xmit_more:1;
  686 	/* one bit hole */
  687 	kmemcheck_bitfield_end(flags1);
  688 
  689 	/* fields enclosed in headers_start/headers_end are copied
  690 	 * using a single memcpy() in __copy_skb_header()
  691 	 */
  692 	/* private: */
  693 	__u32			headers_start[0];
  694 	/* public: */
  695 
  696 /* if you move pkt_type around you also must adapt those constants */
  697 #ifdef __BIG_ENDIAN_BITFIELD
  698 #define PKT_TYPE_MAX	(7 << 5)
  699 #else
  700 #define PKT_TYPE_MAX	7
  701 #endif
  702 #define PKT_TYPE_OFFSET()	offsetof(struct sk_buff, __pkt_type_offset)
  703 
  704 	__u8			__pkt_type_offset[0];
  705 	__u8			pkt_type:3;
  706 	__u8			pfmemalloc:1;
  707 	__u8			ignore_df:1;
  708 	__u8			nfctinfo:3;
  709 
  710 	__u8			nf_trace:1;
  711 	__u8			ip_summed:2;
  712 	__u8			ooo_okay:1;
  713 	__u8			l4_hash:1;
  714 	__u8			sw_hash:1;
  715 	__u8			wifi_acked_valid:1;
  716 	__u8			wifi_acked:1;
  717 
  718 	__u8			no_fcs:1;
  719 	/* Indicates the inner headers are valid in the skbuff. */
  720 	__u8			encapsulation:1;
  721 	__u8			encap_hdr_csum:1;
  722 	__u8			csum_valid:1;
  723 	__u8			csum_complete_sw:1;
  724 	__u8			csum_level:2;
  725 	__u8			csum_bad:1;
  726 
  727 #ifdef CONFIG_IPV6_NDISC_NODETYPE
  728 	__u8			ndisc_nodetype:2;
  729 #endif
  730 	__u8			ipvs_property:1;
  731 	__u8			inner_protocol_type:1;
  732 	__u8			remcsum_offload:1;
  733 	/* 3 or 5 bit hole */
  734 
  735 #ifdef CONFIG_NET_SCHED
  736 	__u16			tc_index;	/* traffic control index */
  737 #ifdef CONFIG_NET_CLS_ACT
  738 	__u16			tc_verd;	/* traffic control verdict */
  739 #endif
  740 #endif
  741 
  742 	union {
  743 		__wsum		csum;
  744 		struct {
  745 			__u16	csum_start;
  746 			__u16	csum_offset;
  747 		};
  748 	};
  749 	__u32			priority;
  750 	int			skb_iif;
  751 	__u32			hash;
  752 	__be16			vlan_proto;
  753 	__u16			vlan_tci;
  754 #if defined(CONFIG_NET_RX_BUSY_POLL) || defined(CONFIG_XPS)
  755 	union {
  756 		unsigned int	napi_id;
  757 		unsigned int	sender_cpu;
  758 	};
  759 #endif
  760 	union {
  761 #ifdef CONFIG_NETWORK_SECMARK
  762 		__u32		secmark;
  763 #endif
  764 #ifdef CONFIG_NET_SWITCHDEV
  765 		__u32		offload_fwd_mark;
  766 #endif
  767 	};
  768 
  769 	union {
  770 		__u32		mark;
  771 		__u32		reserved_tailroom;
  772 	};
  773 
  774 	union {
  775 		__be16		inner_protocol;
  776 		__u8		inner_ipproto;
  777 	};
  778 
  779 	__u16			inner_transport_header;
  780 	__u16			inner_network_header;
  781 	__u16			inner_mac_header;
  782 
  783 	__be16			protocol;
  784 	__u16			transport_header;
  785 	__u16			network_header;
  786 	__u16			mac_header;
  787 
  788 	/* private: */
  789 	__u32			headers_end[0];
  790 	/* public: */
  791 
  792 	/* These elements must be at the end, see alloc_skb() for details.  */
  793 	sk_buff_data_t		tail;
  794 	sk_buff_data_t		end;
  795 	unsigned char		*head,
  796 				*data;
  797 	unsigned int		truesize;
  798 	atomic_t		users;
  799 };
  800 
  801 #ifdef __KERNEL__
  802 /*
  803  *	Handling routines are only of interest to the kernel
  804  */
  805 #include <linux/slab.h>
  806 
  807 
  808 #define SKB_ALLOC_FCLONE	0x01
  809 #define SKB_ALLOC_RX		0x02
  810 #define SKB_ALLOC_NAPI		0x04
  811 
  812 /* Returns true if the skb was allocated from PFMEMALLOC reserves */
  813 static inline bool skb_pfmemalloc(const struct sk_buff *skb)
  814 {
  815 	return unlikely(skb->pfmemalloc);
  816 }
  817 
  818 /*
  819  * skb might have a dst pointer attached, refcounted or not.
  820  * _skb_refdst low order bit is set if refcount was _not_ taken
  821  */
  822 #define SKB_DST_NOREF	1UL
  823 #define SKB_DST_PTRMASK	~(SKB_DST_NOREF)
  824 
  825 /**
  826  * skb_dst - returns skb dst_entry
  827  * @skb: buffer
  828  *
  829  * Returns skb dst_entry, regardless of reference taken or not.
  830  */
  831 static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
  832 {
  833 	/* If refdst was not refcounted, check that we are still in an
  834 	 * rcu_read_lock section
  835 	 */
  836 	WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) &&
  837 		!rcu_read_lock_held() &&
  838 		!rcu_read_lock_bh_held());
  839 	return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK);
  840 }
  841 
  842 /**
  843  * skb_dst_set - sets skb dst
  844  * @skb: buffer
  845  * @dst: dst entry
  846  *
  847  * Sets skb dst, assuming a reference was taken on dst and should
  848  * be released by skb_dst_drop()
  849  */
  850 static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
  851 {
  852 	skb->_skb_refdst = (unsigned long)dst;
  853 }
  854 
  855 /**
  856  * skb_dst_set_noref - sets skb dst, hopefully, without taking reference
  857  * @skb: buffer
  858  * @dst: dst entry
  859  *
  860  * Sets skb dst, assuming a reference was not taken on dst.
  861  * If dst entry is cached, we do not take reference and dst_release
  862  * will be avoided by refdst_drop. If dst entry is not cached, we take
  863  * reference, so that last dst_release can destroy the dst immediately.
  864  */
  865 static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst)
  866 {
  867 	WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
  868 	skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF;
  869 }
  870 
  871 /**
  872  * skb_dst_is_noref - Test if skb dst isn't refcounted
  873  * @skb: buffer
  874  */
  875 static inline bool skb_dst_is_noref(const struct sk_buff *skb)
  876 {
  877 	return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb);
  878 }
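/* Usage sketch (hypothetical, added for clarity): attaching a cached,
 * non-refcounted dst is only legal inside an RCU read-side section, which
 * is what the WARN_ON() in skb_dst_set_noref() checks. The skb must not
 * outlive the RCU section while it still carries the noref dst.
 *
 *   rcu_read_lock();
 *   dst = rcu_dereference(cache->dst);    // hypothetical cached entry
 *   if (dst)
 *           skb_dst_set_noref(skb, dst);  // no reference taken
 *   ...                                   // process skb within the section
 *   rcu_read_unlock();
 */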
  879 
  880 static inline struct rtable *skb_rtable(const struct sk_buff *skb)
  881 {
  882 	return (struct rtable *)skb_dst(skb);
  883 }
  884 
  885 /* For mangling skb->pkt_type from the user space side, from applications
  886  * such as nft, tc, etc., we only allow a conservative subset of
  887  * possible pkt_types to be set.
  888  */
  889 static inline bool skb_pkt_type_ok(u32 ptype)
  890 {
  891 	return ptype <= PACKET_OTHERHOST;
  892 }
  893 
  894 void kfree_skb(struct sk_buff *skb);
  895 void kfree_skb_list(struct sk_buff *segs);
  896 void skb_tx_error(struct sk_buff *skb);
  897 void consume_skb(struct sk_buff *skb);
  898 void  __kfree_skb(struct sk_buff *skb);
  899 extern struct kmem_cache *skbuff_head_cache;
  900 
  901 void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
  902 bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
  903 		      bool *fragstolen, int *delta_truesize);
  904 
  905 struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
  906 			    int node);
  907 struct sk_buff *__build_skb(void *data, unsigned int frag_size);
  908 struct sk_buff *build_skb(void *data, unsigned int frag_size);
  909 static inline struct sk_buff *alloc_skb(unsigned int size,
  910 					gfp_t priority)
  911 {
  912 	return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
  913 }
  914 
  915 struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
  916 				     unsigned long data_len,
  917 				     int max_page_order,
  918 				     int *errcode,
  919 				     gfp_t gfp_mask);
  920 
  921 /* Layout of fast clones : [skb1][skb2][fclone_ref] */
  922 struct sk_buff_fclones {
  923 	struct sk_buff	skb1;
  924 
  925 	struct sk_buff	skb2;
  926 
  927 	atomic_t	fclone_ref;
  928 };
  929 
  930 /**
  931  *	skb_fclone_busy - check if fclone is busy
  932  *	@skb: buffer
  933  *
  934  * Returns true if skb is a fast clone, and its clone is not freed.
  935  * Some drivers call skb_orphan() in their ndo_start_xmit(),
  936  * so we also check that this didn't happen.
  937  */
  938 static inline bool skb_fclone_busy(const struct sock *sk,
  939 				   const struct sk_buff *skb)
  940 {
  941 	const struct sk_buff_fclones *fclones;
  942 
  943 	fclones = container_of(skb, struct sk_buff_fclones, skb1);
  944 
  945 	return skb->fclone == SKB_FCLONE_ORIG &&
  946 	       atomic_read(&fclones->fclone_ref) > 1 &&
  947 	       fclones->skb2.sk == sk;
  948 }
  949 
  950 static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
  951 					       gfp_t priority)
  952 {
  953 	return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE);
  954 }
  955 
  956 struct sk_buff *__alloc_skb_head(gfp_t priority, int node);
  957 static inline struct sk_buff *alloc_skb_head(gfp_t priority)
  958 {
  959 	return __alloc_skb_head(priority, -1);
  960 }
  961 
  962 struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
  963 int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
  964 struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority);
  965 struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t priority);
  966 struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
  967 				   gfp_t gfp_mask, bool fclone);
  968 static inline struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom,
  969 					  gfp_t gfp_mask)
  970 {
  971 	return __pskb_copy_fclone(skb, headroom, gfp_mask, false);
  972 }
  973 
  974 int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask);
  975 struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
  976 				     unsigned int headroom);
  977 struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom,
  978 				int newtailroom, gfp_t priority);
  979 int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
  980 			int offset, int len);
  981 int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset,
  982 		 int len);
  983 int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer);
  984 int skb_pad(struct sk_buff *skb, int pad);
  985 #define dev_kfree_skb(a)	consume_skb(a)
  986 
  987 int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
  988 			    int getfrag(void *from, char *to, int offset,
  989 					int len, int odd, struct sk_buff *skb),
  990 			    void *from, int length);
  991 
  992 int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
  993 			 int offset, size_t size);
  994 
  995 struct skb_seq_state {
  996 	__u32		lower_offset;
  997 	__u32		upper_offset;
  998 	__u32		frag_idx;
  999 	__u32		stepped_offset;
 1000 	struct sk_buff	*root_skb;
 1001 	struct sk_buff	*cur_skb;
 1002 	__u8		*frag_data;
 1003 };
 1004 
 1005 void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
 1006 			  unsigned int to, struct skb_seq_state *st);
 1007 unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
 1008 			  struct skb_seq_state *st);
 1009 void skb_abort_seq_read(struct skb_seq_state *st);
 1010 
 1011 unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
 1012 			   unsigned int to, struct ts_config *config);
 1013 
 1014 /*
 1015  * Packet hash types specify the type of hash in skb_set_hash.
 1016  *
 1017  * Hash types refer to the protocol layer addresses which are used to
 1018  * construct a packet's hash. The hashes are used to differentiate or identify
 1019  * flows of the protocol layer for the hash type. Hash types are either
 1020  * layer-2 (L2), layer-3 (L3), or layer-4 (L4).
 1021  *
 1022  * Properties of hashes:
 1023  *
 1024  * 1) Two packets in different flows have different hash values
 1025  * 2) Two packets in the same flow should have the same hash value
 1026  *
 1027  * A hash at a higher layer is considered to be more specific. A driver should
 1028  * set the most specific hash possible.
 1029  *
 1030  * A driver cannot indicate a more specific hash than the layer at which a hash
 1031  * was computed. For instance an L3 hash cannot be set as an L4 hash.
 1032  *
 1033  * A driver may indicate a hash level which is less specific than the
 1034  * actual layer the hash was computed on. For instance, a hash computed
 1035  * at L4 may be considered an L3 hash. This should only be done if the
 1036  * driver can't unambiguously determine that the HW computed the hash at
 1037  * the higher layer. Note that the "should" in the second property above
 1038  * permits this.
 1039  */
 1040 enum pkt_hash_types {
 1041 	PKT_HASH_TYPE_NONE,	/* Undefined type */
 1042 	PKT_HASH_TYPE_L2,	/* Input: src_MAC, dest_MAC */
 1043 	PKT_HASH_TYPE_L3,	/* Input: src_IP, dst_IP */
 1044 	PKT_HASH_TYPE_L4,	/* Input: src_IP, dst_IP, src_port, dst_port */
 1045 };
 1046 
 1047 static inline void skb_clear_hash(struct sk_buff *skb)
 1048 {
 1049 	skb->hash = 0;
 1050 	skb->sw_hash = 0;
 1051 	skb->l4_hash = 0;
 1052 }
 1053 
 1054 static inline void skb_clear_hash_if_not_l4(struct sk_buff *skb)
 1055 {
 1056 	if (!skb->l4_hash)
 1057 		skb_clear_hash(skb);
 1058 }
 1059 
 1060 static inline void
 1061 __skb_set_hash(struct sk_buff *skb, __u32 hash, bool is_sw, bool is_l4)
 1062 {
 1063 	skb->l4_hash = is_l4;
 1064 	skb->sw_hash = is_sw;
 1065 	skb->hash = hash;
 1066 }
 1067 
 1068 static inline void
 1069 skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
 1070 {
 1071 	/* Used by drivers to set hash from HW */
 1072 	__skb_set_hash(skb, hash, false, type == PKT_HASH_TYPE_L4);
 1073 }
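
/* Editor's example -- not part of the traced source. A minimal sketch of how
 * a driver might report a hardware-computed hash via skb_set_hash() above.
 * The function name and the hw_hash/hw_is_l4 parameters are hypothetical.
 */
static inline void example_rx_report_hash(struct sk_buff *skb, u32 hw_hash,
					  bool hw_is_l4)
{
	/* Report the most specific level the HW guarantees: an L4 hash may
	 * legitimately be reported as L3, but never the other way around.
	 */
	skb_set_hash(skb, hw_hash,
		     hw_is_l4 ? PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
}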
 1074 
 1075 static inline void
 1076 __skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4)
 1077 {
 1078 	__skb_set_hash(skb, hash, true, is_l4);
 1079 }
 1080 
 1081 void __skb_get_hash(struct sk_buff *skb);
 1082 u32 __skb_get_hash_symmetric(struct sk_buff *skb);
 1083 u32 skb_get_poff(const struct sk_buff *skb);
 1084 u32 __skb_get_poff(const struct sk_buff *skb, void *data,
 1085 		   const struct flow_keys *keys, int hlen);
 1086 __be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
 1087 			    void *data, int hlen_proto);
 1088 
 1089 static inline __be32 skb_flow_get_ports(const struct sk_buff *skb,
 1090 					int thoff, u8 ip_proto)
 1091 {
 1092 	return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
 1093 }
 1094 
 1095 void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
 1096 			     const struct flow_dissector_key *key,
 1097 			     unsigned int key_count);
 1098 
 1099 bool __skb_flow_dissect(const struct sk_buff *skb,
 1100 			struct flow_dissector *flow_dissector,
 1101 			void *target_container,
 1102 			void *data, __be16 proto, int nhoff, int hlen,
 1103 			unsigned int flags);
 1104 
 1105 static inline bool skb_flow_dissect(const struct sk_buff *skb,
 1106 				    struct flow_dissector *flow_dissector,
 1107 				    void *target_container, unsigned int flags)
 1108 {
 1109 	return __skb_flow_dissect(skb, flow_dissector, target_container,
 1110 				  NULL, 0, 0, 0, flags);
 1111 }
 1112 
 1113 static inline bool skb_flow_dissect_flow_keys(const struct sk_buff *skb,
 1114 					      struct flow_keys *flow,
 1115 					      unsigned int flags)
 1116 {
 1117 	memset(flow, 0, sizeof(*flow));
 1118 	return __skb_flow_dissect(skb, &flow_keys_dissector, flow,
 1119 				  NULL, 0, 0, 0, flags);
 1120 }
 1121 
 1122 static inline bool skb_flow_dissect_flow_keys_buf(struct flow_keys *flow,
 1123 						  void *data, __be16 proto,
 1124 						  int nhoff, int hlen,
 1125 						  unsigned int flags)
 1126 {
 1127 	memset(flow, 0, sizeof(*flow));
 1128 	return __skb_flow_dissect(NULL, &flow_keys_buf_dissector, flow,
 1129 				  data, proto, nhoff, hlen, flags);
 1130 }
 1131 
 1132 static inline __u32 skb_get_hash(struct sk_buff *skb)
 1133 {
 1134 	if (!skb->l4_hash && !skb->sw_hash)
 1135 		__skb_get_hash(skb);
 1136 
 1137 	return skb->hash;
 1138 }
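
/* Editor's example -- hypothetical. One common consumer of the cached flow
 * hash: spreading packets over transmit queues. A real driver would prefer
 * reciprocal_scale() to a modulo; num_queues is assumed to be non-zero.
 */
static inline u16 example_pick_queue(struct sk_buff *skb, u16 num_queues)
{
	return skb_get_hash(skb) % num_queues;
}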
 1139 
 1140 __u32 __skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6);
 1141 
 1142 static inline __u32 skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6)
 1143 {
 1144 	if (!skb->l4_hash && !skb->sw_hash) {
 1145 		struct flow_keys keys;
 1146 		__u32 hash = __get_hash_from_flowi6(fl6, &keys);
 1147 
 1148 		__skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
 1149 	}
 1150 
 1151 	return skb->hash;
 1152 }
 1153 
 1154 __u32 __skb_get_hash_flowi4(struct sk_buff *skb, const struct flowi4 *fl);
 1155 
 1156 static inline __u32 skb_get_hash_flowi4(struct sk_buff *skb, const struct flowi4 *fl4)
 1157 {
 1158 	if (!skb->l4_hash && !skb->sw_hash) {
 1159 		struct flow_keys keys;
 1160 		__u32 hash = __get_hash_from_flowi4(fl4, &keys);
 1161 
 1162 		__skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
 1163 	}
 1164 
 1165 	return skb->hash;
 1166 }
 1167 
 1168 __u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb);
 1169 
 1170 static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
 1171 {
 1172 	return skb->hash;
 1173 }
 1174 
 1175 static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
 1176 {
 1177 	to->hash = from->hash;
 1178 	to->sw_hash = from->sw_hash;
 1179 	to->l4_hash = from->l4_hash;
 1180 }
 1181 
 1182 #ifdef NET_SKBUFF_DATA_USES_OFFSET
 1183 static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
 1184 {
 1185 	return skb->head + skb->end;
 1186 }
 1187 
 1188 static inline unsigned int skb_end_offset(const struct sk_buff *skb)
 1189 {
 1190 	return skb->end;
 1191 }
 1192 #else
 1193 static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
 1194 {
 1195 	return skb->end;
 1196 }
 1197 
 1198 static inline unsigned int skb_end_offset(const struct sk_buff *skb)
 1199 {
 1200 	return skb->end - skb->head;
 1201 }
 1202 #endif
 1203 
 1204 /* Internal */
 1205 #define skb_shinfo(SKB)	((struct skb_shared_info *)(skb_end_pointer(SKB)))
 1206 
 1207 static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
 1208 {
 1209 	return &skb_shinfo(skb)->hwtstamps;
 1210 }
 1211 
 1212 /**
 1213  *	skb_queue_empty - check if a queue is empty
 1214  *	@list: queue head
 1215  *
 1216  *	Returns true if the queue is empty, false otherwise.
 1217  */
 1218 static inline int skb_queue_empty(const struct sk_buff_head *list)
 1219 {
 1220 	return list->next == (const struct sk_buff *) list;
 1221 }
 1222 
 1223 /**
 1224  *	skb_queue_is_last - check if skb is the last entry in the queue
 1225  *	@list: queue head
 1226  *	@skb: buffer
 1227  *
 1228  *	Returns true if @skb is the last buffer on the list.
 1229  */
 1230 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
 1231 				     const struct sk_buff *skb)
 1232 {
 1233 	return skb->next == (const struct sk_buff *) list;
 1234 }
 1235 
 1236 /**
 1237  *	skb_queue_is_first - check if skb is the first entry in the queue
 1238  *	@list: queue head
 1239  *	@skb: buffer
 1240  *
 1241  *	Returns true if @skb is the first buffer on the list.
 1242  */
 1243 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
 1244 				      const struct sk_buff *skb)
 1245 {
 1246 	return skb->prev == (const struct sk_buff *) list;
 1247 }
 1248 
 1249 /**
 1250  *	skb_queue_next - return the next packet in the queue
 1251  *	@list: queue head
 1252  *	@skb: current buffer
 1253  *
 1254  *	Return the next packet in @list after @skb.  It is only valid to
 1255  *	call this if skb_queue_is_last() evaluates to false.
 1256  */
 1257 static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
 1258 					     const struct sk_buff *skb)
 1259 {
 1260 	/* This BUG_ON may seem severe, but if we just return then we
 1261 	 * are going to dereference garbage.
 1262 	 */
 1263 	BUG_ON(skb_queue_is_last(list, skb));
 1264 	return skb->next;
 1265 }
 1266 
 1267 /**
 1268  *	skb_queue_prev - return the prev packet in the queue
 1269  *	@list: queue head
 1270  *	@skb: current buffer
 1271  *
 1272  *	Return the prev packet in @list before @skb.  It is only valid to
 1273  *	call this if skb_queue_is_first() evaluates to false.
 1274  */
 1275 static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
 1276 					     const struct sk_buff *skb)
 1277 {
 1278 	/* This BUG_ON may seem severe, but if we just return then we
 1279 	 * are going to dereference garbage.
 1280 	 */
 1281 	BUG_ON(skb_queue_is_first(list, skb));
 1282 	return skb->prev;
 1283 }
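
/* Editor's example -- hypothetical. Walking a queue with the helpers above;
 * skb_peek() is defined further below in this header. The caller must hold
 * the queue lock or own the queue privately, since these accessors take no
 * locks and no references.
 */
static inline unsigned int example_queue_bytes(const struct sk_buff_head *list)
{
	const struct sk_buff *skb = skb_peek(list);
	unsigned int bytes = 0;

	while (skb) {
		bytes += skb->len;
		skb = skb_queue_is_last(list, skb) ?
		      NULL : skb_queue_next(list, skb);
	}
	return bytes;
}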
 1284 
 1285 /**
 1286  *	skb_get - reference buffer
 1287  *	@skb: buffer to reference
 1288  *
 1289  *	Makes another reference to a socket buffer and returns a pointer
 1290  *	to the buffer.
 1291  */
 1292 static inline struct sk_buff *skb_get(struct sk_buff *skb)
 1293 {
 1294 	atomic_inc(&skb->users);
 1295 	return skb;
 1296 }
 1297 
 1298 /*
 1299  * If users == 1, we are the only owner and can avoid redundant
 1300  * atomic changes.
 1301  */
 1302 
 1303 /**
 1304  *	skb_cloned - is the buffer a clone
 1305  *	@skb: buffer to check
 1306  *
 1307  *	Returns true if the buffer was generated with skb_clone() and is
 1308  *	one of multiple shared copies of the buffer. Cloned buffers are
 1309  *	shared data so must not be written to under normal circumstances.
 1310  */
 1311 static inline int skb_cloned(const struct sk_buff *skb)
 1312 {
 1313 	return skb->cloned &&
 1314 	       (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
 1315 }
 1316 
 1317 static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
 1318 {
 1319 	might_sleep_if(gfpflags_allow_blocking(pri));
 1320 
 1321 	if (skb_cloned(skb))
 1322 		return pskb_expand_head(skb, 0, 0, pri);
 1323 
 1324 	return 0;
 1325 }
 1326 
 1327 /**
 1328  *	skb_header_cloned - is the header a clone
 1329  *	@skb: buffer to check
 1330  *
 1331  *	Returns true if modifying the header part of the buffer requires
 1332  *	the data to be copied.
 1333  */
 1334 static inline int skb_header_cloned(const struct sk_buff *skb)
 1335 {
 1336 	int dataref;
 1337 
 1338 	if (!skb->cloned)
 1339 		return 0;
 1340 
 1341 	dataref = atomic_read(&skb_shinfo(skb)->dataref);
 1342 	dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
 1343 	return dataref != 1;
 1344 }
 1345 
 1346 static inline int skb_header_unclone(struct sk_buff *skb, gfp_t pri)
 1347 {
 1348 	might_sleep_if(gfpflags_allow_blocking(pri));
 1349 
 1350 	if (skb_header_cloned(skb))
 1351 		return pskb_expand_head(skb, 0, 0, pri);
 1352 
 1353 	return 0;
 1354 }
 1355 
 1356 /**
 1357  *	skb_header_release - release reference to header
 1358  *	@skb: buffer to operate on
 1359  *
 1360  *	Drop a reference to the header part of the buffer.  This is done
 1361  *	by acquiring a payload reference.  You must not read from the header
 1362  *	part of skb->data after this.
 1363  *	Note: Check if you can use __skb_header_release() instead.
 1364  */
 1365 static inline void skb_header_release(struct sk_buff *skb)
 1366 {
 1367 	BUG_ON(skb->nohdr);
 1368 	skb->nohdr = 1;
 1369 	atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref);
 1370 }
 1371 
 1372 /**
 1373  *	__skb_header_release - release reference to header
 1374  *	@skb: buffer to operate on
 1375  *
 1376  *	Variant of skb_header_release() assuming skb is private to caller.
 1377  *	We can avoid one atomic operation.
 1378  */
 1379 static inline void __skb_header_release(struct sk_buff *skb)
 1380 {
 1381 	skb->nohdr = 1;
 1382 	atomic_set(&skb_shinfo(skb)->dataref, 1 + (1 << SKB_DATAREF_SHIFT));
 1383 }
 1384 
 1385 
 1386 /**
 1387  *	skb_shared - is the buffer shared
 1388  *	@skb: buffer to check
 1389  *
 1390  *	Returns true if more than one person has a reference to this
 1391  *	buffer.
 1392  */
 1393 static inline int skb_shared(const struct sk_buff *skb)
 1394 {
 1395 	return atomic_read(&skb->users) != 1;
 1396 }
 1397 
 1398 /**
 1399  *	skb_share_check - check if buffer is shared and if so clone it
 1400  *	@skb: buffer to check
 1401  *	@pri: priority for memory allocation
 1402  *
 1403  *	If the buffer is shared the buffer is cloned and the old copy
 1404  *	drops a reference. A new clone with a single reference is returned.
 1405  *	If the buffer is not shared the original buffer is returned. When
 1406  *	called from interrupt context or with spinlocks held, @pri must
 1407  *	be %GFP_ATOMIC.
 1408  *
 1409  *	NULL is returned on a memory allocation failure.
 1410  */
 1411 static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri)
 1412 {
 1413 	might_sleep_if(gfpflags_allow_blocking(pri));
 1414 	if (skb_shared(skb)) {
 1415 		struct sk_buff *nskb = skb_clone(skb, pri);
 1416 
 1417 		if (likely(nskb))
 1418 			consume_skb(skb);
 1419 		else
 1420 			kfree_skb(skb);
 1421 		skb = nskb;
 1422 	}
 1423 	return skb;
 1424 }
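
/* Editor's example -- hypothetical. Typical use of skb_share_check() in a
 * receive handler that intends to modify the buffer. On allocation failure
 * the original skb has already been freed, so the caller only reports it.
 */
static inline int example_rcv(struct sk_buff *skb)
{
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	/* ... skb is now unshared and its metadata may be modified ... */
	consume_skb(skb);
	return 0;
}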
 1425 
 1426 /*
 1427  *	Copy shared buffers into a new sk_buff. We effectively do COW on
 1428  *	packets to handle cases where we have a local reader and also
 1429  *	forward the packet, and a couple of other messy ones. The typical
 1430  *	case is tcpdump capturing a packet that's being forwarded.
 1431  */
 1432 
 1433 /**
 1434  *	skb_unshare - make a copy of a shared buffer
 1435  *	@skb: buffer to check
 1436  *	@pri: priority for memory allocation
 1437  *
 1438  *	If the socket buffer is a clone then this function creates a new
 1439  *	copy of the data, drops a reference count on the old copy and returns
 1440  *	the new copy with the reference count at 1. If the buffer is not a clone
 1441  *	the original buffer is returned. When called with a spinlock held or
 1442  *	from interrupt state, @pri must be %GFP_ATOMIC.
 1443  *
 1444  *	%NULL is returned on a memory allocation failure.
 1445  */
 1446 static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
 1447 					  gfp_t pri)
 1448 {
 1449 	might_sleep_if(gfpflags_allow_blocking(pri));
 1450 	if (skb_cloned(skb)) {
 1451 		struct sk_buff *nskb = skb_copy(skb, pri);
 1452 
 1453 		/* Free our shared copy */
 1454 		if (likely(nskb))
 1455 			consume_skb(skb);
 1456 		else
 1457 			kfree_skb(skb);
 1458 		skb = nskb;
 1459 	}
 1460 	return skb;
 1461 }
 1462 
 1463 /**
 1464  *	skb_peek - peek at the head of an &sk_buff_head
 1465  *	@list_: list to peek at
 1466  *
 1467  *	Peek an &sk_buff. Unlike most other operations you _MUST_
 1468  *	be careful with this one. A peek leaves the buffer on the
 1469  *	list and someone else may run off with it. You must hold
 1470  *	the appropriate locks or have a private queue to do this.
 1471  *
 1472  *	Returns %NULL for an empty list or a pointer to the head element.
 1473  *	The reference count is not incremented and the reference is therefore
 1474  *	volatile. Use with caution.
 1475  */
 1476 static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_)
 1477 {
 1478 	struct sk_buff *skb = list_->next;
 1479 
 1480 	if (skb == (struct sk_buff *)list_)
 1481 		skb = NULL;
 1482 	return skb;
 1483 }
 1484 
 1485 /**
 1486  *	skb_peek_next - peek skb following the given one from a queue
 1487  *	@skb: skb to start from
 1488  *	@list_: list to peek at
 1489  *
 1490  *	Returns %NULL when the end of the list is met or a pointer to the
 1491  *	next element. The reference count is not incremented and the
 1492  *	reference is therefore volatile. Use with caution.
 1493  */
 1494 static inline struct sk_buff *skb_peek_next(struct sk_buff *skb,
 1495 		const struct sk_buff_head *list_)
 1496 {
 1497 	struct sk_buff *next = skb->next;
 1498 
 1499 	if (next == (struct sk_buff *)list_)
 1500 		next = NULL;
 1501 	return next;
 1502 }
 1503 
 1504 /**
 1505  *	skb_peek_tail - peek at the tail of an &sk_buff_head
 1506  *	@list_: list to peek at
 1507  *
 1508  *	Peek an &sk_buff. Unlike most other operations you _MUST_
 1509  *	be careful with this one. A peek leaves the buffer on the
 1510  *	list and someone else may run off with it. You must hold
 1511  *	the appropriate locks or have a private queue to do this.
 1512  *
 1513  *	Returns %NULL for an empty list or a pointer to the tail element.
 1514  *	The reference count is not incremented and the reference is therefore
 1515  *	volatile. Use with caution.
 1516  */
 1517 static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_)
 1518 {
 1519 	struct sk_buff *skb = list_->prev;
 1520 
 1521 	if (skb == (struct sk_buff *)list_)
 1522 		skb = NULL;
 1523 	return skb;
 1525 }
 1526 
 1527 /**
 1528  *	skb_queue_len	- get queue length
 1529  *	@list_: list to measure
 1530  *
 1531  *	Return the length of an &sk_buff queue.
 1532  */
 1533 static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
 1534 {
 1535 	return list_->qlen;
 1536 }
 1537 
 1538 /**
 1539  *	__skb_queue_head_init - initialize non-spinlock portions of sk_buff_head
 1540  *	@list: queue to initialize
 1541  *
 1542  *	This initializes only the list and queue length aspects of
 1543  *	an sk_buff_head object.  This allows initializing the list
 1544  *	aspects of an sk_buff_head without reinitializing things like
 1545  *	the spinlock.  It can also be used for on-stack sk_buff_head
 1546  *	objects where the spinlock is known to not be used.
 1547  */
 1548 static inline void __skb_queue_head_init(struct sk_buff_head *list)
 1549 {
 1550 	list->prev = list->next = (struct sk_buff *)list;
 1551 	list->qlen = 0;
 1552 }
 1553 
 1554 /*
 1555  * This function creates a split out lock class for each invocation;
 1556  * this is needed for now since a whole lot of users of the skb-queue
 1557  * infrastructure in drivers have different locking usage (in hardirq)
 1558  * than the networking core (in softirq only). In the long run either the
 1559  * network layer or the drivers will need annotations to consolidate the
 1560  * main types of usage into 3 classes.
 1561  */
 1562 static inline void skb_queue_head_init(struct sk_buff_head *list)
 1563 {
 1564 	spin_lock_init(&list->lock);
 1565 	__skb_queue_head_init(list);
 1566 }
 1567 
 1568 static inline void skb_queue_head_init_class(struct sk_buff_head *list,
 1569 		struct lock_class_key *class)
 1570 {
 1571 	skb_queue_head_init(list);
 1572 	lockdep_set_class(&list->lock, class);
 1573 }
 1574 
 1575 /*
 1576  *	Insert an sk_buff on a list.
 1577  *
 1578  *	The "__skb_xxxx()" functions are the non-atomic ones that
 1579  *	can only be called with interrupts disabled.
 1580  */
 1581 void skb_insert(struct sk_buff *old, struct sk_buff *newsk,
 1582 		struct sk_buff_head *list);
 1583 static inline void __skb_insert(struct sk_buff *newsk,
 1584 				struct sk_buff *prev, struct sk_buff *next,
 1585 				struct sk_buff_head *list)
 1586 {
 1587 	newsk->next = next;
 1588 	newsk->prev = prev;
 1589 	next->prev  = prev->next = newsk;
 1590 	list->qlen++;
 1591 }
 1592 
 1593 static inline void __skb_queue_splice(const struct sk_buff_head *list,
 1594 				      struct sk_buff *prev,
 1595 				      struct sk_buff *next)
 1596 {
 1597 	struct sk_buff *first = list->next;
 1598 	struct sk_buff *last = list->prev;
 1599 
 1600 	first->prev = prev;
 1601 	prev->next = first;
 1602 
 1603 	last->next = next;
 1604 	next->prev = last;
 1605 }
 1606 
 1607 /**
 1608  *	skb_queue_splice - join two skb lists; this is designed for stacks
 1609  *	@list: the new list to add
 1610  *	@head: the place to add it in the first list
 1611  */
 1612 static inline void skb_queue_splice(const struct sk_buff_head *list,
 1613 				    struct sk_buff_head *head)
 1614 {
 1615 	if (!skb_queue_empty(list)) {
 1616 		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
 1617 		head->qlen += list->qlen;
 1618 	}
 1619 }
 1620 
 1621 /**
 1622  *	skb_queue_splice_init - join two skb lists and reinitialise the emptied list
 1623  *	@list: the new list to add
 1624  *	@head: the place to add it in the first list
 1625  *
 1626  *	The list at @list is reinitialised
 1627  */
 1628 static inline void skb_queue_splice_init(struct sk_buff_head *list,
 1629 					 struct sk_buff_head *head)
 1630 {
 1631 	if (!skb_queue_empty(list)) {
 1632 		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
 1633 		head->qlen += list->qlen;
 1634 		__skb_queue_head_init(list);
 1635 	}
 1636 }
 1637 
 1638 /**
 1639  *	skb_queue_splice_tail - join two skb lists, each list being a queue
 1640  *	@list: the new list to add
 1641  *	@head: the place to add it in the first list
 1642  */
 1643 static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
 1644 					 struct sk_buff_head *head)
 1645 {
 1646 	if (!skb_queue_empty(list)) {
 1647 		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
 1648 		head->qlen += list->qlen;
 1649 	}
 1650 }
 1651 
 1652 /**
 1653  *	skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list
 1654  *	@list: the new list to add
 1655  *	@head: the place to add it in the first list
 1656  *
 1657  *	Each of the lists is a queue.
 1658  *	The list at @list is reinitialised
 1659  */
 1660 static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
 1661 					      struct sk_buff_head *head)
 1662 {
 1663 	if (!skb_queue_empty(list)) {
 1664 		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
 1665 		head->qlen += list->qlen;
 1666 		__skb_queue_head_init(list);
 1667 	}
 1668 }
 1669 
 1670 /**
 1671  *	__skb_queue_after - queue a buffer after another buffer in a list
 1672  *	@list: list to use
 1673  *	@prev: place after this buffer
 1674  *	@newsk: buffer to queue
 1675  *
 1676  *	Queue a buffer in the middle of a list. This function takes no locks
 1677  *	and you must therefore hold required locks before calling it.
 1678  *
 1679  *	A buffer cannot be placed on two lists at the same time.
 1680  */
 1681 static inline void __skb_queue_after(struct sk_buff_head *list,
 1682 				     struct sk_buff *prev,
 1683 				     struct sk_buff *newsk)
 1684 {
 1685 	__skb_insert(newsk, prev, prev->next, list);
 1686 }
 1687 
 1688 void skb_append(struct sk_buff *old, struct sk_buff *newsk,
 1689 		struct sk_buff_head *list);
 1690 
 1691 static inline void __skb_queue_before(struct sk_buff_head *list,
 1692 				      struct sk_buff *next,
 1693 				      struct sk_buff *newsk)
 1694 {
 1695 	__skb_insert(newsk, next->prev, next, list);
 1696 }
 1697 
 1698 /**
 1699  *	__skb_queue_head - queue a buffer at the list head
 1700  *	@list: list to use
 1701  *	@newsk: buffer to queue
 1702  *
 1703  *	Queue a buffer at the start of a list. This function takes no locks
 1704  *	and you must therefore hold required locks before calling it.
 1705  *
 1706  *	A buffer cannot be placed on two lists at the same time.
 1707  */
 1708 void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
 1709 static inline void __skb_queue_head(struct sk_buff_head *list,
 1710 				    struct sk_buff *newsk)
 1711 {
 1712 	__skb_queue_after(list, (struct sk_buff *)list, newsk);
 1713 }
 1714 
 1715 /**
 1716  *	__skb_queue_tail - queue a buffer at the list tail
 1717  *	@list: list to use
 1718  *	@newsk: buffer to queue
 1719  *
 1720  *	Queue a buffer at the end of a list. This function takes no locks
 1721  *	and you must therefore hold required locks before calling it.
 1722  *
 1723  *	A buffer cannot be placed on two lists at the same time.
 1724  */
 1725 void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
 1726 static inline void __skb_queue_tail(struct sk_buff_head *list,
 1727 				   struct sk_buff *newsk)
 1728 {
 1729 	__skb_queue_before(list, (struct sk_buff *)list, newsk);
 1730 }
 1731 
 1732 /*
 1733  * remove sk_buff from list. _Must_ be called atomically, and with
 1734  * the list known.
 1735  */
 1736 void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
 1737 static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
 1738 {
 1739 	struct sk_buff *next, *prev;
 1740 
 1741 	list->qlen--;
 1742 	next	   = skb->next;
 1743 	prev	   = skb->prev;
 1744 	skb->next  = skb->prev = NULL;
 1745 	next->prev = prev;
 1746 	prev->next = next;
 1747 }
 1748 
 1749 /**
 1750  *	__skb_dequeue - remove from the head of the queue
 1751  *	@list: list to dequeue from
 1752  *
 1753  *	Remove the head of the list. This function does not take any locks
 1754  *	so must be used with appropriate locks held only. The head item is
 1755  *	returned or %NULL if the list is empty.
 1756  */
 1757 struct sk_buff *skb_dequeue(struct sk_buff_head *list);
 1758 static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
 1759 {
 1760 	struct sk_buff *skb = skb_peek(list);
 1761 	if (skb)
 1762 		__skb_unlink(skb, list);
 1763 	return skb;
 1764 }
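
/* Editor's example -- hypothetical. An on-stack batch queue as described at
 * __skb_queue_head_init() above: the spinlock is never initialised or taken,
 * so only the lockless __skb_* variants may be used on it.
 */
static inline void example_drop_pair(struct sk_buff *a, struct sk_buff *b)
{
	struct sk_buff_head batch;
	struct sk_buff *skb;

	__skb_queue_head_init(&batch);
	__skb_queue_tail(&batch, a);
	__skb_queue_tail(&batch, b);

	while ((skb = __skb_dequeue(&batch)) != NULL)
		kfree_skb(skb);
}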
 1765 
 1766 /**
 1767  *	__skb_dequeue_tail - remove from the tail of the queue
 1768  *	@list: list to dequeue from
 1769  *
 1770  *	Remove the tail of the list. This function does not take any locks
 1771  *	so must be used with appropriate locks held only. The tail item is
 1772  *	returned or %NULL if the list is empty.
 1773  */
 1774 struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
 1775 static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
 1776 {
 1777 	struct sk_buff *skb = skb_peek_tail(list);
 1778 	if (skb)
 1779 		__skb_unlink(skb, list);
 1780 	return skb;
 1781 }
 1782 
 1783 
 1784 static inline bool skb_is_nonlinear(const struct sk_buff *skb)
 1785 {
 1786 	return skb->data_len;
 1787 }
 1788 
 1789 static inline unsigned int skb_headlen(const struct sk_buff *skb)
 1790 {
 1791 	return skb->len - skb->data_len;
 1792 }
 1793 
 1794 static inline int skb_pagelen(const struct sk_buff *skb)
 1795 {
 1796 	int i, len = 0;
 1797 
 1798 	for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--)
 1799 		len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
 1800 	return len + skb_headlen(skb);
 1801 }
 1802 
 1803 /**
 1804  * __skb_fill_page_desc - initialise a paged fragment in an skb
 1805  * @skb: buffer containing fragment to be initialised
 1806  * @i: paged fragment index to initialise
 1807  * @page: the page to use for this fragment
 1808  * @off: the offset to the data within @page
 1809  * @size: the length of the data
 1810  *
 1811  * Initialises the @i'th fragment of @skb to point to @size bytes at
 1812  * offset @off within @page.
 1813  *
 1814  * Does not take any additional reference on the fragment.
 1815  */
 1816 static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
 1817 					struct page *page, int off, int size)
 1818 {
 1819 	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 1820 
 1821 	/*
 1822 	 * Propagate page pfmemalloc to the skb if we can. The problem is
 1823 	 * that not all callers have unique ownership of the page but rely
 1824 	 * on page_is_pfmemalloc doing the right thing(tm).
 1825 	 */
 1826 	frag->page.p		  = page;
 1827 	frag->page_offset	  = off;
 1828 	skb_frag_size_set(frag, size);
 1829 
 1830 	page = compound_head(page);
 1831 	if (page_is_pfmemalloc(page))
 1832 		skb->pfmemalloc	= true;
 1833 }
 1834 
 1835 /**
 1836  * skb_fill_page_desc - initialise a paged fragment in an skb
 1837  * @skb: buffer containing fragment to be initialised
 1838  * @i: paged fragment index to initialise
 1839  * @page: the page to use for this fragment
 1840  * @off: the offset to the data within @page
 1841  * @size: the length of the data
 1842  *
 1843  * As per __skb_fill_page_desc() -- initialises the @i'th fragment of
 1844  * @skb to point to @size bytes at offset @off within @page. In
 1845  * addition updates @skb such that @i is the last fragment.
 1846  *
 1847  * Does not take any additional reference on the fragment.
 1848  */
 1849 static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
 1850 				      struct page *page, int off, int size)
 1851 {
 1852 	__skb_fill_page_desc(skb, i, page, off, size);
 1853 	skb_shinfo(skb)->nr_frags = i + 1;
 1854 }
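
/* Editor's example -- hypothetical. Attaching one page as a new fragment:
 * the page reference is donated to the skb (no extra reference is taken),
 * and len/data_len/truesize must be updated by hand. skb_add_rx_frag(),
 * declared below, bundles exactly this accounting. Assumes nr_frags is
 * still below MAX_SKB_FRAGS and that a full page is donated.
 */
static inline void example_attach_page(struct sk_buff *skb, struct page *page,
				       int off, int size)
{
	skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page, off, size);
	skb->len      += size;
	skb->data_len += size;
	skb->truesize += PAGE_SIZE;
}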
 1855 
 1856 void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
 1857 		     int size, unsigned int truesize);
 1858 
 1859 void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
 1860 			  unsigned int truesize);
 1861 
 1862 #define SKB_PAGE_ASSERT(skb) 	BUG_ON(skb_shinfo(skb)->nr_frags)
 1863 #define SKB_FRAG_ASSERT(skb) 	BUG_ON(skb_has_frag_list(skb))
 1864 #define SKB_LINEAR_ASSERT(skb)  BUG_ON(skb_is_nonlinear(skb))
 1865 
 1866 #ifdef NET_SKBUFF_DATA_USES_OFFSET
 1867 static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
 1868 {
 1869 	return skb->head + skb->tail;
 1870 }
 1871 
 1872 static inline void skb_reset_tail_pointer(struct sk_buff *skb)
 1873 {
 1874 	skb->tail = skb->data - skb->head;
 1875 }
 1876 
 1877 static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
 1878 {
 1879 	skb_reset_tail_pointer(skb);
 1880 	skb->tail += offset;
 1881 }
 1882 
 1883 #else /* NET_SKBUFF_DATA_USES_OFFSET */
 1884 static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
 1885 {
 1886 	return skb->tail;
 1887 }
 1888 
 1889 static inline void skb_reset_tail_pointer(struct sk_buff *skb)
 1890 {
 1891 	skb->tail = skb->data;
 1892 }
 1893 
 1894 static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
 1895 {
 1896 	skb->tail = skb->data + offset;
 1897 }
 1898 
 1899 #endif /* NET_SKBUFF_DATA_USES_OFFSET */
 1900 
 1901 /*
 1902  *	Add data to an sk_buff
 1903  */
 1904 unsigned char *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len);
 1905 unsigned char *skb_put(struct sk_buff *skb, unsigned int len);
 1906 static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
 1907 {
 1908 	unsigned char *tmp = skb_tail_pointer(skb);
 1909 	SKB_LINEAR_ASSERT(skb);
 1910 	skb->tail += len;
 1911 	skb->len  += len;
 1912 	return tmp;
 1913 }
 1914 
 1915 unsigned char *skb_push(struct sk_buff *skb, unsigned int len);
 1916 static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
 1917 {
 1918 	skb->data -= len;
 1919 	skb->len  += len;
 1920 	return skb->data;
 1921 }
 1922 
 1923 unsigned char *skb_pull(struct sk_buff *skb, unsigned int len);
 1924 static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
 1925 {
 1926 	skb->len -= len;
 1927 	BUG_ON(skb->len < skb->data_len);
 1928 	return skb->data += len;
 1929 }
 1930 
 1931 static inline unsigned char *skb_pull_inline(struct sk_buff *skb, unsigned int len)
 1932 {
 1933 	return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
 1934 }
 1935 
 1936 unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);
 1937 
 1938 static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
 1939 {
 1940 	if (len > skb_headlen(skb) &&
 1941 	    !__pskb_pull_tail(skb, len - skb_headlen(skb)))
 1942 		return NULL;
 1943 	skb->len -= len;
 1944 	return skb->data += len;
 1945 }
 1946 
 1947 static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len)
 1948 {
 1949 	return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
 1950 }
 1951 
 1952 static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
 1953 {
 1954 	if (likely(len <= skb_headlen(skb)))
 1955 		return 1;
 1956 	if (unlikely(len > skb->len))
 1957 		return 0;
 1958 	return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
 1959 }
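
/* Editor's example -- hypothetical. The canonical parsing guard: make sure
 * at least the bytes to be read are in the linear head. pskb_may_pull() may
 * reallocate the head, so pointers into the packet must be (re)computed
 * after it succeeds.
 */
static inline int example_read_first_byte(struct sk_buff *skb, u8 *val)
{
	if (!pskb_may_pull(skb, 1))
		return -EINVAL;

	*val = skb->data[0];	/* safe: at least one linear byte */
	return 0;
}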
 1960 
 1961 /**
 1962  *	skb_headroom - bytes at buffer head
 1963  *	@skb: buffer to check
 1964  *
 1965  *	Return the number of bytes of free space at the head of an &sk_buff.
 1966  */
 1967 static inline unsigned int skb_headroom(const struct sk_buff *skb)
 1968 {
 1969 	return skb->data - skb->head;
 1970 }
 1971 
 1972 /**
 1973  *	skb_tailroom - bytes at buffer end
 1974  *	@skb: buffer to check
 1975  *
 1976  *	Return the number of bytes of free space at the tail of an sk_buff
 1977  */
 1978 static inline int skb_tailroom(const struct sk_buff *skb)
 1979 {
 1980 	return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
 1981 }
 1982 
 1983 /**
 1984  *	skb_availroom - bytes at buffer end
 1985  *	@skb: buffer to check
 1986  *
 1987  *	Return the number of bytes of free space at the tail of an sk_buff
 1988  *	allocated by sk_stream_alloc()
 1989  */
 1990 static inline int skb_availroom(const struct sk_buff *skb)
 1991 {
 1992 	if (skb_is_nonlinear(skb))
 1993 		return 0;
 1994 
 1995 	return skb->end - skb->tail - skb->reserved_tailroom;
 1996 }
 1997 
 1998 /**
 1999  *	skb_reserve - adjust headroom
 2000  *	@skb: buffer to alter
 2001  *	@len: bytes to move
 2002  *
 2003  *	Increase the headroom of an empty &sk_buff by reducing the tail
 2004  *	room. This is only allowed for an empty buffer.
 2005  */
 2006 static inline void skb_reserve(struct sk_buff *skb, int len)
 2007 {
 2008 	skb->data += len;
 2009 	skb->tail += len;
 2010 }
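
/* Editor's example -- hypothetical. The classic build pattern: reserve
 * headroom while the buffer is still empty, append payload with skb_put(),
 * and later prepend headers into the reserved space with skb_push(). The
 * 64-byte headroom is purely illustrative.
 */
static inline struct sk_buff *example_build(const void *payload,
					    unsigned int len)
{
	struct sk_buff *skb = alloc_skb(64 + len, GFP_ATOMIC);

	if (!skb)
		return NULL;
	skb_reserve(skb, 64);
	memcpy(skb_put(skb, len), payload, len);
	return skb;
}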
 2011 
 2012 /**
 2013  *	skb_tailroom_reserve - adjust reserved_tailroom
 2014  *	@skb: buffer to alter
 2015  *	@mtu: maximum amount of headlen permitted
 2016  *	@needed_tailroom: minimum amount of reserved_tailroom
 2017  *
 2018  *	Set reserved_tailroom so that headlen can be as large as possible but
 2019  *	not larger than mtu and tailroom cannot be smaller than
 2020  *	needed_tailroom.
 2021  *	The required headroom should already have been reserved before using
 2022  *	this function.
 2023  */
 2024 static inline void skb_tailroom_reserve(struct sk_buff *skb, unsigned int mtu,
 2025 					unsigned int needed_tailroom)
 2026 {
 2027 	SKB_LINEAR_ASSERT(skb);
 2028 	if (mtu < skb_tailroom(skb) - needed_tailroom)
 2029 		/* use at most mtu */
 2030 		skb->reserved_tailroom = skb_tailroom(skb) - mtu;
 2031 	else
 2032 		/* use up to all available space */
 2033 		skb->reserved_tailroom = needed_tailroom;
 2034 }
 2035 
 2036 #define ENCAP_TYPE_ETHER	0
 2037 #define ENCAP_TYPE_IPPROTO	1
 2038 
 2039 static inline void skb_set_inner_protocol(struct sk_buff *skb,
 2040 					  __be16 protocol)
 2041 {
 2042 	skb->inner_protocol = protocol;
 2043 	skb->inner_protocol_type = ENCAP_TYPE_ETHER;
 2044 }
 2045 
 2046 static inline void skb_set_inner_ipproto(struct sk_buff *skb,
 2047 					 __u8 ipproto)
 2048 {
 2049 	skb->inner_ipproto = ipproto;
 2050 	skb->inner_protocol_type = ENCAP_TYPE_IPPROTO;
 2051 }
 2052 
 2053 static inline void skb_reset_inner_headers(struct sk_buff *skb)
 2054 {
 2055 	skb->inner_mac_header = skb->mac_header;
 2056 	skb->inner_network_header = skb->network_header;
 2057 	skb->inner_transport_header = skb->transport_header;
 2058 }
 2059 
 2060 static inline void skb_reset_mac_len(struct sk_buff *skb)
 2061 {
 2062 	skb->mac_len = skb->network_header - skb->mac_header;
 2063 }
 2064 
 2065 static inline unsigned char *skb_inner_transport_header(const struct sk_buff
 2066 							*skb)
 2067 {
 2068 	return skb->head + skb->inner_transport_header;
 2069 }
 2070 
 2071 static inline int skb_inner_transport_offset(const struct sk_buff *skb)
 2072 {
 2073 	return skb_inner_transport_header(skb) - skb->data;
 2074 }
 2075 
 2076 static inline void skb_reset_inner_transport_header(struct sk_buff *skb)
 2077 {
 2078 	skb->inner_transport_header = skb->data - skb->head;
 2079 }
 2080 
 2081 static inline void skb_set_inner_transport_header(struct sk_buff *skb,
 2082 						   const int offset)
 2083 {
 2084 	skb_reset_inner_transport_header(skb);
 2085 	skb->inner_transport_header += offset;
 2086 }
 2087 
 2088 static inline unsigned char *skb_inner_network_header(const struct sk_buff *skb)
 2089 {
 2090 	return skb->head + skb->inner_network_header;
 2091 }
 2092 
 2093 static inline void skb_reset_inner_network_header(struct sk_buff *skb)
 2094 {
 2095 	skb->inner_network_header = skb->data - skb->head;
 2096 }
 2097 
 2098 static inline void skb_set_inner_network_header(struct sk_buff *skb,
 2099 						const int offset)
 2100 {
 2101 	skb_reset_inner_network_header(skb);
 2102 	skb->inner_network_header += offset;
 2103 }
 2104 
 2105 static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb)
 2106 {
 2107 	return skb->head + skb->inner_mac_header;
 2108 }
 2109 
 2110 static inline void skb_reset_inner_mac_header(struct sk_buff *skb)
 2111 {
 2112 	skb->inner_mac_header = skb->data - skb->head;
 2113 }
 2114 
 2115 static inline void skb_set_inner_mac_header(struct sk_buff *skb,
 2116 					    const int offset)
 2117 {
 2118 	skb_reset_inner_mac_header(skb);
 2119 	skb->inner_mac_header += offset;
 2120 }
 2121 static inline bool skb_transport_header_was_set(const struct sk_buff *skb)
 2122 {
 2123 	return skb->transport_header != (typeof(skb->transport_header))~0U;
 2124 }
 2125 
 2126 static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
 2127 {
 2128 	return skb->head + skb->transport_header;
 2129 }
 2130 
 2131 static inline void skb_reset_transport_header(struct sk_buff *skb)
 2132 {
 2133 	skb->transport_header = skb->data - skb->head;
 2134 }
 2135 
 2136 static inline void skb_set_transport_header(struct sk_buff *skb,
 2137 					    const int offset)
 2138 {
 2139 	skb_reset_transport_header(skb);
 2140 	skb->transport_header += offset;
 2141 }
 2142 
 2143 static inline unsigned char *skb_network_header(const struct sk_buff *skb)
 2144 {
 2145 	return skb->head + skb->network_header;
 2146 }
 2147 
 2148 static inline void skb_reset_network_header(struct sk_buff *skb)
 2149 {
 2150 	skb->network_header = skb->data - skb->head;
 2151 }
 2152 
 2153 static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
 2154 {
 2155 	skb_reset_network_header(skb);
 2156 	skb->network_header += offset;
 2157 }
 2158 
 2159 static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
 2160 {
 2161 	return skb->head + skb->mac_header;
 2162 }
 2163 
 2164 static inline int skb_mac_header_was_set(const struct sk_buff *skb)
 2165 {
 2166 	return skb->mac_header != (typeof(skb->mac_header))~0U;
 2167 }
 2168 
 2169 static inline void skb_reset_mac_header(struct sk_buff *skb)
 2170 {
 2171 	skb->mac_header = skb->data - skb->head;
 2172 }
 2173 
 2174 static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
 2175 {
 2176 	skb_reset_mac_header(skb);
 2177 	skb->mac_header += offset;
 2178 }
 2179 
 2180 static inline void skb_pop_mac_header(struct sk_buff *skb)
 2181 {
 2182 	skb->mac_header = skb->network_header;
 2183 }
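
/* Editor's example -- hypothetical. How a receive path typically records
 * header positions with the setters above; the 14-byte offset stands in
 * for ETH_HLEN from <linux/if_ether.h>.
 */
static inline void example_mark_headers(struct sk_buff *skb)
{
	skb_reset_mac_header(skb);		/* MAC header starts at skb->data */
	skb_set_network_header(skb, 14);	/* L3 header follows the MAC header */
	skb_reset_mac_len(skb);			/* mac_len = network - mac = 14 */
}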
 2184 
 2185 static inline void skb_probe_transport_header(struct sk_buff *skb,
 2186 					      const int offset_hint)
 2187 {
 2188 	struct flow_keys keys;
 2189 
 2190 	if (skb_transport_header_was_set(skb))
 2191 		return;
 2192 	else if (skb_flow_dissect_flow_keys(skb, &keys, 0))
 2193 		skb_set_transport_header(skb, keys.control.thoff);
 2194 	else
 2195 		skb_set_transport_header(skb, offset_hint);
 2196 }
 2197 
 2198 static inline void skb_mac_header_rebuild(struct sk_buff *skb)
 2199 {
 2200 	if (skb_mac_header_was_set(skb)) {
 2201 		const unsigned char *old_mac = skb_mac_header(skb);
 2202 
 2203 		skb_set_mac_header(skb, -skb->mac_len);
 2204 		memmove(skb_mac_header(skb), old_mac, skb->mac_len);
 2205 	}
 2206 }
 2207 
 2208 static inline int skb_checksum_start_offset(const struct sk_buff *skb)
 2209 {
 2210 	return skb->csum_start - skb_headroom(skb);
 2211 }
 2212 
 2213 static inline unsigned char *skb_checksum_start(const struct sk_buff *skb)
 2214 {
 2215 	return skb->head + skb->csum_start;
 2216 }
 2217 
 2218 static inline int skb_transport_offset(const struct sk_buff *skb)
 2219 {
 2220 	return skb_transport_header(skb) - skb->data;
 2221 }
 2222 
 2223 static inline u32 skb_network_header_len(const struct sk_buff *skb)
 2224 {
 2225 	return skb->transport_header - skb->network_header;
 2226 }
 2227 
 2228 static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
 2229 {
 2230 	return skb->inner_transport_header - skb->inner_network_header;
 2231 }
 2232 
 2233 static inline int skb_network_offset(const struct sk_buff *skb)
 2234 {
 2235 	return skb_network_header(skb) - skb->data;
 2236 }
 2237 
 2238 static inline int skb_inner_network_offset(const struct sk_buff *skb)
 2239 {
 2240 	return skb_inner_network_header(skb) - skb->data;
 2241 }
 2242 
 2243 static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
 2244 {
 2245 	return pskb_may_pull(skb, skb_network_offset(skb) + len);
 2246 }
 2247 
 2248 /*
 2249  * CPUs often take a performance hit when accessing unaligned memory
 2250  * locations. The actual performance hit varies, it can be small if the
 2251  * hardware handles it or large if we have to take an exception and fix it
 2252  * in software.
 2253  *
 2254  * Since an Ethernet header is 14 bytes, network drivers often end up with
 2255  * the IP header at an unaligned offset. The IP header can be aligned by
 2256  * shifting the start of the packet by 2 bytes. Drivers should do this
 2257  * with:
 2258  *
 2259  * skb_reserve(skb, NET_IP_ALIGN);
 2260  *
 2261  * The downside to this alignment of the IP header is that the DMA is now
 2262  * unaligned. On some architectures the cost of an unaligned DMA is high
 2263  * and this cost outweighs the gains made by aligning the IP header.
 2264  *
 2265  * Since this trade off varies between architectures, we allow NET_IP_ALIGN
 2266  * to be overridden.
 2267  */
 2268 #ifndef NET_IP_ALIGN
 2269 #define NET_IP_ALIGN	2
 2270 #endif
 2271 
 2272 /*
 2273  * The networking layer reserves some headroom in skb data (via
 2274  * dev_alloc_skb). This is used to avoid having to reallocate skb data when
 2275  * the header has to grow. In the default case, if the header has to grow
 2276  * 32 bytes or less we avoid the reallocation.
 2277  *
 2278  * Unfortunately this headroom changes the DMA alignment of the resulting
 2279  * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
 2280  * on some architectures. An architecture can override this value,
 2281  * perhaps setting it to a cacheline in size (since that will maintain
 2282  * cacheline alignment of the DMA). It must be a power of 2.
 2283  *
 2284  * Various parts of the networking layer expect at least 32 bytes of
 2285  * headroom, you should not reduce this.
 2286  *
 2287  * Using max(32, L1_CACHE_BYTES) makes sense (especially with RPS)
 2288  * to reduce the average number of cache lines per packet.
 2289  * get_rps_cpus(), for example, only accesses one 64-byte aligned block:
 2290  * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
 2291  */
 2292 #ifndef NET_SKB_PAD
 2293 #define NET_SKB_PAD	max(32, L1_CACHE_BYTES)
 2294 #endif
 2295 
 2296 int ___pskb_trim(struct sk_buff *skb, unsigned int len);
 2297 
 2298 static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
 2299 {
 2300 	if (unlikely(skb_is_nonlinear(skb))) {
 2301 		WARN_ON(1);
 2302 		return;
 2303 	}
 2304 	skb->len = len;
 2305 	skb_set_tail_pointer(skb, len);
 2306 }
 2307 
 2308 void skb_trim(struct sk_buff *skb, unsigned int len);
 2309 
 2310 static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
 2311 {
 2312 	if (skb->data_len)
 2313 		return ___pskb_trim(skb, len);
 2314 	__skb_trim(skb, len);
 2315 	return 0;
 2316 }
 2317 
 2318 static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
 2319 {
 2320 	return (len < skb->len) ? __pskb_trim(skb, len) : 0;
 2321 }
 2322 
 2323 /**
 2324  *	pskb_trim_unique - remove end from a paged unique (not cloned) buffer
 2325  *	@skb: buffer to alter
 2326  *	@len: new length
 2327  *
 2328  *	This is identical to pskb_trim except that the caller knows that
 2329  *	the skb is not cloned so we should never get an error due to out-
 2330  *	of-memory.
 2331  */
 2332 static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
 2333 {
 2334 	int err = pskb_trim(skb, len);
 2335 	BUG_ON(err);
 2336 }
 2337 
 2338 /**
 2339  *	skb_orphan - orphan a buffer
 2340  *	@skb: buffer to orphan
 2341  *
 2342  *	If a buffer currently has an owner then we call the owner's
 2343  *	destructor function and make the @skb unowned. The buffer continues
 2344  *	to exist but is no longer charged to its former owner.
 2345  */
 2346 static inline void skb_orphan(struct sk_buff *skb)
 2347 {
 2348 	if (skb->destructor) {
 2349 		skb->destructor(skb);
 2350 		skb->destructor = NULL;
 2351 		skb->sk		= NULL;
 2352 	} else {
 2353 		BUG_ON(skb->sk);
 2354 	}
 2355 }
 2356 
 2357 /**
 2358  *	skb_orphan_frags - orphan the frags contained in a buffer
 2359  *	@skb: buffer to orphan frags from
 2360  *	@gfp_mask: allocation mask for replacement pages
 2361  *
 2362  *	For each frag in the SKB which needs a destructor (i.e. has an
 2363  *	owner) create a copy of that frag and release the original
 2364  *	page by calling the destructor.
 2365  */
 2366 static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
 2367 {
 2368 	if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)))
 2369 		return 0;
 2370 	return skb_copy_ubufs(skb, gfp_mask);
 2371 }
 2372 
 2373 /**
 2374  *	__skb_queue_purge - empty a list
 2375  *	@list: list to empty
 2376  *
 2377  *	Delete all buffers on an &sk_buff list. Each buffer is removed from
 2378  *	the list and one reference dropped. This function does not take the
 2379  *	list lock and the caller must hold the relevant locks to use it.
 2380  */
 2381 void skb_queue_purge(struct sk_buff_head *list);
 2382 static inline void __skb_queue_purge(struct sk_buff_head *list)
 2383 {
 2384 	struct sk_buff *skb;
 2385 	while ((skb = __skb_dequeue(list)) != NULL)
 2386 		kfree_skb(skb);
 2387 }
 2388 
 2389 void *netdev_alloc_frag(unsigned int fragsz);
 2390 
 2391 struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length,
 2392 				   gfp_t gfp_mask);
 2393 
 2394 /**
 2395  *	netdev_alloc_skb - allocate an skbuff for rx on a specific device
 2396  *	@dev: network device to receive on
 2397  *	@length: length to allocate
 2398  *
 2399  *	Allocate a new &sk_buff and assign it a usage count of one. The
 2400  *	buffer has unspecified headroom built in. Users should allocate
 2401  *	the headroom they think they need without accounting for the
 2402  *	built in space. The built in space is used for optimisations.
 2403  *
 2404  *	%NULL is returned if there is no free memory. Although this function
 2405  *	allocates memory it can be called from an interrupt.
 2406  */
 2407 static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
 2408 					       unsigned int length)
 2409 {
 2410 	return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
 2411 }
 2412 
 2413 /* legacy helper around __netdev_alloc_skb() */
 2414 static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
 2415 					      gfp_t gfp_mask)
 2416 {
 2417 	return __netdev_alloc_skb(NULL, length, gfp_mask);
 2418 }
 2419 
 2420 /* legacy helper around netdev_alloc_skb() */
 2421 static inline struct sk_buff *dev_alloc_skb(unsigned int length)
 2422 {
 2423 	return netdev_alloc_skb(NULL, length);
 2424 }
 2425 
 2426 
 2427 static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev,
 2428 		unsigned int length, gfp_t gfp)
 2429 {
 2430 	struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp);
 2431 
 2432 	if (NET_IP_ALIGN && skb)
 2433 		skb_reserve(skb, NET_IP_ALIGN);
 2434 	return skb;
 2435 }
 2436 
 2437 static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
 2438 		unsigned int length)
 2439 {
 2440 	return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
 2441 }
 2442 
 2443 static inline void skb_free_frag(void *addr)
 2444 {
 2445 	__free_page_frag(addr);
 2446 }
 2447 
 2448 void *napi_alloc_frag(unsigned int fragsz);
 2449 struct sk_buff *__napi_alloc_skb(struct napi_struct *napi,
 2450 				 unsigned int length, gfp_t gfp_mask);
 2451 static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi,
 2452 					     unsigned int length)
 2453 {
 2454 	return __napi_alloc_skb(napi, length, GFP_ATOMIC);
 2455 }
 2456 void napi_consume_skb(struct sk_buff *skb, int budget);
 2457 
 2458 void __kfree_skb_flush(void);
 2459 void __kfree_skb_defer(struct sk_buff *skb);
 2460 
 2461 /**
 2462  * __dev_alloc_pages - allocate page for network Rx
 2463  * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx
 2464  * @order: size of the allocation
 2465  *
 2466  * Allocate a new page.
 2467  *
 2468  * %NULL is returned if there is no free memory.
 2469  */
 2470 static inline struct page *__dev_alloc_pages(gfp_t gfp_mask,
 2471 					     unsigned int order)
 2472 {
 2473 	/* This piece of code contains several assumptions.
 2474 	 * 1.  This is for device Rx, therefore a cold page is preferred.
 2475 	 * 2.  The expectation is the user wants a compound page.
 2476 	 * 3.  If requesting an order-0 page it will not be compound
 2477 	 *     due to the check to see if order has a value in prep_new_page
 2478 	 * 4.  __GFP_MEMALLOC is ignored if __GFP_NOMEMALLOC is set due to
 2479 	 *     code in gfp_to_alloc_flags that should be enforcing this.
 2480 	 */
 2481 	gfp_mask |= __GFP_COLD | __GFP_COMP | __GFP_MEMALLOC;
 2482 
 2483 	return alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
 2484 }
 2485 
 2486 static inline struct page *dev_alloc_pages(unsigned int order)
 2487 {
 2488 	return __dev_alloc_pages(GFP_ATOMIC | __GFP_NOWARN, order);
 2489 }
 2490 
 2491 /**
 2492  * __dev_alloc_page - allocate a page for network Rx
 2493  * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx
 2494  *
 2495  * Allocate a new page.
 2496  *
 2497  * %NULL is returned if there is no free memory.
 2498  */
 2499 static inline struct page *__dev_alloc_page(gfp_t gfp_mask)
 2500 {
 2501 	return __dev_alloc_pages(gfp_mask, 0);
 2502 }
 2503 
 2504 static inline struct page *dev_alloc_page(void)
 2505 {
 2506 	return dev_alloc_pages(0);
 2507 }
 2508 
 2509 /**
 2510  *	skb_propagate_pfmemalloc - Propagate pfmemalloc if skb is allocated after RX page
 2511  *	@page: The page that was allocated from skb_alloc_page
 2512  *	@skb: The skb that may need pfmemalloc set
 2513  */
 2514 static inline void skb_propagate_pfmemalloc(struct page *page,
 2515 					     struct sk_buff *skb)
 2516 {
 2517 	if (page_is_pfmemalloc(page))
 2518 		skb->pfmemalloc = true;
 2519 }
 2520 
 2521 /**
 2522  * skb_frag_page - retrieve the page referred to by a paged fragment
 2523  * @frag: the paged fragment
 2524  *
 2525  * Returns the &struct page associated with @frag.
 2526  */
 2527 static inline struct page *skb_frag_page(const skb_frag_t *frag)
 2528 {
 2529 	return frag->page.p;
 2530 }
 2531 
 2532 /**
 2533  * __skb_frag_ref - take an additional reference on a paged fragment.
 2534  * @frag: the paged fragment
 2535  *
 2536  * Takes an additional reference on the paged fragment @frag.
 2537  */
 2538 static inline void __skb_frag_ref(skb_frag_t *frag)
 2539 {
 2540 	get_page(skb_frag_page(frag));
 2541 }
 2542 
 2543 /**
 2544  * skb_frag_ref - take an additional reference on a paged fragment of an skb.
 2545  * @skb: the buffer
 2546  * @f: the fragment offset.
 2547  *
 2548  * Takes an additional reference on the @f'th paged fragment of @skb.
 2549  */
 2550 static inline void skb_frag_ref(struct sk_buff *skb, int f)
 2551 {
 2552 	__skb_frag_ref(&skb_shinfo(skb)->frags[f]);
 2553 }
 2554 
 2555 /**
 2556  * __skb_frag_unref - release a reference on a paged fragment.
 2557  * @frag: the paged fragment
 2558  *
 2559  * Releases a reference on the paged fragment @frag.
 2560  */
 2561 static inline void __skb_frag_unref(skb_frag_t *frag)
 2562 {
 2563 	put_page(skb_frag_page(frag));
 2564 }
 2565 
 2566 /**
 2567  * skb_frag_unref - release a reference on a paged fragment of an skb.
 2568  * @skb: the buffer
 2569  * @f: the fragment offset
 2570  *
 2571  * Releases a reference on the @f'th paged fragment of @skb.
 2572  */
 2573 static inline void skb_frag_unref(struct sk_buff *skb, int f)
 2574 {
 2575 	__skb_frag_unref(&skb_shinfo(skb)->frags[f]);
 2576 }
 2577 
 2578 /**
 2579  * skb_frag_address - gets the address of the data contained in a paged fragment
 2580  * @frag: the paged fragment buffer
 2581  *
 2582  * Returns the address of the data within @frag. The page must already
 2583  * be mapped.
 2584  */
 2585 static inline void *skb_frag_address(const skb_frag_t *frag)
 2586 {
 2587 	return page_address(skb_frag_page(frag)) + frag->page_offset;
 2588 }
 2589 
 2590 /**
 2591  * skb_frag_address_safe - gets the address of the data contained in a paged fragment
 2592  * @frag: the paged fragment buffer
 2593  *
 2594  * Returns the address of the data within @frag. Checks that the page
 2595  * is mapped and returns %NULL otherwise.
 2596  */
 2597 static inline void *skb_frag_address_safe(const skb_frag_t *frag)
 2598 {
 2599 	void *ptr = page_address(skb_frag_page(frag));
 2600 	if (unlikely(!ptr))
 2601 		return NULL;
 2602 
 2603 	return ptr + frag->page_offset;
 2604 }
 2605 
 2606 /**
 2607  * __skb_frag_set_page - sets the page contained in a paged fragment
 2608  * @frag: the paged fragment
 2609  * @page: the page to set
 2610  *
 2611  * Sets the fragment @frag to contain @page.
 2612  */
 2613 static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page)
 2614 {
 2615 	frag->page.p = page;
 2616 }
 2617 
 2618 /**
 2619  * skb_frag_set_page - sets the page contained in a paged fragment of an skb
 2620  * @skb: the buffer
 2621  * @f: the fragment offset
 2622  * @page: the page to set
 2623  *
 2624  * Sets the @f'th fragment of @skb to contain @page.
 2625  */
 2626 static inline void skb_frag_set_page(struct sk_buff *skb, int f,
 2627 				     struct page *page)
 2628 {
 2629 	__skb_frag_set_page(&skb_shinfo(skb)->frags[f], page);
 2630 }
 2631 
 2632 bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio);
 2633 
 2634 /**
 2635  * skb_frag_dma_map - maps a paged fragment via the DMA API
 2636  * @dev: the device to map the fragment to
 2637  * @frag: the paged fragment to map
 2638  * @offset: the offset within the fragment (starting at the
 2639  *          fragment's own offset)
 2640  * @size: the number of bytes to map
 2641  * @dir: the direction of the mapping (%PCI_DMA_*)
 2642  *
 2643  * Maps the page associated with @frag to @dev.
 2644  */
 2645 static inline dma_addr_t skb_frag_dma_map(struct device *dev,
 2646 					  const skb_frag_t *frag,
 2647 					  size_t offset, size_t size,
 2648 					  enum dma_data_direction dir)
 2649 {
 2650 	return dma_map_page(dev, skb_frag_page(frag),
 2651 			    frag->page_offset + offset, size, dir);
 2652 }
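
/* Editor's example -- hypothetical. Mapping every paged fragment of a tx skb
 * for DMA. Error handling (dma_mapping_error() and unwinding of earlier
 * mappings) is deliberately elided from this sketch.
 */
static inline void example_map_frags(struct device *dev, struct sk_buff *skb)
{
	int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		dma_addr_t addr = skb_frag_dma_map(dev, frag, 0,
						   skb_frag_size(frag),
						   DMA_TO_DEVICE);
		/* ... program addr into a hardware descriptor ... */
		(void)addr;
	}
}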
 2653 
 2654 static inline struct sk_buff *pskb_copy(struct sk_buff *skb,
 2655 					gfp_t gfp_mask)
 2656 {
 2657 	return __pskb_copy(skb, skb_headroom(skb), gfp_mask);
 2658 }
 2659 
 2660 
 2661 static inline struct sk_buff *pskb_copy_for_clone(struct sk_buff *skb,
 2662 						  gfp_t gfp_mask)
 2663 {
 2664 	return __pskb_copy_fclone(skb, skb_headroom(skb), gfp_mask, true);
 2665 }
 2666 
 2667 
 2668 /**
 2669  *	skb_clone_writable - is the header of a clone writable
 2670  *	@skb: buffer to check
 2671  *	@len: length up to which to write
 2672  *
 2673  *	Returns true if modifying the header part of the cloned buffer
 2674  *	does not require the data to be copied.
 2675  */
 2676 static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len)
 2677 {
 2678 	return !skb_header_cloned(skb) &&
 2679 	       skb_headroom(skb) + len <= skb->hdr_len;
 2680 }
 2681 
 2682 static inline int skb_try_make_writable(struct sk_buff *skb,
 2683 					unsigned int write_len)
 2684 {
 2685 	return skb_cloned(skb) && !skb_clone_writable(skb, write_len) &&
 2686 	       pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
 2687 }
 2688 
 2689 static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
 2690 			    int cloned)
 2691 {
 2692 	int delta = 0;
 2693 
 2694 	if (headroom > skb_headroom(skb))
 2695 		delta = headroom - skb_headroom(skb);
 2696 
 2697 	if (delta || cloned)
 2698 		return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
 2699 					GFP_ATOMIC);
 2700 	return 0;
 2701 }
 2702 
 2703 /**
 2704  *	skb_cow - copy header of skb when it is required
 2705  *	@skb: buffer to cow
 2706  *	@headroom: needed headroom
 2707  *
 2708  *	If the skb passed lacks sufficient headroom or its data part
 2709  *	is shared, data is reallocated. If reallocation fails, an error
 2710  *	is returned and original skb is not changed.
 2711  *
 2712  *	The result is skb with writable area skb->head...skb->tail
 2713  *	and at least @headroom of space at head.
 2714  */
 2715 static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
 2716 {
 2717 	return __skb_cow(skb, headroom, skb_cloned(skb));
 2718 }
 2719 
 2720 /**
 2721  *	skb_cow_head - skb_cow but only making the head writable
 2722  *	@skb: buffer to cow
 2723  *	@headroom: needed headroom
 2724  *
 2725  *	This function is identical to skb_cow except that we replace the
 2726  *	skb_cloned check by skb_header_cloned.  It should be used when
 2727  *	you only need to push on some header and do not need to modify
 2728  *	the data.
 2729  */
 2730 static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
 2731 {
 2732 	return __skb_cow(skb, headroom, skb_header_cloned(skb));
 2733 }
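
A minimal sketch of the common use of skb_cow_head(): guarantee writable headroom before pushing a new header. The Ethernet encapsulation below is invented for the example; skb_push(), ETH_HLEN and struct ethhdr are the real APIs.

#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <linux/string.h>

/* Hypothetical: prepend an Ethernet header after securing headroom. */
static int example_push_eth(struct sk_buff *skb, const u8 *dst, const u8 *src)
{
	struct ethhdr *eth;

	if (skb_cow_head(skb, ETH_HLEN))
		return -ENOMEM;		/* could not make the head writable */

	eth = (struct ethhdr *)skb_push(skb, ETH_HLEN);
	memcpy(eth->h_dest, dst, ETH_ALEN);
	memcpy(eth->h_source, src, ETH_ALEN);
	eth->h_proto = htons(ETH_P_IP);	/* assumed payload type */
	return 0;
}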
 2734 
 2735 /**
 2736  *	skb_padto	- pad an skbuff up to a minimal size
 2737  *	@skb: buffer to pad
 2738  *	@len: minimal length
 2739  *
 2740  *	Pads up a buffer to ensure the trailing bytes exist and are
 2741  *	blanked. If the buffer already contains sufficient data it
 2742  *	is untouched. Otherwise it is extended. Returns zero on
 2743  *	success. The skb is freed on error.
 2744  */
 2745 static inline int skb_padto(struct sk_buff *skb, unsigned int len)
 2746 {
 2747 	unsigned int size = skb->len;
 2748 	if (likely(size >= len))
 2749 		return 0;
 2750 	return skb_pad(skb, len - size);
 2751 }
 2752 
 2753 /**
 2754  *	skb_put_padto - increase size and pad an skbuff up to a minimal size
 2755  *	@skb: buffer to pad
 2756  *	@len: minimal length
 2757  *
 2758  *	Pads up a buffer to ensure the trailing bytes exist and are
 2759  *	blanked. If the buffer already contains sufficient data it
 2760  *	is untouched. Otherwise it is extended. Returns zero on
 2761  *	success. The skb is freed on error.
 2762  */
 2763 static inline int skb_put_padto(struct sk_buff *skb, unsigned int len)
 2764 {
 2765 	unsigned int size = skb->len;
 2766 
 2767 	if (unlikely(size < len)) {
 2768 		len -= size;
 2769 		if (skb_pad(skb, len))
 2770 			return -ENOMEM;
 2771 		__skb_put(skb, len);
 2772 	}
 2773 	return 0;
 2774 }
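
Sketch of the classic transmit-path use of the padding helpers (assumptions: Ethernet minimum frame size, caller drops the packet on failure). Note that on error the skb has already been freed, so the caller must not touch it again.

#include <linux/skbuff.h>
#include <linux/if_ether.h>

/* Hypothetical: enforce the 60-byte Ethernet minimum before xmit. */
static int example_pad_for_xmit(struct sk_buff *skb)
{
	if (skb_put_padto(skb, ETH_ZLEN))
		return -ENOMEM;	/* skb was freed by skb_put_padto() */

	/* skb->len >= ETH_ZLEN here and the padding bytes are zeroed */
	return 0;
}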
 2775 
 2776 static inline int skb_add_data(struct sk_buff *skb,
 2777 			       struct iov_iter *from, int copy)
 2778 {
 2779 	const int off = skb->len;
 2780 
 2781 	if (skb->ip_summed == CHECKSUM_NONE) {
 2782 		__wsum csum = 0;
 2783 		if (csum_and_copy_from_iter(skb_put(skb, copy), copy,
 2784 					    &csum, from) == copy) {
 2785 			skb->csum = csum_block_add(skb->csum, csum, off);
 2786 			return 0;
 2787 		}
 2788 	} else if (copy_from_iter(skb_put(skb, copy), copy, from) == copy)
 2789 		return 0;
 2790 
 2791 	__skb_trim(skb, off);
 2792 	return -EFAULT;
 2793 }
 2794 
 2795 static inline bool skb_can_coalesce(struct sk_buff *skb, int i,
 2796 				    const struct page *page, int off)
 2797 {
 2798 	if (i) {
 2799 		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
 2800 
 2801 		return page == skb_frag_page(frag) &&
 2802 		       off == frag->page_offset + skb_frag_size(frag);
 2803 	}
 2804 	return false;
 2805 }
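
Illustration of how sendpage-style code uses this check: grow the last fragment when the new chunk is contiguous with it, otherwise start a new fragment. The surrounding bookkeeping (skb->len, data_len and truesize updates) is omitted; the helpers shown are real, the wrapper is hypothetical.

#include <linux/skbuff.h>
#include <linux/mm.h>

/* Hypothetical fill step: coalesce with the previous frag if possible. */
static void example_append_page(struct sk_buff *skb, struct page *page,
				int off, int copy)
{
	int i = skb_shinfo(skb)->nr_frags;

	if (skb_can_coalesce(skb, i, page, off)) {
		skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
	} else {
		get_page(page);
		skb_fill_page_desc(skb, i, page, off, copy);
	}
}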
 2806 
 2807 static inline int __skb_linearize(struct sk_buff *skb)
 2808 {
 2809 	return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
 2810 }
 2811 
 2812 /**
 2813  *	skb_linearize - convert paged skb to linear one
 2814  *	@skb: buffer to linearize
 2815  *
 2816  *	If there is no free memory -ENOMEM is returned, otherwise zero
 2817  *	is returned and the old skb data released.
 2818  */
 2819 static inline int skb_linearize(struct sk_buff *skb)
 2820 {
 2821 	return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
 2822 }
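
A minimal sketch, assuming a consumer that wants to scan the entire packet through skb->data; the byte search itself is just a placeholder.

#include <linux/skbuff.h>
#include <linux/string.h>

/* Hypothetical: search the whole packet for a byte, linearizing first. */
static int example_find_byte(struct sk_buff *skb, u8 byte)
{
	if (skb_linearize(skb))
		return -ENOMEM;

	/* skb->data .. skb->data + skb->len is now contiguous */
	return memchr(skb->data, byte, skb->len) ? 1 : 0;
}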
 2823 
 2824 /**
 2825  * skb_has_shared_frag - can any frag be overwritten
 2826  * @skb: buffer to test
 2827  *
 2828  * Return true if the skb has at least one frag that might be modified
 2829  * by an external entity (as in vmsplice()/sendfile())
 2830  */
 2831 static inline bool skb_has_shared_frag(const struct sk_buff *skb)
 2832 {
 2833 	return skb_is_nonlinear(skb) &&
 2834 	       skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG;
 2835 }
 2836 
 2837 /**
 2838  *	skb_linearize_cow - make sure skb is linear and writable
 2839  *	@skb: buffer to process
 2840  *
 2841  *	If there is no free memory -ENOMEM is returned, otherwise zero
 2842  *	is returned and the old skb data released.
 2843  */
 2844 static inline int skb_linearize_cow(struct sk_buff *skb)
 2845 {
 2846 	return skb_is_nonlinear(skb) || skb_cloned(skb) ?
 2847 	       __skb_linearize(skb) : 0;
 2848 }
 2849 
 2850 /**
 2851  *	skb_postpull_rcsum - update checksum for received skb after pull
 2852  *	@skb: buffer to update
 2853  *	@start: start of data before pull
 2854  *	@len: length of data pulled
 2855  *
 2856  *	After doing a pull on a received packet, you need to call this to
 2857  *	update the CHECKSUM_COMPLETE checksum, or set ip_summed to
 2858  *	CHECKSUM_NONE so that it can be recomputed from scratch.
 2859  */
 2860 
 2861 static inline void skb_postpull_rcsum(struct sk_buff *skb,
 2862 				      const void *start, unsigned int len)
 2863 {
 2864 	if (skb->ip_summed == CHECKSUM_COMPLETE)
 2865 		skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
 2866 	else if (skb->ip_summed == CHECKSUM_PARTIAL &&
 2867 		 skb_checksum_start_offset(skb) < 0)
 2868 		skb->ip_summed = CHECKSUM_NONE;
 2869 }
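
Sketch of how a decapsulating receive path keeps CHECKSUM_COMPLETE coherent across a manual pull; skb_pull_rcsum() (declared just below) wraps essentially this pattern.

#include <linux/skbuff.h>

/* Hypothetical decap step: pull @len header bytes and fix up skb->csum. */
static void example_pull_hdr(struct sk_buff *skb, unsigned int len)
{
	const void *start = skb->data;

	__skb_pull(skb, len);
	skb_postpull_rcsum(skb, start, len);
}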
 2870 
 2871 unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
 2872 
 2873 static inline void skb_postpush_rcsum(struct sk_buff *skb,
 2874 				      const void *start, unsigned int len)
 2875 {
 2876 	/* For performing the reverse operation to skb_postpull_rcsum(),
 2877 	 * we can instead of ...
 2878 	 *
 2879 	 *   skb->csum = csum_add(skb->csum, csum_partial(start, len, 0));
 2880 	 *
 2881 	 * ... just use this equivalent version here to save a few
 2882 	 * instructions. Feeding csum of 0 in csum_partial() and later
 2883 	 * on adding skb->csum is equivalent to feed skb->csum in the
 2884 	 * first place.
 2885 	 */
 2886 	if (skb->ip_summed == CHECKSUM_COMPLETE)
 2887 		skb->csum = csum_partial(start, len, skb->csum);
 2888 }
 2889 
 2890 /**
 2891  *	skb_push_rcsum - push skb and update receive checksum
 2892  *	@skb: buffer to update
 2893  *	@len: length of data pulled
 2894  *
 2895  *	This function performs an skb_push on the packet and updates
 2896  *	the CHECKSUM_COMPLETE checksum.  It should be used on
 2897  *	receive path processing instead of skb_push unless you know
 2898  *	that the checksum difference is zero (e.g., a valid IP header)
 2899  *	or you are setting ip_summed to CHECKSUM_NONE.
 2900  */
 2901 static inline unsigned char *skb_push_rcsum(struct sk_buff *skb,
 2902 					    unsigned int len)
 2903 {
 2904 	skb_push(skb, len);
 2905 	skb_postpush_rcsum(skb, skb->data, len);
 2906 	return skb->data;
 2907 }
 2908 
 2909 /**
 2910  *	pskb_trim_rcsum - trim received skb and update checksum
 2911  *	@skb: buffer to trim
 2912  *	@len: new length
 2913  *
 2914  *	This is exactly the same as pskb_trim except that it ensures the
 2915  *	checksum of received packets are still valid after the operation.
 2916  */
 2917 
 2918 static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
 2919 {
 2920 	if (likely(len >= skb->len))
 2921 		return 0;
 2922 	if (skb->ip_summed == CHECKSUM_COMPLETE)
 2923 		skb->ip_summed = CHECKSUM_NONE;
 2924 	return __pskb_trim(skb, len);
 2925 }
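
Typical receive-path use: trimming trailing padding down to the length claimed by the IP header. A sketch with simplifying assumptions: the IP header sits in the linear area and skb_network_header() has already been set.

#include <linux/skbuff.h>
#include <linux/ip.h>

/* Hypothetical rx step: drop bytes beyond the IP total length. */
static int example_trim_to_iphdr(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	unsigned int len = ntohs(iph->tot_len);

	if (skb->len < len)
		return -EINVAL;		/* truncated packet */

	return pskb_trim_rcsum(skb, len);
}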
 2926 
 2927 #define skb_queue_walk(queue, skb) \
 2928 		for (skb = (queue)->next;					\
 2929 		     skb != (struct sk_buff *)(queue);				\
 2930 		     skb = skb->next)
 2931 
 2932 #define skb_queue_walk_safe(queue, skb, tmp)					\
 2933 		for (skb = (queue)->next, tmp = skb->next;			\
 2934 		     skb != (struct sk_buff *)(queue);				\
 2935 		     skb = tmp, tmp = skb->next)
 2936 
 2937 #define skb_queue_walk_from(queue, skb)						\
 2938 		for (; skb != (struct sk_buff *)(queue);			\
 2939 		     skb = skb->next)
 2940 
 2941 #define skb_queue_walk_from_safe(queue, skb, tmp)				\
 2942 		for (tmp = skb->next;						\
 2943 		     skb != (struct sk_buff *)(queue);				\
 2944 		     skb = tmp, tmp = skb->next)
 2945 
 2946 #define skb_queue_reverse_walk(queue, skb) \
 2947 		for (skb = (queue)->prev;					\
 2948 		     skb != (struct sk_buff *)(queue);				\
 2949 		     skb = skb->prev)
 2950 
 2951 #define skb_queue_reverse_walk_safe(queue, skb, tmp)				\
 2952 		for (skb = (queue)->prev, tmp = skb->prev;			\
 2953 		     skb != (struct sk_buff *)(queue);				\
 2954 		     skb = tmp, tmp = skb->prev)
 2955 
 2956 #define skb_queue_reverse_walk_from_safe(queue, skb, tmp)			\
 2957 		for (tmp = skb->prev;						\
 2958 		     skb != (struct sk_buff *)(queue);				\
 2959 		     skb = tmp, tmp = skb->prev)
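
For illustration, how the walk macros are used: the _safe variants are required whenever the loop may unlink entries, since a plain walker would dereference a freed skb. The caller is assumed to hold the queue lock (otherwise skb_queue_purge() is the right tool).

#include <linux/skbuff.h>

/* Hypothetical: unlink and free every skb on @q under the safe walker. */
static void example_flush_queue(struct sk_buff_head *q)
{
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(q, skb, tmp) {
		__skb_unlink(skb, q);
		kfree_skb(skb);
	}
}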
 2960 
 2961 static inline bool skb_has_frag_list(const struct sk_buff *skb)
 2962 {
 2963 	return skb_shinfo(skb)->frag_list != NULL;
 2964 }
 2965 
 2966 static inline void skb_frag_list_init(struct sk_buff *skb)
 2967 {
 2968 	skb_shinfo(skb)->frag_list = NULL;
 2969 }
 2970 
 2971 #define skb_walk_frags(skb, iter)	\
 2972 	for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
 2973 
 2974 
 2975 int __skb_wait_for_more_packets(struct sock *sk, int *err, long *timeo_p,
 2976 				const struct sk_buff *skb);
 2977 struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned flags,
 2978 					int *peeked, int *off, int *err,
 2979 					struct sk_buff **last);
 2980 struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
 2981 				    int *peeked, int *off, int *err);
 2982 struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
 2983 				  int *err);
 2984 unsigned int datagram_poll(struct file *file, struct socket *sock,
 2985 			   struct poll_table_struct *wait);
 2986 int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
 2987 			   struct iov_iter *to, int size);
 2988 static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
 2989 					struct msghdr *msg, int size)
 2990 {
 2991 	return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size);
 2992 }
 2993 int skb_copy_and_csum_datagram_msg(struct sk_buff *skb, int hlen,
 2994 				   struct msghdr *msg);
 2995 int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
 2996 				 struct iov_iter *from, int len);
 2997 int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm);
 2998 void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
 2999 void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len);
 3000 static inline void skb_free_datagram_locked(struct sock *sk,
 3001 					    struct sk_buff *skb)
 3002 {
 3003 	__skb_free_datagram_locked(sk, skb, 0);
 3004 }
 3005 int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
 3006 int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
 3007 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
 3008 __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
 3009 			      int len, __wsum csum);
 3010 ssize_t skb_socket_splice(struct sock *sk,
 3011 			  struct pipe_inode_info *pipe,
 3012 			  struct splice_pipe_desc *spd);
 3013 int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
 3014 		    struct pipe_inode_info *pipe, unsigned int len,
 3015 		    unsigned int flags,
 3016 		    ssize_t (*splice_cb)(struct sock *,
 3017 					 struct pipe_inode_info *,
 3018 					 struct splice_pipe_desc *));
 3019 void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
 3020 unsigned int skb_zerocopy_headlen(const struct sk_buff *from);
 3021 int skb_zerocopy(struct sk_buff *to, struct sk_buff *from,
 3022 		 int len, int hlen);
 3023 void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
 3024 int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
 3025 void skb_scrub_packet(struct sk_buff *skb, bool xnet);
 3026 unsigned int skb_gso_transport_seglen(const struct sk_buff *skb);
 3027 bool skb_gso_validate_mtu(const struct sk_buff *skb, unsigned int mtu);
 3028 struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
 3029 struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
 3030 int skb_ensure_writable(struct sk_buff *skb, int write_len);
 3031 int skb_vlan_pop(struct sk_buff *skb);
 3032 int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
 3033 struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy,
 3034 			     gfp_t gfp);
 3035 
 3036 static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len)
 3037 {
 3038 	return copy_from_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT;
 3039 }
 3040 
 3041 static inline int memcpy_to_msg(struct msghdr *msg, void *data, int len)
 3042 {
 3043 	return copy_to_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT;
 3044 }
 3045 
 3046 struct skb_checksum_ops {
 3047 	__wsum (*update)(const void *mem, int len, __wsum wsum);
 3048 	__wsum (*combine)(__wsum csum, __wsum csum2, int offset, int len);
 3049 };
 3050 
 3051 __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
 3052 		      __wsum csum, const struct skb_checksum_ops *ops);
 3053 __wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
 3054 		    __wsum csum);
 3055 
 3056 static inline void * __must_check
 3057 __skb_header_pointer(const struct sk_buff *skb, int offset,
 3058 		     int len, void *data, int hlen, void *buffer)
 3059 {
 3060 	if (hlen - offset >= len)
 3061 		return data + offset;
 3062 
 3063 	if (!skb ||
 3064 	    skb_copy_bits(skb, offset, buffer, len) < 0)
 3065 		return NULL;
 3066 
 3067 	return buffer;
 3068 }
 3069 
 3070 static inline void * __must_check
 3071 skb_header_pointer(const struct sk_buff *skb, int offset, int len, void *buffer)
 3072 {
 3073 	return __skb_header_pointer(skb, offset, len, skb->data,
 3074 				    skb_headlen(skb), buffer);
 3075 }
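
A minimal sketch of the canonical skb_header_pointer() pattern: read a header that may span fragments by falling back to a stack copy. The UDP destination-port lookup is an invented example.

#include <linux/skbuff.h>
#include <linux/udp.h>

/* Hypothetical: fetch the UDP header at @offset, copying if non-linear. */
static int example_get_dport(const struct sk_buff *skb, int offset,
			     __be16 *dport)
{
	struct udphdr _uh;
	const struct udphdr *uh;

	uh = skb_header_pointer(skb, offset, sizeof(_uh), &_uh);
	if (!uh)
		return -EINVAL;		/* packet shorter than the header */

	*dport = uh->dest;
	return 0;
}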
 3076 
 3077 /**
 3078  *	skb_needs_linearize - check if we need to linearize a given skb
 3079  *			      depending on the given device features.
 3080  *	@skb: socket buffer to check
 3081  *	@features: net device features
 3082  *
 3083  *	Returns true if either:
 3084  *	1. skb has frag_list and the device doesn't support FRAGLIST, or
 3085  *	2. skb is fragmented and the device does not support SG.
 3086  */
 3087 static inline bool skb_needs_linearize(struct sk_buff *skb,
 3088 				       netdev_features_t features)
 3089 {
 3090 	return skb_is_nonlinear(skb) &&
 3091 	       ((skb_has_frag_list(skb) && !(features & NETIF_F_FRAGLIST)) ||
 3092 		(skb_shinfo(skb)->nr_frags && !(features & NETIF_F_SG)));
 3093 }
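
Illustrative transmit-path use, mirroring (in simplified, hypothetical form) what the core xmit path does before handing an skb to a device that lacks SG or FRAGLIST support.

#include <linux/skbuff.h>

/* Hypothetical: linearize only when the device's features demand it. */
static int example_prep_xmit(struct sk_buff *skb, netdev_features_t features)
{
	if (skb_needs_linearize(skb, features) && __skb_linearize(skb))
		return -ENOMEM;

	return 0;
}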
 3094 
 3095 static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
 3096 					     void *to,
 3097 					     const unsigned int len)
 3098 {
 3099 	memcpy(to, skb->data, len);
 3100 }
 3101 
 3102 static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
 3103 						    const int offset, void *to,
 3104 						    const unsigned int len)
 3105 {
 3106 	memcpy(to, skb->data + offset, len);
 3107 }
 3108 
 3109 static inline void skb_copy_to_linear_data(struct sk_buff *skb,
 3110 					   const void *from,
 3111 					   const unsigned int len)
 3112 {
 3113 	memcpy(skb->data, from, len);
 3114 }
 3115 
 3116 static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
 3117 						  const int offset,
 3118 						  const void *from,
 3119 						  const unsigned int len)
 3120 {
 3121 	memcpy(skb->data + offset, from, len);
 3122 }
 3123 
 3124 void skb_init(void);
 3125 
 3126 static inline ktime_t skb_get_ktime(const struct sk_buff *skb)
 3127 {
 3128 	return skb->tstamp;
 3129 }
 3130 
 3131 /**
 3132  *	skb_get_timestamp - get timestamp from a skb
 3133  *	@skb: skb to get stamp from
 3134  *	@stamp: pointer to struct timeval to store stamp in
 3135  *
 3136  *	Timestamps are stored in the skb as offsets to a base timestamp.
 3137  *	This function converts the offset back to a struct timeval and stores
 3138  *	it in stamp.
 3139  */
 3140 static inline void skb_get_timestamp(const struct sk_buff *skb,
 3141 				     struct timeval *stamp)
 3142 {
 3143 	*stamp = ktime_to_timeval(skb->tstamp);
 3144 }
 3145 
 3146 static inline void skb_get_timestampns(const struct sk_buff *skb,
 3147 				       struct timespec *stamp)
 3148 {
 3149 	*stamp = ktime_to_timespec(skb->tstamp);
 3150 }
 3151 
 3152 static inline void __net_timestamp(struct sk_buff *skb)
 3153 {
 3154 	skb->tstamp = ktime_get_real();
 3155 }
 3156 
 3157 static inline ktime_t net_timedelta(ktime_t t)
 3158 {
 3159 	return ktime_sub(ktime_get_real(), t);
 3160 }
 3161 
 3162 static inline ktime_t net_invalid_timestamp(void)
 3163 {
 3164 	return ktime_set(0, 0);
 3165 }
 3166 
 3167 struct sk_buff *skb_clone_sk(struct sk_buff *skb);
 3168 
 3169 #ifdef CONFIG_NETWORK_PHY_TIMESTAMPING
 3170 
 3171 void skb_clone_tx_timestamp(struct sk_buff *skb);
 3172 bool skb_defer_rx_timestamp(struct sk_buff *skb);
 3173 
 3174 #else /* CONFIG_NETWORK_PHY_TIMESTAMPING */
 3175 
 3176 static inline void skb_clone_tx_timestamp(struct sk_buff *skb)
 3177 {
 3178 }
 3179 
 3180 static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
 3181 {
 3182 	return false;
 3183 }
 3184 
 3185 #endif /* !CONFIG_NETWORK_PHY_TIMESTAMPING */
 3186 
 3187 /**
 3188  * skb_complete_tx_timestamp() - deliver cloned skb with tx timestamps
 3189  *
 3190  * PHY drivers may accept clones of transmitted packets for
 3191  * timestamping via their phy_driver.txtstamp method. These drivers
 3192  * must call this function to return the skb back to the stack with a
 3193  * timestamp.
 3194  *
 3195  * @skb: clone of the original outgoing packet
 3196  * @hwtstamps: hardware time stamps
 3197  *
 3198  */
 3199 void skb_complete_tx_timestamp(struct sk_buff *skb,
 3200 			       struct skb_shared_hwtstamps *hwtstamps);
 3201 
 3202 void __skb_tstamp_tx(struct sk_buff *orig_skb,
 3203 		     struct skb_shared_hwtstamps *hwtstamps,
 3204 		     struct sock *sk, int tstype);
 3205 
 3206 /**
 3207  * skb_tstamp_tx - queue clone of skb with send time stamps
 3208  * @orig_skb:	the original outgoing packet
 3209  * @hwtstamps:	hardware time stamps, may be NULL if not available
 3210  *
 3211  * If the skb has a socket associated, then this function clones the
 3212  * skb (thus sharing the actual data and optional structures), stores
 3213  * the optional hardware time stamping information (if non NULL) or
 3214  * generates a software time stamp (otherwise), then queues the clone
 3215  * to the error queue of the socket.  Errors are silently ignored.
 3216  */
 3217 void skb_tstamp_tx(struct sk_buff *orig_skb,
 3218 		   struct skb_shared_hwtstamps *hwtstamps);
 3219 
 3220 static inline void sw_tx_timestamp(struct sk_buff *skb)
 3221 {
 3222 	if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP &&
 3223 	    !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
 3224 		skb_tstamp_tx(skb, NULL);
 3225 }
 3226 
 3227 /**
 3228  * skb_tx_timestamp() - Driver hook for transmit timestamping
 3229  *
 3230  * Ethernet MAC Drivers should call this function in their hard_xmit()
 3231  * function immediately before giving the sk_buff to the MAC hardware.
 3232  *
 3233  * Specifically, one should make absolutely sure that this function is
 3234  * called before TX completion of this packet can trigger.  Otherwise
 3235  * the packet could potentially already be freed.
 3236  *
 3237  * @skb: A socket buffer.
 3238  */
 3239 static inline void skb_tx_timestamp(struct sk_buff *skb)
 3240 {
 3241 	skb_clone_tx_timestamp(skb);
 3242 	sw_tx_timestamp(skb);
 3243 }
 3244 
 3245 /**
 3246  * skb_complete_wifi_ack - deliver skb with wifi status
 3247  *
 3248  * @skb: the original outgoing packet
 3249  * @acked: ack status
 3250  *
 3251  */
 3252 void skb_complete_wifi_ack(struct sk_buff *skb, bool acked);
 3253 
 3254 __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
 3255 __sum16 __skb_checksum_complete(struct sk_buff *skb);
 3256 
 3257 static inline int skb_csum_unnecessary(const struct sk_buff *skb)
 3258 {
 3259 	return ((skb->ip_summed == CHECKSUM_UNNECESSARY) ||
 3260 		skb->csum_valid ||
 3261 		(skb->ip_summed == CHECKSUM_PARTIAL &&
 3262 		 skb_checksum_start_offset(skb) >= 0));
 3263 }
 3264 
 3265 /**
 3266  *	skb_checksum_complete - Calculate checksum of an entire packet
 3267  *	@skb: packet to process
 3268  *
 3269  *	This function calculates the checksum over the entire packet plus
 3270  *	the value of skb->csum.  The latter can be used to supply the
 3271  *	checksum of a pseudo header as used by TCP/UDP.  It returns the
 3272  *	checksum.
 3273  *
 3274  *	For protocols that contain complete checksums such as ICMP/TCP/UDP,
 3275  *	this function can be used to verify the checksum on received
 3276  *	packets.  In that case the function should return zero if the
 3277  *	checksum is correct.  In particular, this function will return zero
 3278  *	if skb->ip_summed is CHECKSUM_UNNECESSARY which indicates that the
 3279  *	hardware has already verified the correctness of the checksum.
 3280  */
 3281 static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
 3282 {
 3283 	return skb_csum_unnecessary(skb) ?
 3284 	       0 : __skb_checksum_complete(skb);
 3285 }
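
Sketch of a receive-path check as a protocol handler might perform it (simplified: real TCP/UDP code first seeds skb->csum with the pseudo-header, e.g. via the validate helpers further below).

#include <linux/skbuff.h>

/* Hypothetical rx check: free the packet if the checksum fails. */
static int example_rx_check(struct sk_buff *skb)
{
	if (skb_checksum_complete(skb)) {
		kfree_skb(skb);
		return -EINVAL;		/* bad checksum */
	}
	return 0;
}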
 3286 
 3287 static inline void __skb_decr_checksum_unnecessary(struct sk_buff *skb)
 3288 {
 3289 	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
 3290 		if (skb->csum_level == 0)
 3291 			skb->ip_summed = CHECKSUM_NONE;
 3292 		else
 3293 			skb->csum_level--;
 3294 	}
 3295 }
 3296 
 3297 static inline void __skb_incr_checksum_unnecessary(struct sk_buff *skb)
 3298 {
 3299 	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
 3300 		if (skb->csum_level < SKB_MAX_CSUM_LEVEL)
 3301 			skb->csum_level++;
 3302 	} else if (skb->ip_summed == CHECKSUM_NONE) {
 3303 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 3304 		skb->csum_level = 0;
 3305 	}
 3306 }
 3307 
 3308 static inline void __skb_mark_checksum_bad(struct sk_buff *skb)
 3309 {
 3310 	/* Mark current checksum as bad (typically called from GRO
 3311 	 * path). In the case that ip_summed is CHECKSUM_NONE
 3312 	 * this must be the first checksum encountered in the packet.
 3313 	 * When ip_summed is CHECKSUM_UNNECESSARY, this is the first
 3314 	 * checksum after the last one validated. For UDP, a zero
 3315 	 * checksum can not be marked as bad.
 3316 	 */
 3317 
 3318 	if (skb->ip_summed == CHECKSUM_NONE ||
 3319 	    skb->ip_summed == CHECKSUM_UNNECESSARY)
 3320 		skb->csum_bad = 1;
 3321 }
 3322 
 3323 /* Check if we need to perform checksum complete validation.
 3324  *
 3325  * Returns true if checksum complete is needed, false otherwise
 3326  * (either checksum is unnecessary or zero checksum is allowed).
 3327  */
 3328 static inline bool __skb_checksum_validate_needed(struct sk_buff *skb,
 3329 						  bool zero_okay,
 3330 						  __sum16 check)
 3331 {
 3332 	if (skb_csum_unnecessary(skb) || (zero_okay && !check)) {
 3333 		skb->csum_valid = 1;
 3334 		__skb_decr_checksum_unnecessary(skb);
 3335 		return false;
 3336 	}
 3337 
 3338 	return true;
 3339 }
 3340 
 3341 /* For small packets <= CHECKSUM_BREAK perform checksum complete directly
 3342  * in checksum_init.
 3343  */
 3344 #define CHECKSUM_BREAK 76
 3345 
 3346 /* Unset checksum-complete
 3347  *
 3348  * Unsetting checksum complete can be done when a packet is being modified
 3349  * (decompressed, for instance) and the checksum-complete value is
 3350  * invalidated.
 3351  */
 3352 static inline void skb_checksum_complete_unset(struct sk_buff *skb)
 3353 {
 3354 	if (skb->ip_summed == CHECKSUM_COMPLETE)
 3355 		skb->ip_summed = CHECKSUM_NONE;
 3356 }
 3357 
 3358 /* Validate (init) checksum based on checksum complete.
 3359  *
 3360  * Return values:
 3361  *   0: checksum is validated, or will be in skb_checksum_complete. In the
 3362  *	latter case ip_summed will not be CHECKSUM_UNNECESSARY and the pseudo
 3363  *	checksum is stored in skb->csum for use in __skb_checksum_complete
 3364  *   non-zero: value of invalid checksum
 3365  *
 3366  */
 3367 static inline __sum16 __skb_checksum_validate_complete(struct sk_buff *skb,
 3368 						       bool complete,
 3369 						       __wsum psum)
 3370 {
 3371 	if (skb->ip_summed == CHECKSUM_COMPLETE) {
 3372 		if (!csum_fold(csum_add(psum, skb->csum))) {
 3373 			skb->csum_valid = 1;
 3374 			return 0;
 3375 		}
 3376 	} else if (skb->csum_bad) {
 3377 		/* ip_summed == CHECKSUM_NONE in this case */
 3378 		return (__force __sum16)1;
 3379 	}
 3380 
 3381 	skb->csum = psum;
 3382 
 3383 	if (complete || skb->len <= CHECKSUM_BREAK) {
 3384 		__sum16 csum;
 3385 
 3386 		csum = __skb_checksum_complete(skb);
 3387 		skb->csum_valid = !csum;
 3388 		return csum;
 3389 	}
 3390 
 3391 	return 0;
 3392 }
 3393 
 3394 static inline __wsum null_compute_pseudo(struct sk_buff *skb, int proto)
 3395 {
 3396 	return 0;
 3397 }
 3398 
 3399 /* Perform checksum validate (init). Note that this is a macro so that the
 3400  * pseudo header computation (an input function) is only evaluated if necessary.
 3401  * First we try to validate without any computation (checksum unnecessary) and
 3402  * then calculate based on checksum complete calling the function to compute
 3403  * pseudo header.
 3404  *
 3405  * Return values:
 3406  *   0: checksum is validated or try to in skb_checksum_complete
 3407  *   non-zero: value of invalid checksum
 3408  */
 3409 #define __skb_checksum_validate(skb, proto, complete,			\
 3410 				zero_okay, check, compute_pseudo)	\
 3411 ({									\
 3412 	__sum16 __ret = 0;						\
 3413 	skb->csum_valid = 0;						\
 3414 	if (__skb_checksum_validate_needed(skb, zero_okay, check))	\
 3415 		__ret = __skb_checksum_validate_complete(skb,		\
 3416 				complete, compute_pseudo(skb, proto));	\
 3417 	__ret;								\
 3418 })
 3419 
 3420 #define skb_checksum_init(skb, proto, compute_pseudo)			\
 3421 	__skb_checksum_validate(skb, proto, false, false, 0, compute_pseudo)
 3422 
 3423 #define skb_checksum_init_zero_check(skb, proto, check, compute_pseudo)	\
 3424 	__skb_checksum_validate(skb, proto, false, true, check, compute_pseudo)
 3425 
 3426 #define skb_checksum_validate(skb, proto, compute_pseudo)		\
 3427 	__skb_checksum_validate(skb, proto, true, false, 0, compute_pseudo)
 3428 
 3429 #define skb_checksum_validate_zero_check(skb, proto, check,		\
 3430 					 compute_pseudo)		\
 3431 	__skb_checksum_validate(skb, proto, true, true, check, compute_pseudo)
 3432 
 3433 #define skb_checksum_simple_validate(skb)				\
 3434 	__skb_checksum_validate(skb, 0, true, false, 0, null_compute_pseudo)
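
For protocols whose checksum covers the whole packet with no pseudo-header (ICMP or GRE, for instance), validation reduces to the simple form above; the handler shape below is an assumption.

#include <linux/skbuff.h>

/* Hypothetical ICMP-style receive handler using the simple validator. */
static int example_proto_rcv(struct sk_buff *skb)
{
	if (skb_checksum_simple_validate(skb))
		goto csum_error;

	/* ... normal protocol processing would go here ... */
	return 0;

csum_error:
	kfree_skb(skb);
	return -EINVAL;
}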
 3435 
 3436 static inline bool __skb_checksum_convert_check(struct sk_buff *skb)
 3437 {
 3438 	return (skb->ip_summed == CHECKSUM_NONE &&
 3439 		skb->csum_valid && !skb->csum_bad);
 3440 }
 3441 
 3442 static inline void __skb_checksum_convert(struct sk_buff *skb,
 3443 					  __sum16 check, __wsum pseudo)
 3444 {
 3445 	skb->csum = ~pseudo;
 3446 	skb->ip_summed = CHECKSUM_COMPLETE;
 3447 }
 3448 
 3449 #define skb_checksum_try_convert(skb, proto, check, compute_pseudo)	\
 3450 do {									\
 3451 	if (__skb_checksum_convert_check(skb))				\
 3452 		__skb_checksum_convert(skb, check,			\
 3453 				       compute_pseudo(skb, proto));	\
 3454 } while (0)
 3455 
 3456 static inline void skb_remcsum_adjust_partial(struct sk_buff *skb, void *ptr,
 3457 					      u16 start, u16 offset)
 3458 {
 3459 	skb->ip_summed = CHECKSUM_PARTIAL;
 3460 	skb->csum_start = ((unsigned char *)ptr + start) - skb->head;
 3461 	skb->csum_offset = offset - start;
 3462 }
 3463 
 3464 /* Update skb and packet to reflect the remote checksum offload operation.
 3465  * When called, ptr indicates the starting point for skb->csum when
 3466  * ip_summed is CHECKSUM_COMPLETE. If we need to create checksum complete
 3467  * here, skb_postpull_rcsum is done so that skb->csum starts at ptr.
 3468  */
 3469 static inline void skb_remcsum_process(struct sk_buff *skb, void *ptr,
 3470 				       int start, int offset, bool nopartial)
 3471 {
 3472 	__wsum delta;
 3473 
 3474 	if (!nopartial) {
 3475 		skb_remcsum_adjust_partial(skb, ptr, start, offset);
 3476 		return;
 3477 	}
 3478 
 3479 	if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) {
 3480 		__skb_checksum_complete(skb);
 3481 		skb_postpull_rcsum(skb, skb->data, ptr - (void *)skb->data);
 3482 	}
 3483 
 3484 	delta = remcsum_adjust(ptr, skb->csum, start, offset);
 3485 
 3486 	/* Adjust skb->csum since we changed the packet */
 3487 	skb->csum = csum_add(skb->csum, delta);
 3488 }
 3489 
 3490 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 3491 void nf_conntrack_destroy(struct nf_conntrack *nfct);
 3492 static inline void nf_conntrack_put(struct nf_conntrack *nfct)
 3493 {
 3494 	if (nfct && atomic_dec_and_test(&nfct->use))
 3495 		nf_conntrack_destroy(nfct);
 3496 }
 3497 static inline void nf_conntrack_get(struct nf_conntrack *nfct)
 3498 {
 3499 	if (nfct)
 3500 		atomic_inc(&nfct->use);
 3501 }
 3502 #endif
 3503 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
 3504 static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
 3505 {
 3506 	if (nf_bridge && atomic_dec_and_test(&nf_bridge->use))
 3507 		kfree(nf_bridge);
 3508 }
 3509 static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
 3510 {
 3511 	if (nf_bridge)
 3512 		atomic_inc(&nf_bridge->use);
 3513 }
 3514 #endif /* CONFIG_BRIDGE_NETFILTER */
 3515 static inline void nf_reset(struct sk_buff *skb)
 3516 {
 3517 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 3518 	nf_conntrack_put(skb->nfct);
 3519 	skb->nfct = NULL;
 3520 #endif
 3521 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
 3522 	nf_bridge_put(skb->nf_bridge);
 3523 	skb->nf_bridge = NULL;
 3524 #endif
 3525 }
 3526 
 3527 static inline void nf_reset_trace(struct sk_buff *skb)
 3528 {
 3529 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
 3530 	skb->nf_trace = 0;
 3531 #endif
 3532 }
 3533 
 3534 /* Note: This doesn't put any conntrack and bridge info in dst. */
 3535 static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
 3536 			     bool copy)
 3537 {
 3538 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 3539 	dst->nfct = src->nfct;
 3540 	nf_conntrack_get(src->nfct);
 3541 	if (copy)
 3542 		dst->nfctinfo = src->nfctinfo;
 3543 #endif
 3544 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
 3545 	dst->nf_bridge  = src->nf_bridge;
 3546 	nf_bridge_get(src->nf_bridge);
 3547 #endif
 3548 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
 3549 	if (copy)
 3550 		dst->nf_trace = src->nf_trace;
 3551 #endif
 3552 }
 3553 
 3554 static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
 3555 {
 3556 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 3557 	nf_conntrack_put(dst->nfct);
 3558 #endif
 3559 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
 3560 	nf_bridge_put(dst->nf_bridge);
 3561 #endif
 3562 	__nf_copy(dst, src, true);
 3563 }
 3564 
 3565 #ifdef CONFIG_NETWORK_SECMARK
 3566 static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
 3567 {
 3568 	to->secmark = from->secmark;
 3569 }
 3570 
 3571 static inline void skb_init_secmark(struct sk_buff *skb)
 3572 {
 3573 	skb->secmark = 0;
 3574 }
 3575 #else
 3576 static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
 3577 { }
 3578 
 3579 static inline void skb_init_secmark(struct sk_buff *skb)
 3580 { }
 3581 #endif
 3582 
 3583 static inline bool skb_irq_freeable(const struct sk_buff *skb)
 3584 {
 3585 	return !skb->destructor &&
 3586 #if IS_ENABLED(CONFIG_XFRM)
 3587 		!skb->sp &&
 3588 #endif
 3589 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
 3590 		!skb->nfct &&
 3591 #endif
 3592 		!skb->_skb_refdst &&
 3593 		!skb_has_frag_list(skb);
 3594 }
 3595 
 3596 static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
 3597 {
 3598 	skb->queue_mapping = queue_mapping;
 3599 }
 3600 
 3601 static inline u16 skb_get_queue_mapping(const struct sk_buff *skb)
 3602 {
 3603 	return skb->queue_mapping;
 3604 }
 3605 
 3606 static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
 3607 {
 3608 	to->queue_mapping = from->queue_mapping;
 3609 }
 3610 
 3611 static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue)
 3612 {
 3613 	skb->queue_mapping = rx_queue + 1;
 3614 }
 3615 
 3616 static inline u16 skb_get_rx_queue(const struct sk_buff *skb)
 3617 {
 3618 	return skb->queue_mapping - 1;
 3619 }
 3620 
 3621 static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
 3622 {
 3623 	return skb->queue_mapping != 0;
 3624 }
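
Note the off-by-one encoding above: queue_mapping stores rx_queue + 1 so that zero can mean "not recorded". A hypothetical driver receive fragment, with the hardware queue index assumed to come from the rx descriptor:

#include <linux/skbuff.h>

/* Hypothetical NAPI poll step: tag the skb with its hardware rx queue. */
static void example_tag_rx(struct sk_buff *skb, u16 hw_queue)
{
	skb_record_rx_queue(skb, hw_queue);
	/* skb_rx_queue_recorded(skb) is now true and
	 * skb_get_rx_queue(skb) == hw_queue */
}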
 3625 
 3626 static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
 3627 {
 3628 #ifdef CONFIG_XFRM
 3629 	return skb->sp;
 3630 #else
 3631 	return NULL;
 3632 #endif
 3633 }
 3634 
 3635 /* Keeps track of mac header offset relative to skb->head.
 3636  * It is useful for TSO of tunneling protocols, e.g. GRE.
 3637  * For non-tunnel skb it points to skb_mac_header() and for
 3638  * tunnel skb it points to outer mac header.
 3639  * Keeps track of level of encapsulation of network headers.
 3640  */
 3641 struct skb_gso_cb {
 3642 	union {
 3643 		int	mac_offset;
 3644 		int	data_offset;
 3645 	};
 3646 	int	encap_level;
 3647 	__wsum	csum;
 3648 	__u16	csum_start;
 3649 };
 3650 #define SKB_GSO_CB_OFFSET	32
 3651 #define SKB_GSO_CB(skb) ((struct skb_gso_cb *)((skb)->cb + SKB_GSO_CB_OFFSET))
 3652 
 3653 static inline int skb_tnl_header_len(const struct sk_buff *inner_skb)
 3654 {
 3655 	return (skb_mac_header(inner_skb) - inner_skb->head) -
 3656 		SKB_GSO_CB(inner_skb)->mac_offset;
 3657 }
 3658 
 3659 static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra)
 3660 {
 3661 	int new_headroom, headroom;
 3662 	int ret;
 3663 
 3664 	headroom = skb_headroom(skb);
 3665 	ret = pskb_expand_head(skb, extra, 0, GFP_ATOMIC);
 3666 	if (ret)
 3667 		return ret;
 3668 
 3669 	new_headroom = skb_headroom(skb);
 3670 	SKB_GSO_CB(skb)->mac_offset += (new_headroom - headroom);
 3671 	return 0;
 3672 }
 3673 
 3674 static inline void gso_reset_checksum(struct sk_buff *skb, __wsum res)
 3675 {
 3676 	/* Do not update partial checksums if remote checksum is enabled. */
 3677 	if (skb->remcsum_offload)
 3678 		return;
 3679 
 3680 	SKB_GSO_CB(skb)->csum = res;
 3681 	SKB_GSO_CB(skb)->csum_start = skb_checksum_start(skb) - skb->head;
 3682 }
 3683 
 3684 /* Compute the checksum for a gso segment. First compute the checksum value
 3685  * from the start of transport header to SKB_GSO_CB(skb)->csum_start, and
 3686  * then add in skb->csum (checksum from csum_start to end of packet).
 3687  * skb->csum and csum_start are then updated to reflect the checksum of the
 3688  * resultant packet starting from the transport header-- the resultant checksum
 3689  * is in the res argument (i.e. normally zero or ~ of checksum of a pseudo
 3690  * header).
 3691  */
 3692 static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res)
 3693 {
 3694 	unsigned char *csum_start = skb_transport_header(skb);
 3695 	int plen = (skb->head + SKB_GSO_CB(skb)->csum_start) - csum_start;
 3696 	__wsum partial = SKB_GSO_CB(skb)->csum;
 3697 
 3698 	SKB_GSO_CB(skb)->csum = res;
 3699 	SKB_GSO_CB(skb)->csum_start = csum_start - skb->head;
 3700 
 3701 	return csum_fold(csum_partial(csum_start, plen, partial));
 3702 }
 3703 
 3704 static inline bool skb_is_gso(const struct sk_buff *skb)
 3705 {
 3706 	return skb_shinfo(skb)->gso_size;
 3707 }
 3708 
 3709 /* Note: Should be called only if skb_is_gso(skb) is true */
 3710 static inline bool skb_is_gso_v6(const struct sk_buff *skb)
 3711 {
 3712 	return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
 3713 }
 3714 
 3715 void __skb_warn_lro_forwarding(const struct sk_buff *skb);
 3716 
 3717 static inline bool skb_warn_if_lro(const struct sk_buff *skb)
 3718 {
 3719 	/* LRO sets gso_size but not gso_type, whereas if GSO is really
 3720 	 * wanted then gso_type will be set. */
 3721 	const struct skb_shared_info *shinfo = skb_shinfo(skb);
 3722 
 3723 	if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&
 3724 	    unlikely(shinfo->gso_type == 0)) {
 3725 		__skb_warn_lro_forwarding(skb);
 3726 		return true;
 3727 	}
 3728 	return false;
 3729 }
 3730 
 3731 static inline void skb_forward_csum(struct sk_buff *skb)
 3732 {
 3733 	/* Unfortunately we don't support this one.  Any brave souls? */
 3734 	if (skb->ip_summed == CHECKSUM_COMPLETE)
 3735 		skb->ip_summed = CHECKSUM_NONE;
 3736 }
 3737 
 3738 /**
 3739  * skb_checksum_none_assert - make sure skb ip_summed is CHECKSUM_NONE
 3740  * @skb: skb to check
 3741  *
 3742  * fresh skbs have their ip_summed set to CHECKSUM_NONE.
 3743  * Instead of forcing ip_summed to CHECKSUM_NONE, we can
 3744  * use this helper, to document places where we make this assertion.
 3745  */
 3746 static inline void skb_checksum_none_assert(const struct sk_buff *skb)
 3747 {
 3748 #ifdef DEBUG
 3749 	BUG_ON(skb->ip_summed != CHECKSUM_NONE);
 3750 #endif
 3751 }
 3752 
 3753 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
 3754 
 3755 int skb_checksum_setup(struct sk_buff *skb, bool recalculate);
 3756 struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
 3757 				     unsigned int transport_len,
 3758 				     __sum16(*skb_chkf)(struct sk_buff *skb));
 3759 
 3760 /**
 3761  * skb_head_is_locked - Determine if the skb->head is locked down
 3762  * @skb: skb to check
 3763  *
 3764  * The head on skbs built around a head frag can be removed if they are
 3765  * not cloned.  This function returns true if the skb head is locked down
 3766  * due to either being allocated via kmalloc, or by being a clone with
 3767  * multiple references to the head.
 3768  */
 3769 static inline bool skb_head_is_locked(const struct sk_buff *skb)
 3770 {
 3771 	return !skb->head_frag || skb_cloned(skb);
 3772 }
 3773 
 3774 /**
 3775  * skb_gso_network_seglen - Return length of individual segments of a gso packet
 3776  *
 3777  * @skb: GSO skb
 3778  *
 3779  * skb_gso_network_seglen is used to determine the real size of the
 3780  * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP).
 3781  *
 3782  * The MAC/L2 header is not accounted for.
 3783  */
 3784 static inline unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
 3785 {
 3786 	unsigned int hdr_len = skb_transport_header(skb) -
 3787 			       skb_network_header(skb);
 3788 	return hdr_len + skb_gso_transport_seglen(skb);
 3789 }
 3790 
 3791 /* Local Checksum Offload.
 3792  * Compute outer checksum based on the assumption that the
 3793  * inner checksum will be offloaded later.
 3794  * See Documentation/networking/checksum-offloads.txt for
 3795  * explanation of how this works.
 3796  * Fill in outer checksum adjustment (e.g. with sum of outer
 3797  * pseudo-header) before calling.
 3798  * Also ensure that inner checksum is in linear data area.
 3799  */
 3800 static inline __wsum lco_csum(struct sk_buff *skb)
 3801 {
 3802 	unsigned char *csum_start = skb_checksum_start(skb);
 3803 	unsigned char *l4_hdr = skb_transport_header(skb);
 3804 	__wsum partial;
 3805 
 3806 	/* Start with complement of inner checksum adjustment */
 3807 	partial = ~csum_unfold(*(__force __sum16 *)(csum_start +
 3808 						    skb->csum_offset));
 3809 
 3810 	/* Add in checksum of our headers (incl. outer checksum
 3811 	 * adjustment filled in by caller) and return result.
 3812 	 */
 3813 	return csum_partial(l4_hdr, csum_start - l4_hdr, partial);
 3814 }
 3815 
 3816 #endif	/* __KERNEL__ */
 3817 #endif	/* _LINUX_SKBUFF_H */
    1 #ifndef __LINUX_SPINLOCK_H
    2 #define __LINUX_SPINLOCK_H
    3 
    4 /*
    5  * include/linux/spinlock.h - generic spinlock/rwlock declarations
    6  *
    7  * here's the role of the various spinlock/rwlock related include files:
    8  *
    9  * on SMP builds:
   10  *
   11  *  asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
   12  *                        initializers
   13  *
   14  *  linux/spinlock_types.h:
   15  *                        defines the generic type and initializers
   16  *
   17  *  asm/spinlock.h:       contains the arch_spin_*()/etc. lowlevel
   18  *                        implementations, mostly inline assembly code
   19  *
   20  *   (also included on UP-debug builds:)
   21  *
   22  *  linux/spinlock_api_smp.h:
   23  *                        contains the prototypes for the _spin_*() APIs.
   24  *
   25  *  linux/spinlock.h:     builds the final spin_*() APIs.
   26  *
   27  * on UP builds:
   28  *
   29  *  linux/spinlock_type_up.h:
   30  *                        contains the generic, simplified UP spinlock type.
   31  *                        (which is an empty structure on non-debug builds)
   32  *
   33  *  linux/spinlock_types.h:
   34  *                        defines the generic type and initializers
   35  *
   36  *  linux/spinlock_up.h:
   37  *                        contains the arch_spin_*()/etc. version of UP
   38  *                        builds. (which are NOPs on non-debug, non-preempt
   39  *                        builds)
   40  *
   41  *   (included on UP-non-debug builds:)
   42  *
   43  *  linux/spinlock_api_up.h:
   44  *                        builds the _spin_*() APIs.
   45  *
   46  *  linux/spinlock.h:     builds the final spin_*() APIs.
   47  */
   48 
   49 #include <linux/typecheck.h>
   50 #include <linux/preempt.h>
   51 #include <linux/linkage.h>
   52 #include <linux/compiler.h>
   53 #include <linux/irqflags.h>
   54 #include <linux/thread_info.h>
   55 #include <linux/kernel.h>
   56 #include <linux/stringify.h>
   57 #include <linux/bottom_half.h>
   58 #include <asm/barrier.h>
   59 
   60 
   61 /*
   62  * Must define these before including other files, inline functions need them
   63  */
   64 #define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME
   65 
   66 #define LOCK_SECTION_START(extra)               \
   67         ".subsection 1\n\t"                     \
   68         extra                                   \
   69         ".ifndef " LOCK_SECTION_NAME "\n\t"     \
   70         LOCK_SECTION_NAME ":\n\t"               \
   71         ".endif\n"
   72 
   73 #define LOCK_SECTION_END                        \
   74         ".previous\n\t"
   75 
   76 #define __lockfunc __attribute__((section(".spinlock.text")))
   77 
   78 /*
   79  * Pull the arch_spinlock_t and arch_rwlock_t definitions:
   80  */
   81 #include <linux/spinlock_types.h>
   82 
   83 /*
   84  * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
   85  */
   86 #ifdef CONFIG_SMP
   87 # include <asm/spinlock.h>
   88 #else
   89 # include <linux/spinlock_up.h>
   90 #endif
   91 
   92 #ifdef CONFIG_DEBUG_SPINLOCK
   93   extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
   94 				   struct lock_class_key *key);
   95 # define raw_spin_lock_init(lock)				\
   96 do {								\
   97 	static struct lock_class_key __key;			\
   98 								\
   99 	__raw_spin_lock_init((lock), #lock, &__key);		\
  100 } while (0)
  101 
  102 #else
  103 # define raw_spin_lock_init(lock)				\
  104 	do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
  105 #endif
  106 
  107 #define raw_spin_is_locked(lock)	arch_spin_is_locked(&(lock)->raw_lock)
  108 
  109 #ifdef CONFIG_GENERIC_LOCKBREAK
  110 #define raw_spin_is_contended(lock) ((lock)->break_lock)
  111 #else
  112 
  113 #ifdef arch_spin_is_contended
  114 #define raw_spin_is_contended(lock)	arch_spin_is_contended(&(lock)->raw_lock)
  115 #else
  116 #define raw_spin_is_contended(lock)	(((void)(lock), 0))
  117 #endif /*arch_spin_is_contended*/
  118 #endif
  119 
  120 /*
  121  * Despite its name it doesn't necessarily have to be a full barrier.
  122  * It should only guarantee that a STORE before the critical section
  123  * can not be reordered with LOADs and STOREs inside this section.
  124  * spin_lock() is the one-way barrier, this LOAD can not escape out
  125  * of the region. So the default implementation simply ensures that
  126  * a STORE can not move into the critical section, smp_wmb() should
  127  * serialize it with another STORE done by spin_lock().
  128  */
  129 #ifndef smp_mb__before_spinlock
  130 #define smp_mb__before_spinlock()	smp_wmb()
  131 #endif
  132 
  133 /**
  134  * raw_spin_unlock_wait - wait until the spinlock gets unlocked
  135  * @lock: the spinlock in question.
  136  */
  137 #define raw_spin_unlock_wait(lock)	arch_spin_unlock_wait(&(lock)->raw_lock)
  138 
  139 #ifdef CONFIG_DEBUG_SPINLOCK
  140  extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
  141 #define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
  142  extern int do_raw_spin_trylock(raw_spinlock_t *lock);
  143  extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
  144 #else
  145 static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
  146 {
  147 	__acquire(lock);
  148 	arch_spin_lock(&lock->raw_lock);
  149 }
  150 
  151 static inline void
  152 do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
  153 {
  154 	__acquire(lock);
  155 	arch_spin_lock_flags(&lock->raw_lock, *flags);
  156 }
  157 
  158 static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
  159 {
  160 	return arch_spin_trylock(&(lock)->raw_lock);
  161 }
  162 
  163 static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
  164 {
  165 	arch_spin_unlock(&lock->raw_lock);
  166 	__release(lock);
  167 }
  168 #endif
  169 
  170 /*
  171  * Define the various spin_lock methods.  Note we define these
  172  * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The
  173  * various methods are defined as nops in the case they are not
  174  * required.
  175  */
  176 #define raw_spin_trylock(lock)	__cond_lock(lock, _raw_spin_trylock(lock))
  177 
  178 #define raw_spin_lock(lock)	_raw_spin_lock(lock)
  179 
  180 #ifdef CONFIG_DEBUG_LOCK_ALLOC
  181 # define raw_spin_lock_nested(lock, subclass) \
  182 	_raw_spin_lock_nested(lock, subclass)
  183 # define raw_spin_lock_bh_nested(lock, subclass) \
  184 	_raw_spin_lock_bh_nested(lock, subclass)
  185 
  186 # define raw_spin_lock_nest_lock(lock, nest_lock)			\
  187 	 do {								\
  188 		 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
  189 		 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map);	\
  190 	 } while (0)
  191 #else
  192 /*
  193  * Always evaluate the 'subclass' argument to avoid that the compiler
  194  * warns about set-but-not-used variables when building with
  195  * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
  196  */
  197 # define raw_spin_lock_nested(lock, subclass)		\
  198 	_raw_spin_lock(((void)(subclass), (lock)))
  199 # define raw_spin_lock_nest_lock(lock, nest_lock)	_raw_spin_lock(lock)
  200 # define raw_spin_lock_bh_nested(lock, subclass)	_raw_spin_lock_bh(lock)
  201 #endif
  202 
  203 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
  204 
  205 #define raw_spin_lock_irqsave(lock, flags)			\
  206 	do {						\
  207 		typecheck(unsigned long, flags);	\
  208 		flags = _raw_spin_lock_irqsave(lock);	\
  209 	} while (0)
  210 
  211 #ifdef CONFIG_DEBUG_LOCK_ALLOC
  212 #define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
  213 	do {								\
  214 		typecheck(unsigned long, flags);			\
  215 		flags = _raw_spin_lock_irqsave_nested(lock, subclass);	\
  216 	} while (0)
  217 #else
  218 #define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
  219 	do {								\
  220 		typecheck(unsigned long, flags);			\
  221 		flags = _raw_spin_lock_irqsave(lock);			\
  222 	} while (0)
  223 #endif
  224 
  225 #else
  226 
  227 #define raw_spin_lock_irqsave(lock, flags)		\
  228 	do {						\
  229 		typecheck(unsigned long, flags);	\
  230 		_raw_spin_lock_irqsave(lock, flags);	\
  231 	} while (0)
  232 
  233 #define raw_spin_lock_irqsave_nested(lock, flags, subclass)	\
  234 	raw_spin_lock_irqsave(lock, flags)
  235 
  236 #endif
  237 
  238 #define raw_spin_lock_irq(lock)		_raw_spin_lock_irq(lock)
  239 #define raw_spin_lock_bh(lock)		_raw_spin_lock_bh(lock)
  240 #define raw_spin_unlock(lock)		_raw_spin_unlock(lock)
  241 #define raw_spin_unlock_irq(lock)	_raw_spin_unlock_irq(lock)
  242 
  243 #define raw_spin_unlock_irqrestore(lock, flags)		\
  244 	do {							\
  245 		typecheck(unsigned long, flags);		\
  246 		_raw_spin_unlock_irqrestore(lock, flags);	\
  247 	} while (0)
  248 #define raw_spin_unlock_bh(lock)	_raw_spin_unlock_bh(lock)
  249 
  250 #define raw_spin_trylock_bh(lock) \
  251 	__cond_lock(lock, _raw_spin_trylock_bh(lock))
  252 
  253 #define raw_spin_trylock_irq(lock) \
  254 ({ \
  255 	local_irq_disable(); \
  256 	raw_spin_trylock(lock) ? \
  257 	1 : ({ local_irq_enable(); 0;  }); \
  258 })
  259 
  260 #define raw_spin_trylock_irqsave(lock, flags) \
  261 ({ \
  262 	local_irq_save(flags); \
  263 	raw_spin_trylock(lock) ? \
  264 	1 : ({ local_irq_restore(flags); 0; }); \
  265 })
  266 
  267 /**
  268  * raw_spin_can_lock - would raw_spin_trylock() succeed?
  269  * @lock: the spinlock in question.
  270  */
  271 #define raw_spin_can_lock(lock)	(!raw_spin_is_locked(lock))
  272 
  273 /* Include rwlock functions */
  274 #include <linux/rwlock.h>
  275 
  276 /*
  277  * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
  278  */
  279 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
  280 # include <linux/spinlock_api_smp.h>
  281 #else
  282 # include <linux/spinlock_api_up.h>
  283 #endif
  284 
  285 /*
  286  * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
  287  */
  288 
  289 static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
  290 {
  291 	return &lock->rlock;
  292 }
  293 
  294 #define spin_lock_init(_lock)				\
  295 do {							\
  296 	spinlock_check(_lock);				\
  297 	raw_spin_lock_init(&(_lock)->rlock);		\
  298 } while (0)
  299 
  300 static __always_inline void spin_lock(spinlock_t *lock)
  301 {
  302 	raw_spin_lock(&lock->rlock);
  303 }
  304 
  305 static __always_inline void spin_lock_bh(spinlock_t *lock)
  306 {
  307 	raw_spin_lock_bh(&lock->rlock);
  308 }
  309 
  310 static __always_inline int spin_trylock(spinlock_t *lock)
  311 {
  312 	return raw_spin_trylock(&lock->rlock);
  313 }
  314 
  315 #define spin_lock_nested(lock, subclass)			\
  316 do {								\
  317 	raw_spin_lock_nested(spinlock_check(lock), subclass);	\
  318 } while (0)
  319 
  320 #define spin_lock_bh_nested(lock, subclass)			\
  321 do {								\
  322 	raw_spin_lock_bh_nested(spinlock_check(lock), subclass);\
  323 } while (0)
  324 
  325 #define spin_lock_nest_lock(lock, nest_lock)				\
  326 do {									\
  327 	raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);	\
  328 } while (0)
  329 
  330 static __always_inline void spin_lock_irq(spinlock_t *lock)
  331 {
  332 	raw_spin_lock_irq(&lock->rlock);
  333 }
  334 
  335 #define spin_lock_irqsave(lock, flags)				\
  336 do {								\
  337 	raw_spin_lock_irqsave(spinlock_check(lock), flags);	\
  338 } while (0)
  339 
  340 #define spin_lock_irqsave_nested(lock, flags, subclass)			\
  341 do {									\
  342 	raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
  343 } while (0)
  344 
  345 static __always_inline void spin_unlock(spinlock_t *lock)
  346 {
  347 	raw_spin_unlock(&lock->rlock);
  348 }
  349 
  350 static __always_inline void spin_unlock_bh(spinlock_t *lock)
  351 {
  352 	raw_spin_unlock_bh(&lock->rlock);
  353 }
  354 
  355 static __always_inline void spin_unlock_irq(spinlock_t *lock)
  356 {
  357 	raw_spin_unlock_irq(&lock->rlock);
  358 }
  359 
  360 static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
  361 {
  362 	raw_spin_unlock_irqrestore(&lock->rlock, flags);
  363 }
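
A minimal usage sketch (the counter structure is invented): the irqsave variants are the safe default whenever the lock may also be taken from interrupt context, since they save and restore the caller's interrupt state.

#include <linux/spinlock.h>

struct example_counter {
	spinlock_t lock;
	unsigned long count;
};

static void example_init(struct example_counter *c)
{
	spin_lock_init(&c->lock);
	c->count = 0;
}

/* Hypothetical: increment under the lock with local interrupts disabled. */
static void example_inc(struct example_counter *c)
{
	unsigned long flags;

	spin_lock_irqsave(&c->lock, flags);
	c->count++;
	spin_unlock_irqrestore(&c->lock, flags);
}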
  364 
  365 static __always_inline int spin_trylock_bh(spinlock_t *lock)
  366 {
  367 	return raw_spin_trylock_bh(&lock->rlock);
  368 }
  369 
  370 static __always_inline int spin_trylock_irq(spinlock_t *lock)
  371 {
  372 	return raw_spin_trylock_irq(&lock->rlock);
  373 }
  374 
  375 #define spin_trylock_irqsave(lock, flags)			\
  376 ({								\
  377 	raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
  378 })
  379 
  380 static __always_inline void spin_unlock_wait(spinlock_t *lock)
  381 {
  382 	raw_spin_unlock_wait(&lock->rlock);
  383 }
  384 
  385 static __always_inline int spin_is_locked(spinlock_t *lock)
  386 {
  387 	return raw_spin_is_locked(&lock->rlock);
  388 }
  389 
  390 static __always_inline int spin_is_contended(spinlock_t *lock)
  391 {
  392 	return raw_spin_is_contended(&lock->rlock);
  393 }
  394 
  395 static __always_inline int spin_can_lock(spinlock_t *lock)
  396 {
  397 	return raw_spin_can_lock(&lock->rlock);
  398 }
  399 
  400 #define assert_spin_locked(lock)	assert_raw_spin_locked(&(lock)->rlock)
  401 
  402 /*
  403  * Pull the atomic_t declaration:
  404  * (asm-mips/atomic.h needs above definitions)
  405  */
  406 #include <linux/atomic.h>
  407 /**
  408  * atomic_dec_and_lock - lock on reaching reference count zero
  409  * @atomic: the atomic counter
  410  * @lock: the spinlock in question
  411  *
  412  * Decrements @atomic by 1.  If the result is 0, returns true and locks
  413  * @lock.  Returns false for all other cases.
  414  */
  415 extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
  416 #define atomic_dec_and_lock(atomic, lock) \
  417 		__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
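
Sketch of the classic refcount-release pattern this helper enables (object layout, list and free routine are hypothetical): the lock is taken only on the final put, atomically with the 1 -> 0 transition, so the object can be unlinked before it is freed.

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/slab.h>

struct example_obj {
	atomic_t refcnt;
	struct list_head node;
};

static DEFINE_SPINLOCK(example_list_lock);	/* hypothetical table lock */

/* Hypothetical: drop a reference; unlink and free on the last one. */
static void example_put(struct example_obj *obj)
{
	if (atomic_dec_and_lock(&obj->refcnt, &example_list_lock)) {
		list_del(&obj->node);
		spin_unlock(&example_list_lock);
		kfree(obj);
	}
}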
  418 
  419 #endif /* __LINUX_SPINLOCK_H */
    1 #ifndef _UAPI_LINUX_SWAB_H
    2 #define _UAPI_LINUX_SWAB_H
    3 
    4 #include <linux/types.h>
    5 #include <linux/compiler.h>
    6 #include <asm/swab.h>
    7 
    8 /*
    9  * casts are necessary for constants, because we never know for sure
   10  * how U/UL/ULL map to __u16, __u32, __u64. At least not in a portable way.
   11  */
   12 #define ___constant_swab16(x) ((__u16)(				\
   13 	(((__u16)(x) & (__u16)0x00ffU) << 8) |			\
   14 	(((__u16)(x) & (__u16)0xff00U) >> 8)))
   15 
   16 #define ___constant_swab32(x) ((__u32)(				\
   17 	(((__u32)(x) & (__u32)0x000000ffUL) << 24) |		\
   18 	(((__u32)(x) & (__u32)0x0000ff00UL) <<  8) |		\
   19 	(((__u32)(x) & (__u32)0x00ff0000UL) >>  8) |		\
   20 	(((__u32)(x) & (__u32)0xff000000UL) >> 24)))
   21 
   22 #define ___constant_swab64(x) ((__u64)(				\
   23 	(((__u64)(x) & (__u64)0x00000000000000ffULL) << 56) |	\
   24 	(((__u64)(x) & (__u64)0x000000000000ff00ULL) << 40) |	\
   25 	(((__u64)(x) & (__u64)0x0000000000ff0000ULL) << 24) |	\
   26 	(((__u64)(x) & (__u64)0x00000000ff000000ULL) <<  8) |	\
   27 	(((__u64)(x) & (__u64)0x000000ff00000000ULL) >>  8) |	\
   28 	(((__u64)(x) & (__u64)0x0000ff0000000000ULL) >> 24) |	\
   29 	(((__u64)(x) & (__u64)0x00ff000000000000ULL) >> 40) |	\
   30 	(((__u64)(x) & (__u64)0xff00000000000000ULL) >> 56)))
   31 
   32 #define ___constant_swahw32(x) ((__u32)(			\
   33 	(((__u32)(x) & (__u32)0x0000ffffUL) << 16) |		\
   34 	(((__u32)(x) & (__u32)0xffff0000UL) >> 16)))
   35 
   36 #define ___constant_swahb32(x) ((__u32)(			\
   37 	(((__u32)(x) & (__u32)0x00ff00ffUL) << 8) |		\
   38 	(((__u32)(x) & (__u32)0xff00ff00UL) >> 8)))
   39 
   40 /*
   41  * Implement the following as inlines, but define the interface using
   42  * macros to allow constant folding when possible:
   43  * ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32
   44  */
   45 
   46 static inline __attribute_const__ __u16 __fswab16(__u16 val)
   47 {
   48 #if defined (__arch_swab16)
   49 	return __arch_swab16(val);
   50 #else
   51 	return ___constant_swab16(val);
   52 #endif
   53 }
   54 
   55 static inline __attribute_const__ __u32 __fswab32(__u32 val)
   56 {
   57 #if defined(__arch_swab32)
   58 	return __arch_swab32(val);
   59 #else
   60 	return ___constant_swab32(val);
   61 #endif
   62 }
   63 
   64 static inline __attribute_const__ __u64 __fswab64(__u64 val)
   65 {
   66 #if defined (__arch_swab64)
   67 	return __arch_swab64(val);
   68 #elif defined(__SWAB_64_THRU_32__)
   69 	__u32 h = val >> 32;
   70 	__u32 l = val & ((1ULL << 32) - 1);
   71 	return (((__u64)__fswab32(l)) << 32) | ((__u64)(__fswab32(h)));
   72 #else
   73 	return ___constant_swab64(val);
   74 #endif
   75 }
   76 
   77 static inline __attribute_const__ __u32 __fswahw32(__u32 val)
   78 {
   79 #ifdef __arch_swahw32
   80 	return __arch_swahw32(val);
   81 #else
   82 	return ___constant_swahw32(val);
   83 #endif
   84 }
   85 
   86 static inline __attribute_const__ __u32 __fswahb32(__u32 val)
   87 {
   88 #ifdef __arch_swahb32
   89 	return __arch_swahb32(val);
   90 #else
   91 	return ___constant_swahb32(val);
   92 #endif
   93 }
   94 
   95 /**
   96  * __swab16 - return a byteswapped 16-bit value
   97  * @x: value to byteswap
   98  */
   99 #ifdef __HAVE_BUILTIN_BSWAP16__
  100 #define __swab16(x) (__u16)__builtin_bswap16((__u16)(x))
  101 #else
  102 #define __swab16(x)				\
  103 	(__builtin_constant_p((__u16)(x)) ?	\
  104 	___constant_swab16(x) :			\
  105 	__fswab16(x))
  106 #endif
  107 
  108 /**
  109  * __swab32 - return a byteswapped 32-bit value
  110  * @x: value to byteswap
  111  */
  112 #ifdef __HAVE_BUILTIN_BSWAP32__
  113 #define __swab32(x) (__u32)__builtin_bswap32((__u32)(x))
  114 #else
  115 #define __swab32(x)				\
  116 	(__builtin_constant_p((__u32)(x)) ?	\
  117 	___constant_swab32(x) :			\
  118 	__fswab32(x))
  119 #endif
  120 
  121 /**
  122  * __swab64 - return a byteswapped 64-bit value
  123  * @x: value to byteswap
  124  */
  125 #ifdef __HAVE_BUILTIN_BSWAP64__
  126 #define __swab64(x) (__u64)__builtin_bswap64((__u64)(x))
  127 #else
  128 #define __swab64(x)				\
  129 	(__builtin_constant_p((__u64)(x)) ?	\
  130 	___constant_swab64(x) :			\
  131 	__fswab64(x))
  132 #endif
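
A minimal usage sketch (not part of the header), assuming <linux/swab.h> is included: because the interface is a macro, a constant argument folds at compile time via ___constant_swabNN() and may appear in a static initializer, while a runtime argument dispatches to the inline __fswabNN() helpers (or a builtin/architecture variant). The names below are illustrative.

/* Folds to 0x78563412 at compile time. */
static const __u32 wire_magic = __swab32(0x12345678);

static __u64 reverse_endianness(__u64 host_val)
{
	/* Runtime value: goes through __fswab64() or the builtin. */
	return __swab64(host_val);
}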
  133 
  134 /**
  135  * __swahw32 - return a word-swapped 32-bit value
  136  * @x: value to wordswap
  137  *
  138  * __swahw32(0x12340000) is 0x00001234
  139  */
  140 #define __swahw32(x)				\
  141 	(__builtin_constant_p((__u32)(x)) ?	\
  142 	___constant_swahw32(x) :		\
  143 	__fswahw32(x))
  144 
  145 /**
  146  * __swahb32 - return a high and low byte-swapped 32-bit value
  147  * @x: value to byteswap
  148  *
  149  * __swahb32(0x12345678) is 0x34127856
  150  */
  151 #define __swahb32(x)				\
  152 	(__builtin_constant_p((__u32)(x)) ?	\
  153 	___constant_swahb32(x) :		\
  154 	__fswahb32(x))
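
To make the two half-word primitives concrete, a short sketch with illustrative values: __swahw32() exchanges the 16-bit halves, __swahb32() swaps the bytes within each half, and composing the two gives a full byteswap.

static void swah_examples(void)
{
	__u32 a = __swahw32(0x12345678);	/* 0x56781234: halves exchanged */
	__u32 b = __swahb32(0x12345678);	/* 0x34127856: bytes swapped
						   within each half */
	__u32 c = __swahb32(__swahw32(0x12345678));	/* 0x78563412,
							   i.e. __swab32() */
	(void)a; (void)b; (void)c;
}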
  155 
  156 /**
  157  * __swab16p - return a byteswapped 16-bit value from a pointer
  158  * @p: pointer to a naturally-aligned 16-bit value
  159  */
  160 static __always_inline __u16 __swab16p(const __u16 *p)
  161 {
  162 #ifdef __arch_swab16p
  163 	return __arch_swab16p(p);
  164 #else
  165 	return __swab16(*p);
  166 #endif
  167 }
  168 
  169 /**
  170  * __swab32p - return a byteswapped 32-bit value from a pointer
  171  * @p: pointer to a naturally-aligned 32-bit value
  172  */
  173 static __always_inline __u32 __swab32p(const __u32 *p)
  174 {
  175 #ifdef __arch_swab32p
  176 	return __arch_swab32p(p);
  177 #else
  178 	return __swab32(*p);
  179 #endif
  180 }
  181 
  182 /**
  183  * __swab64p - return a byteswapped 64-bit value from a pointer
  184  * @p: pointer to a naturally-aligned 64-bit value
  185  */
  186 static __always_inline __u64 __swab64p(const __u64 *p)
  187 {
  188 #ifdef __arch_swab64p
  189 	return __arch_swab64p(p);
  190 #else
  191 	return __swab64(*p);
  192 #endif
  193 }
  194 
  195 /**
  196  * __swahw32p - return a wordswapped 32-bit value from a pointer
  197  * @p: pointer to a naturally-aligned 32-bit value
  198  *
  199  * See __swahw32() for details of wordswapping.
  200  */
  201 static inline __u32 __swahw32p(const __u32 *p)
  202 {
  203 #ifdef __arch_swahw32p
  204 	return __arch_swahw32p(p);
  205 #else
  206 	return __swahw32(*p);
  207 #endif
  208 }
  209 
  210 /**
  211  * __swahb32p - return a high and low byteswapped 32-bit value from a pointer
  212  * @p: pointer to a naturally-aligned 32-bit value
  213  *
  214  * See __swahb32() for details of high/low byteswapping.
  215  */
  216 static inline __u32 __swahb32p(const __u32 *p)
  217 {
  218 #ifdef __arch_swahb32p
  219 	return __arch_swahb32p(p);
  220 #else
  221 	return __swahb32(*p);
  222 #endif
  223 }
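
A hedged sketch of how the *p variants are typically used: converting a foreign-endian, naturally-aligned field directly out of the memory it was read into, without a temporary copy. The wire_hdr layout below is hypothetical.

struct wire_hdr {
	__u16 type;
	__u16 flags;
	__u32 len;	/* stored byteswapped relative to the host */
};

static __u32 wire_hdr_len(const struct wire_hdr *h)
{
	/* On architectures providing __arch_swab32p() this can become
	   a single load-and-swap instruction. */
	return __swab32p(&h->len);
}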
  224 
  225 /**
  226  * __swab16s - byteswap a 16-bit value in-place
  227  * @p: pointer to a naturally-aligned 16-bit value
  228  */
  229 static inline void __swab16s(__u16 *p)
  230 {
  231 #ifdef __arch_swab16s
  232 	__arch_swab16s(p);
  233 #else
  234 	*p = __swab16p(p);
  235 #endif
  236 }
  237 /**
  238  * __swab32s - byteswap a 32-bit value in-place
  239  * @p: pointer to a naturally-aligned 32-bit value
  240  */
  241 static __always_inline void __swab32s(__u32 *p)
  242 {
  243 #ifdef __arch_swab32s
  244 	__arch_swab32s(p);
  245 #else
  246 	*p = __swab32p(p);
  247 #endif
  248 }
  249 
  250 /**
  251  * __swab64s - byteswap a 64-bit value in-place
  252  * @p: pointer to a naturally-aligned 64-bit value
  253  */
  254 static __always_inline void __swab64s(__u64 *p)
  255 {
  256 #ifdef __arch_swab64s
  257 	__arch_swab64s(p);
  258 #else
  259 	*p = __swab64p(p);
  260 #endif
  261 }
  262 
  263 /**
  264  * __swahw32s - wordswap a 32-bit value in-place
  265  * @p: pointer to a naturally-aligned 32-bit value
  266  *
  267  * See __swahw32() for details of wordswapping
  268  */
  269 static inline void __swahw32s(__u32 *p)
  270 {
  271 #ifdef __arch_swahw32s
  272 	__arch_swahw32s(p);
  273 #else
  274 	*p = __swahw32p(p);
  275 #endif
  276 }
  277 
  278 /**
  279  * __swahb32s - high and low byteswap a 32-bit value in-place
  280  * @p: pointer to a naturally-aligned 32-bit value
  281  *
  282  * See __swahb32() for details of high and low byte swapping
  283  */
  284 static inline void __swahb32s(__u32 *p)
  285 {
  286 #ifdef __arch_swahb32s
  287 	__arch_swahb32s(p);
  288 #else
  289 	*p = __swahb32p(p);
  290 #endif
  291 }
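
Finally, a sketch of the in-place *s variants, which are convenient for converting a whole buffer between endiannesses; the helper name is illustrative.

static void swab32_buffer(__u32 *buf, size_t words)
{
	size_t i;

	/* Byteswap every 32-bit word in place, e.g. after reading a
	   foreign-endian block from disk. */
	for (i = 0; i < words; i++)
		__swab32s(&buf[i]);
}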
  292 
  293 
  294 #endif /* _UAPI_LINUX_SWAB_H */
Here is an explanation of a rule violation that arose while checking your driver against the corresponding kernel.
Note that it may be a false positive, i.e. there may be no real error at all. Please analyze the given error trace and the related source code to understand whether there is an error in your driver.
The Error trace column shows the path along which the given rule is violated. You can expand or collapse entity classes by clicking the corresponding checkboxes in the main menu or in the advanced Others menu, and you can expand or collapse each particular entity by clicking +/-. Hovering over some entities shows tips. The error trace is also bound to the related source code: line numbers may be shown as links on the left, and clicking one opens the corresponding line in the source code.
The Source code column shows the content of the files related to the error trace: the source code of your driver (note that there are some LDV modifications at the end), kernel headers, and the rule model. Tabs show the currently opened file and the other available files; hovering over a tab shows the full file name, and clicking it displays that file's content.
| Kernel | Module | Rule | Verifier | Verdict | Status | Timestamp | Bug report | 
| linux-4.8-rc1.tar.xz | drivers/infiniband/sw/rxe/rdma_rxe.ko | 43_1a | CPAchecker | Bug | Fixed | 2016-09-02 23:47:50 | L0244 | 
Comment
Reported: 2 Sep 2016
