Error Trace
        
Bug # 103

Error trace
19  typedef signed char __s8;
20  typedef unsigned char __u8;
22  typedef short __s16;
23  typedef unsigned short __u16;
25  typedef int __s32;
26  typedef unsigned int __u32;
30  typedef unsigned long long __u64;
15  typedef signed char s8;
16  typedef unsigned char u8;
18  typedef short s16;
19  typedef unsigned short u16;
21  typedef int s32;
22  typedef unsigned int u32;
24  typedef long long s64;
25  typedef unsigned long long u64;
14  typedef long __kernel_long_t;
15  typedef unsigned long __kernel_ulong_t;
27  typedef int __kernel_pid_t;
48  typedef unsigned int __kernel_uid32_t;
49  typedef unsigned int __kernel_gid32_t;
71  typedef __kernel_ulong_t __kernel_size_t;
72  typedef __kernel_long_t __kernel_ssize_t;
87  typedef long long __kernel_loff_t;
88  typedef __kernel_long_t __kernel_time_t;
89  typedef __kernel_long_t __kernel_clock_t;
90  typedef int __kernel_timer_t;
91  typedef int __kernel_clockid_t;
32  typedef __u16 __le16;
33  typedef __u16 __be16;
34  typedef __u32 __le32;
35  typedef __u32 __be32;
37  typedef __u64 __be64;
40  typedef __u32 __wsum;
12  typedef __u32 __kernel_dev_t;
15  typedef __kernel_dev_t dev_t;
18  typedef unsigned short umode_t;
21  typedef __kernel_pid_t pid_t;
26  typedef __kernel_clockid_t clockid_t;
29  typedef _Bool bool;
31  typedef __kernel_uid32_t uid_t;
32  typedef __kernel_gid32_t gid_t;
45  typedef __kernel_loff_t loff_t;
54  typedef __kernel_size_t size_t;
59  typedef __kernel_ssize_t ssize_t;
69  typedef __kernel_time_t time_t;
83  typedef unsigned char u_char;
102  typedef __s32 int32_t;
106  typedef __u8 uint8_t;
108  typedef __u32 uint32_t;
111  typedef __u64 uint64_t;
133  typedef unsigned long sector_t;
134  typedef unsigned long blkcnt_t;
146  typedef u64 dma_addr_t;
157  typedef unsigned int gfp_t;
158  typedef unsigned int fmode_t;
159  typedef unsigned int oom_flags_t;
162  typedef u64 phys_addr_t;
167  typedef phys_addr_t resource_size_t;
177  struct __anonstruct_atomic_t_6 { int counter; };
177  typedef struct __anonstruct_atomic_t_6 atomic_t;
182  struct __anonstruct_atomic64_t_7 { long counter; };
182  typedef struct __anonstruct_atomic64_t_7 atomic64_t;
183  struct list_head { struct list_head *next; struct list_head *prev; };
188  struct hlist_node;
188  struct hlist_head { struct hlist_node *first; };
192  struct hlist_node { struct hlist_node *next; struct hlist_node **pprev; };
203  struct callback_head { struct callback_head *next; void (*func)(struct callback_head *); };
213  enum dma_data_direction { DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2, DMA_NONE = 3 };
5  struct device;
5  struct page;
7  struct dma_attrs;
23  struct kernel_symbol { unsigned long value; const char *name; };
33  struct module;
65  struct pt_regs { unsigned long r15; unsigned long r14; unsigned long r13; unsigned long r12; unsigned long bp; unsigned long bx; unsigned long r11; unsigned long r10; unsigned long r9; unsigned long r8; unsigned long ax; unsigned long cx; unsigned long dx; unsigned long si; unsigned long di; unsigned long orig_ax; unsigned long ip; unsigned long cs; unsigned long flags; unsigned long sp; unsigned long ss; };
59  struct __anonstruct____missing_field_name_9 { unsigned int a; unsigned int b; };
59  struct __anonstruct____missing_field_name_10 { u16 limit0; u16 base0; unsigned char base1; unsigned char type; unsigned char s; unsigned char dpl; unsigned char p; unsigned char limit; unsigned char avl; unsigned char l; unsigned char d; unsigned char g; unsigned char base2; };
59  union __anonunion____missing_field_name_8 { struct __anonstruct____missing_field_name_9 __annonCompField4; struct __anonstruct____missing_field_name_10 __annonCompField5; };
59  struct desc_struct { union __anonunion____missing_field_name_8 __annonCompField6; };
15  typedef unsigned long pgdval_t;
16  typedef unsigned long pgprotval_t;
20  struct pgprot { pgprotval_t pgprot; };
243  typedef struct pgprot pgprot_t;
245  struct __anonstruct_pgd_t_12 { pgdval_t pgd; };
245  typedef struct __anonstruct_pgd_t_12 pgd_t;
333  typedef struct page *pgtable_t;
341  struct file;
354  struct seq_file;
389  struct thread_struct;
391  struct mm_struct;
392  struct task_struct;
393  struct cpumask;
327  struct arch_spinlock;
18  typedef u16 __ticket_t;
19  typedef u32 __ticketpair_t;
20  struct __raw_tickets { __ticket_t head; __ticket_t tail; };
32  union __anonunion____missing_field_name_15 { __ticketpair_t head_tail; struct __raw_tickets tickets; };
32  struct arch_spinlock { union __anonunion____missing_field_name_15 __annonCompField7; };
33  typedef struct arch_spinlock arch_spinlock_t;
33  struct __anonstruct____missing_field_name_17 { u32 read; s32 write; };
33  union __anonunion_arch_rwlock_t_16 { s64 lock; struct __anonstruct____missing_field_name_17 __annonCompField8; };
33  typedef union __anonunion_arch_rwlock_t_16 arch_rwlock_t;
142  typedef void (*ctor_fn_t)();
54  struct net_device;
376  struct file_operations;
388  struct completion;
416  struct pid;
527  struct bug_entry { int bug_addr_disp; int file_disp; unsigned short line; unsigned short flags; };
102  struct timespec;
127  struct kernel_vm86_regs { struct pt_regs pt; unsigned short es; unsigned short __esh; unsigned short ds; unsigned short __dsh; unsigned short fs; unsigned short __fsh; unsigned short gs; unsigned short __gsh; };
79  union __anonunion____missing_field_name_22 { struct pt_regs *regs; struct kernel_vm86_regs *vm86; };
79  struct math_emu_info { long ___orig_eip; union __anonunion____missing_field_name_22 __annonCompField10; };
306  struct cpumask { unsigned long bits[128U]; };
14  typedef struct cpumask cpumask_t;
663  typedef struct cpumask *cpumask_var_t;
195  struct static_key;
162  struct seq_operations;
294  struct i387_fsave_struct { u32 cwd; u32 swd; u32 twd; u32 fip; u32 fcs; u32 foo; u32 fos; u32 st_space[20U]; u32 status; };
312  struct __anonstruct____missing_field_name_27 { u64 rip; u64 rdp; };
312  struct __anonstruct____missing_field_name_28 { u32 fip; u32 fcs; u32 foo; u32 fos; };
312  union __anonunion____missing_field_name_26 { struct __anonstruct____missing_field_name_27 __annonCompField14; struct __anonstruct____missing_field_name_28 __annonCompField15; };
312  union __anonunion____missing_field_name_29 { u32 padding1[12U]; u32 sw_reserved[12U]; };
312  struct i387_fxsave_struct { u16 cwd; u16 swd; u16 twd; u16 fop; union __anonunion____missing_field_name_26 __annonCompField16; u32 mxcsr; u32 mxcsr_mask; u32 st_space[32U]; u32 xmm_space[64U]; u32 padding[12U]; union __anonunion____missing_field_name_29 __annonCompField17; };
346  struct i387_soft_struct { u32 cwd; u32 swd; u32 twd; u32 fip; u32 fcs; u32 foo; u32 fos; u32 st_space[20U]; u8 ftop; u8 changed; u8 lookahead; u8 no_update; u8 rm; u8 alimit; struct math_emu_info *info; u32 entry_eip; };
367  struct ymmh_struct { u32 ymmh_space[64U]; };
372  struct lwp_struct { u8 reserved[128U]; };
377  struct bndregs_struct { u64 bndregs[8U]; };
381  struct bndcsr_struct { u64 cfg_reg_u; u64 status_reg; };
386  struct xsave_hdr_struct { u64 xstate_bv; u64 reserved1[2U]; u64 reserved2[5U]; };
392  struct xsave_struct { struct i387_fxsave_struct i387; struct xsave_hdr_struct xsave_hdr; struct ymmh_struct ymmh; struct lwp_struct lwp; struct bndregs_struct bndregs; struct bndcsr_struct bndcsr; };
401  union thread_xstate { struct i387_fsave_struct fsave; struct i387_fxsave_struct fxsave; struct i387_soft_struct soft; struct xsave_struct xsave; };
409  struct fpu { unsigned int last_cpu; unsigned int has_fpu; union thread_xstate *state; };
456  struct kmem_cache;
457  struct perf_event;
458  struct thread_struct { struct desc_struct tls_array[3U]; unsigned long sp0; unsigned long sp; unsigned long usersp; unsigned short es; unsigned short ds; unsigned short fsindex; unsigned short gsindex; unsigned long fs; unsigned long gs; struct perf_event *ptrace_bps[4U]; unsigned long debugreg6; unsigned long ptrace_dr7; unsigned long cr2; unsigned long trap_nr; unsigned long error_code; struct fpu fpu; unsigned long *io_bitmap_ptr; unsigned long iopl; unsigned int io_bitmap_max; unsigned char fpu_counter; };
23  typedef atomic64_t atomic_long_t;
152  struct lockdep_map;
55  struct stack_trace { unsigned int nr_entries; unsigned int max_entries; unsigned long *entries; int skip; };
26  struct lockdep_subclass_key { char __one_byte; };
53  struct lock_class_key { struct lockdep_subclass_key subkeys[8U]; };
59  struct lock_class { struct list_head hash_entry; struct list_head lock_entry; struct lockdep_subclass_key *key; unsigned int subclass; unsigned int dep_gen_id; unsigned long usage_mask; struct stack_trace usage_traces[13U]; struct list_head locks_after; struct list_head locks_before; unsigned int version; unsigned long ops; const char *name; int name_version; unsigned long contention_point[4U]; unsigned long contending_point[4U]; };
144  struct lockdep_map { struct lock_class_key *key; struct lock_class *class_cache[2U]; const char *name; int cpu; unsigned long ip; };
205  struct held_lock { u64 prev_chain_key; unsigned long acquire_ip; struct lockdep_map *instance; struct lockdep_map *nest_lock; u64 waittime_stamp; u64 holdtime_stamp; unsigned short class_idx; unsigned char irq_context; unsigned char trylock; unsigned char read; unsigned char check; unsigned char hardirqs_off; unsigned short references; };
537  struct raw_spinlock { arch_spinlock_t raw_lock; unsigned int magic; unsigned int owner_cpu; void *owner; struct lockdep_map dep_map; };
32  typedef struct raw_spinlock raw_spinlock_t;
33  struct __anonstruct____missing_field_name_33 { u8 __padding[24U]; struct lockdep_map dep_map; };
33  union __anonunion____missing_field_name_32 { struct raw_spinlock rlock; struct __anonstruct____missing_field_name_33 __annonCompField19; };
33  struct spinlock { union __anonunion____missing_field_name_32 __annonCompField20; };
76  typedef struct spinlock spinlock_t;
23  struct __anonstruct_rwlock_t_34 { arch_rwlock_t raw_lock; unsigned int magic; unsigned int owner_cpu; void *owner; struct lockdep_map dep_map; };
23  typedef struct __anonstruct_rwlock_t_34 rwlock_t;
59  struct static_key { atomic_t enabled; };
412  struct seqcount { unsigned int sequence; struct lockdep_map dep_map; };
51  typedef struct seqcount seqcount_t;
259  struct __anonstruct_seqlock_t_35 { struct seqcount seqcount; spinlock_t lock; };
259  typedef struct __anonstruct_seqlock_t_35 seqlock_t;
433  struct timespec { __kernel_time_t tv_sec; long tv_nsec; };
83  struct user_namespace;
22  struct __anonstruct_kuid_t_36 { uid_t val; };
22  typedef struct __anonstruct_kuid_t_36 kuid_t;
27  struct __anonstruct_kgid_t_37 { gid_t val; };
27  typedef struct __anonstruct_kgid_t_37 kgid_t;
127  struct kstat { u64 ino; dev_t dev; umode_t mode; unsigned int nlink; kuid_t uid; kgid_t gid; dev_t rdev; loff_t size; struct timespec atime; struct timespec mtime; struct timespec ctime; unsigned long blksize; unsigned long long blocks; };
34  struct __wait_queue_head { spinlock_t lock; struct list_head task_list; };
39  typedef struct __wait_queue_head wait_queue_head_t;
98  struct __anonstruct_nodemask_t_38 { unsigned long bits[16U]; };
98  typedef struct __anonstruct_nodemask_t_38 nodemask_t;
799  struct mutex { atomic_t count; spinlock_t wait_lock; struct list_head wait_list; struct task_struct *owner; const char *name; void *magic; struct lockdep_map dep_map; };
67  struct mutex_waiter { struct list_head list; struct task_struct *task; void *magic; };
177  struct rw_semaphore;
178  struct rw_semaphore { long count; raw_spinlock_t wait_lock; struct list_head wait_list; struct lockdep_map dep_map; };
155  struct completion { unsigned int done; wait_queue_head_t wait; };
223  struct notifier_block;
323  union ktime { s64 tv64; };
59  typedef union ktime ktime_t;
388  struct tvec_base;
389  struct timer_list { struct list_head entry; unsigned long expires; struct tvec_base *base; void (*function)(unsigned long); unsigned long data; int slack; int start_pid; void *start_site; char start_comm[16U]; struct lockdep_map lockdep_map; };
254  struct hrtimer;
255  enum hrtimer_restart;
266  struct workqueue_struct;
267  struct work_struct;
54  struct work_struct { atomic_long_t data; struct list_head entry; void (*func)(struct work_struct *); struct lockdep_map lockdep_map; };
107  struct delayed_work { struct work_struct work; struct timer_list timer; struct workqueue_struct *wq; int cpu; };
51  struct notifier_block { int (*notifier_call)(struct notifier_block *, unsigned long, void *); struct notifier_block *next; int priority; };
63  struct blocking_notifier_head { struct rw_semaphore rwsem; struct notifier_block *head; };
891  struct ctl_table;
72  struct resource { resource_size_t start; resource_size_t end; const char *name; unsigned long flags; struct resource *parent; struct resource *sibling; struct resource *child; };
172  struct pci_dev;
58  struct pm_message { int event; };
64  typedef struct pm_message pm_message_t;
65  struct dev_pm_ops { int (*prepare)(struct device *); void (*complete)(struct device *); int (*suspend)(struct device *); int (*resume)(struct device *); int (*freeze)(struct device *); int (*thaw)(struct device *); int (*poweroff)(struct device *); int (*restore)(struct device *); int (*suspend_late)(struct device *); int (*resume_early)(struct device *); int (*freeze_late)(struct device *); int (*thaw_early)(struct device *); int (*poweroff_late)(struct device *); int (*restore_early)(struct device *); int (*suspend_noirq)(struct device *); int (*resume_noirq)(struct device *); int (*freeze_noirq)(struct device *); int (*thaw_noirq)(struct device *); int (*poweroff_noirq)(struct device *); int (*restore_noirq)(struct device *); int (*runtime_suspend)(struct device *); int (*runtime_resume)(struct device *); int (*runtime_idle)(struct device *); };
301  enum rpm_status { RPM_ACTIVE = 0, RPM_RESUMING = 1, RPM_SUSPENDED = 2, RPM_SUSPENDING = 3 };
308  enum rpm_request { RPM_REQ_NONE = 0, RPM_REQ_IDLE = 1, RPM_REQ_SUSPEND = 2, RPM_REQ_AUTOSUSPEND = 3, RPM_REQ_RESUME = 4 };
316  struct wakeup_source;
527  struct pm_subsys_data { spinlock_t lock; unsigned int refcount; struct list_head clock_list; };
534  struct dev_pm_qos;
534  struct dev_pm_info { pm_message_t power_state; unsigned char can_wakeup; unsigned char async_suspend; bool is_prepared; bool is_suspended; bool ignore_children; bool early_init; spinlock_t lock; struct list_head entry; struct completion completion; struct wakeup_source *wakeup; bool wakeup_path; bool syscore; struct timer_list suspend_timer; unsigned long timer_expires; struct work_struct work; wait_queue_head_t wait_queue; atomic_t usage_count; atomic_t child_count; unsigned char disable_depth; unsigned char idle_notification; unsigned char request_pending; unsigned char deferred_resume; unsigned char run_wake; unsigned char runtime_auto; unsigned char no_callbacks; unsigned char irq_safe; unsigned char use_autosuspend; unsigned char timer_autosuspends; unsigned char memalloc_noio; enum rpm_request request; enum rpm_status runtime_status; int runtime_error; int autosuspend_delay; unsigned long last_busy; unsigned long active_jiffies; unsigned long suspended_jiffies; unsigned long accounting_timestamp; struct pm_subsys_data *subsys_data; struct dev_pm_qos *qos; };
591  struct dev_pm_domain { struct dev_pm_ops ops; };
133  struct pci_bus;
22  struct __anonstruct_mm_context_t_103 { void *ldt; int size; unsigned short ia32_compat; struct mutex lock; void *vdso; };
22  typedef struct __anonstruct_mm_context_t_103 mm_context_t;
18  struct rb_node { unsigned long __rb_parent_color; struct rb_node *rb_right; struct rb_node *rb_left; };
40  struct rb_root { struct rb_node *rb_node; };
87  struct vm_area_struct;
341  struct device_node;
1276  struct llist_node;
64  struct llist_node { struct llist_node *next; };
835  struct nsproxy;
836  struct ctl_table_root;
837  struct ctl_table_header;
838  struct ctl_dir;
39  typedef int proc_handler(struct ctl_table *, int, void *, size_t *, loff_t *);
59  struct ctl_table_poll { atomic_t event; wait_queue_head_t wait; };
98  struct ctl_table { const char *procname; void *data; int maxlen; umode_t mode; struct ctl_table *child; proc_handler *proc_handler; struct ctl_table_poll *poll; void *extra1; void *extra2; };
119  struct ctl_node { struct rb_node node; struct ctl_table_header *header; };
124  struct __anonstruct____missing_field_name_132 { struct ctl_table *ctl_table; int used; int count; int nreg; };
124  union __anonunion____missing_field_name_131 { struct __anonstruct____missing_field_name_132 __annonCompField32; struct callback_head rcu; };
124  struct ctl_table_set;
124  struct ctl_table_header { union __anonunion____missing_field_name_131 __annonCompField33; struct completion *unregistering; struct ctl_table *ctl_table_arg; struct ctl_table_root *root; struct ctl_table_set *set; struct ctl_dir *parent; struct ctl_node *node; };
145  struct ctl_dir { struct ctl_table_header header; struct rb_root root; };
151  struct ctl_table_set { int (*is_seen)(struct ctl_table_set *); struct ctl_dir dir; };
156  struct ctl_table_root { struct ctl_table_set default_set; struct ctl_table_set * (*lookup)(struct ctl_table_root *, struct nsproxy *); int (*permissions)(struct ctl_table_header *, struct ctl_table *); };
37  struct cred;
15  typedef __u64 Elf64_Addr;
16  typedef __u16 Elf64_Half;
20  typedef __u32 Elf64_Word;
21  typedef __u64 Elf64_Xword;
190  struct elf64_sym { Elf64_Word st_name; unsigned char st_info; unsigned char st_other; Elf64_Half st_shndx; Elf64_Addr st_value; Elf64_Xword st_size; };
198  typedef struct elf64_sym Elf64_Sym;
48  struct idr_layer { int prefix; unsigned long bitmap[4U]; struct idr_layer *ary[256U]; int count; int layer; struct callback_head callback_head; };
38  struct idr { struct idr_layer *hint; struct idr_layer *top; struct idr_layer *id_free; int layers; int id_free_cnt; int cur; spinlock_t lock; };
197  struct ida_bitmap { long nr_busy; unsigned long bitmap[15U]; };
213  struct ida { struct idr idr; struct ida_bitmap *free_bitmap; };
245  struct dentry;
246  struct iattr;
247  struct super_block;
248  struct file_system_type;
249  struct kernfs_open_node;
250  struct kernfs_iattrs;
266  struct kernfs_root;
266  struct kernfs_elem_dir { unsigned long subdirs; struct rb_root children; struct kernfs_root *root; };
62  struct kernfs_node;
62  struct kernfs_elem_symlink { struct kernfs_node *target_kn; };
66  struct kernfs_ops;
66  struct kernfs_elem_attr { const struct kernfs_ops *ops; struct kernfs_open_node *open; loff_t size; };
72  union __anonunion_u_137 { struct completion *completion; struct kernfs_node *removed_list; };
72  union __anonunion____missing_field_name_138 { struct kernfs_elem_dir dir; struct kernfs_elem_symlink symlink; struct kernfs_elem_attr attr; };
72  struct kernfs_node { atomic_t count; atomic_t active; struct lockdep_map dep_map; struct kernfs_node *parent; const char *name; struct rb_node rb; union __anonunion_u_137 u; const void *ns; unsigned int hash; union __anonunion____missing_field_name_138 __annonCompField34; void *priv; unsigned short flags; umode_t mode; unsigned int ino; struct kernfs_iattrs *iattr; };
114  struct kernfs_dir_ops { int (*mkdir)(struct kernfs_node *, const char *, umode_t); int (*rmdir)(struct kernfs_node *); int (*rename)(struct kernfs_node *, struct kernfs_node *, const char *); };
127  struct kernfs_root { struct kernfs_node *kn; struct ida ino_ida; struct kernfs_dir_ops *dir_ops; };
137  struct vm_operations_struct;
137  struct kernfs_open_file { struct kernfs_node *kn; struct file *file; struct mutex mutex; int event; struct list_head list; bool mmapped; const struct vm_operations_struct *vm_ops; };
151  struct kernfs_ops { int (*seq_show)(struct seq_file *, void *); void * (*seq_start)(struct seq_file *, loff_t *); void * (*seq_next)(struct seq_file *, void *, loff_t *); void (*seq_stop)(struct seq_file *, void *); ssize_t (*read)(struct kernfs_open_file *, char *, size_t, loff_t); ssize_t (*write)(struct kernfs_open_file *, char *, size_t, loff_t); int (*mmap)(struct kernfs_open_file *, struct vm_area_struct *); struct lock_class_key lockdep_key; };
376  struct sock;
377  struct kobject;
378  enum kobj_ns_type { KOBJ_NS_TYPE_NONE = 0, KOBJ_NS_TYPE_NET = 1, KOBJ_NS_TYPES = 2 };
384  struct kobj_ns_type_operations { enum kobj_ns_type type; bool (*current_may_mount)(); void * (*grab_current_ns)(); const void * (*netlink_ns)(struct sock *); const void * (*initial_ns)(); void (*drop_ns)(void *); };
59  struct bin_attribute;
60  struct attribute { const char *name; umode_t mode; bool ignore_lockdep; struct lock_class_key *key; struct lock_class_key skey; };
37  struct attribute_group { const char *name; umode_t (*is_visible)(struct kobject *, struct attribute *, int); struct attribute **attrs; struct bin_attribute **bin_attrs; };
67  struct bin_attribute { struct attribute attr; size_t size; void *private; ssize_t (*read)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t, size_t); ssize_t (*write)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t, size_t); int (*mmap)(struct file *, struct kobject *, struct bin_attribute *, struct vm_area_struct *); };
130  struct sysfs_ops { ssize_t (*show)(struct kobject *, struct attribute *, char *); ssize_t (*store)(struct kobject *, struct attribute *, const char *, size_t); };
462  struct kref { atomic_t refcount; };
50  struct kset;
50  struct kobj_type;
50  struct kobject { const char *name; struct list_head entry; struct kobject *parent; struct kset *kset; struct kobj_type *ktype; struct kernfs_node *sd; struct kref kref; struct delayed_work release; unsigned char state_initialized; unsigned char state_in_sysfs; unsigned char state_add_uevent_sent; unsigned char state_remove_uevent_sent; unsigned char uevent_suppress; };
112  struct kobj_type { void (*release)(struct kobject *); const struct sysfs_ops *sysfs_ops; struct attribute **default_attrs; const struct kobj_ns_type_operations * (*child_ns_type)(struct kobject *); const void * (*namespace)(struct kobject *); };
120  struct kobj_uevent_env { char *envp[32U]; int envp_idx; char buf[2048U]; int buflen; };
127  struct kset_uevent_ops { const int (*filter)(struct kset *, struct kobject *); const const char * (*name)(struct kset *, struct kobject *); const int (*uevent)(struct kset *, struct kobject *, struct kobj_uevent_env *); };
144  struct kset { struct list_head list; spinlock_t list_lock; struct kobject kobj; const struct kset_uevent_ops *uevent_ops; };
219  struct kernel_param;
224  struct kernel_param_ops { unsigned int flags; int (*set)(const char *, const struct kernel_param *); int (*get)(char *, const struct kernel_param *); void (*free)(void *); };
58  struct kparam_string;
58  struct kparam_array;
58  union __anonunion____missing_field_name_139 { void *arg; const struct kparam_string *str; const struct kparam_array *arr; };
58  struct kernel_param { const char *name; const struct kernel_param_ops *ops; u16 perm; s16 level; union __anonunion____missing_field_name_139 __annonCompField35; };
70  struct kparam_string { unsigned int maxlen; char *string; };
76  struct kparam_array { unsigned int max; unsigned int elemsize; unsigned int *num; const struct kernel_param_ops *ops; void *elem; };
463  struct tracepoint;
464  struct tracepoint_func { void *func; void *data; };
29  struct tracepoint { const char *name; struct static_key key; void (*regfunc)(); void (*unregfunc)(); struct tracepoint_func *funcs; };
92  struct mod_arch_specific { };
36  struct module_param_attrs;
36  struct module_kobject { struct kobject kobj; struct module *mod; struct kobject *drivers_dir; struct module_param_attrs *mp; struct completion *kobj_completion; };
46  struct module_attribute { struct attribute attr; ssize_t (*show)(struct module_attribute *, struct module_kobject *, char *); ssize_t (*store)(struct module_attribute *, struct module_kobject *, const char *, size_t); void (*setup)(struct module *, const char *); int (*test)(struct module *); void (*free)(struct module *); };
72  struct exception_table_entry;
208  enum module_state { MODULE_STATE_LIVE = 0, MODULE_STATE_COMING = 1, MODULE_STATE_GOING = 2, MODULE_STATE_UNFORMED = 3 };
215  struct module_ref { unsigned long incs; unsigned long decs; };
229  struct module_sect_attrs;
229  struct module_notes_attrs;
229  struct ftrace_event_call;
229  struct module { enum module_state state; struct list_head list; char name[56U]; struct module_kobject mkobj; struct module_attribute *modinfo_attrs; const char *version; const char *srcversion; struct kobject *holders_dir; const struct kernel_symbol *syms; const unsigned long *crcs; unsigned int num_syms; struct kernel_param *kp; unsigned int num_kp; unsigned int num_gpl_syms; const struct kernel_symbol *gpl_syms; const unsigned long *gpl_crcs; const struct kernel_symbol *unused_syms; const unsigned long *unused_crcs; unsigned int num_unused_syms; unsigned int num_unused_gpl_syms; const struct kernel_symbol *unused_gpl_syms; const unsigned long *unused_gpl_crcs; bool sig_ok; const struct kernel_symbol *gpl_future_syms; const unsigned long *gpl_future_crcs; unsigned int num_gpl_future_syms; unsigned int num_exentries; struct exception_table_entry *extable; int (*init)(); void *module_init; void *module_core; unsigned int init_size; unsigned int core_size; unsigned int init_text_size; unsigned int core_text_size; unsigned int init_ro_size; unsigned int core_ro_size; struct mod_arch_specific arch; unsigned int taints; unsigned int num_bugs; struct list_head bug_list; struct bug_entry *bug_table; Elf64_Sym *symtab; Elf64_Sym *core_symtab; unsigned int num_symtab; unsigned int core_num_syms; char *strtab; char *core_strtab; struct module_sect_attrs *sect_attrs; struct module_notes_attrs *notes_attrs; char *args; void *percpu; unsigned int percpu_size; unsigned int num_tracepoints; const struct tracepoint **tracepoints_ptrs; unsigned int num_trace_bprintk_fmt; const char **trace_bprintk_fmt_start; struct ftrace_event_call **trace_events; unsigned int num_trace_events; struct list_head source_list; struct list_head target_list; void (*exit)(); struct module_ref *refptr; ctor_fn_t (**ctors)(); unsigned int num_ctors; };
13  typedef unsigned long kernel_ulong_t;
14  struct pci_device_id { __u32 vendor; __u32 device; __u32 subvendor; __u32 subdevice; __u32 class; __u32 class_mask; kernel_ulong_t driver_data; };
186  struct acpi_device_id { __u8 id[9U]; kernel_ulong_t driver_data; };
219  struct of_device_id { char name[32U]; char type[32U]; char compatible[128U]; const void *data; };
601  struct klist_node;
37  struct klist_node { void *n_klist; struct list_head n_node; struct kref n_ref; };
67  struct path;
68  struct inode;
69  struct seq_file { char *buf; size_t size; size_t from; size_t count; size_t pad_until; loff_t index; loff_t read_pos; u64 version; struct mutex lock; const struct seq_operations *op; int poll_event; struct user_namespace *user_ns; void *private; };
35  struct seq_operations { void * (*start)(struct seq_file *, loff_t *); void (*stop)(struct seq_file *, void *); void * (*next)(struct seq_file *, void *, loff_t *); int (*show)(struct seq_file *, void *); };
196  struct pinctrl;
197  struct pinctrl_state;
194  struct dev_pin_info { struct pinctrl *p; struct pinctrl_state *default_state; struct pinctrl_state *sleep_state; struct pinctrl_state *idle_state; };
42  struct dma_map_ops;
42  struct dev_archdata { struct dma_map_ops *dma_ops; void *iommu; };
14  struct device_private;
15  struct device_driver;
16  struct driver_private;
17  struct class;
18  struct subsys_private;
19  struct bus_type;
20  struct iommu_ops;
21  struct iommu_group;
60  struct device_attribute;
60  struct bus_type { const char *name; const char *dev_name; struct device *dev_root; struct device_attribute *dev_attrs; const struct attribute_group **bus_groups; const struct attribute_group **dev_groups; const struct attribute_group **drv_groups; int (*match)(struct device *, struct device_driver *); int (*uevent)(struct device *, struct kobj_uevent_env *); int (*probe)(struct device *); int (*remove)(struct device *); void (*shutdown)(struct device *); int (*online)(struct device *); int (*offline)(struct device *); int (*suspend)(struct device *, pm_message_t); int (*resume)(struct device *); const struct dev_pm_ops *pm; struct iommu_ops *iommu_ops; struct subsys_private *p; struct lock_class_key lock_key; };
138  struct device_type;
195  struct device_driver { const char *name; struct bus_type *bus; struct module *owner; const char *mod_name; bool suppress_bind_attrs; const struct of_device_id *of_match_table; const struct acpi_device_id *acpi_match_table; int (*probe)(struct device *); int (*remove)(struct device *); void (*shutdown)(struct device *); int (*suspend)(struct device *, pm_message_t); int (*resume)(struct device *); const struct attribute_group **groups; const struct dev_pm_ops *pm; struct driver_private *p; };
321  struct class_attribute;
321  struct class { const char *name; struct module *owner; struct class_attribute *class_attrs; const struct attribute_group **dev_groups; struct kobject *dev_kobj; int (*dev_uevent)(struct device *, struct kobj_uevent_env *); char * (*devnode)(struct device *, umode_t *); void (*class_release)(struct class *); void (*dev_release)(struct device *); int (*suspend)(struct device *, pm_message_t); int (*resume)(struct device *); const struct kobj_ns_type_operations *ns_type; const void * (*namespace)(struct device *); const struct dev_pm_ops *pm; struct subsys_private *p; };
414  struct class_attribute { struct attribute attr; ssize_t (*show)(struct class *, struct class_attribute *, char *); ssize_t (*store)(struct class *, struct class_attribute *, const char *, size_t); };
482  struct device_type { const char *name; const struct attribute_group **groups; int (*uevent)(struct device *, struct kobj_uevent_env *); char * (*devnode)(struct device *, umode_t *, kuid_t *, kgid_t *); void (*release)(struct device *); const struct dev_pm_ops *pm; };
510  struct device_attribute { struct attribute attr; ssize_t (*show)(struct device *, struct device_attribute *, char *); ssize_t (*store)(struct device *, struct device_attribute *, const char *, size_t); };
637  struct device_dma_parameters { unsigned int max_segment_size; unsigned long segment_boundary_mask; };
646  struct acpi_device;
647  struct acpi_dev_node { struct acpi_device *companion; };
653  struct dma_coherent_mem;
653  struct device { struct device *parent; struct device_private *p; struct kobject kobj; const char *init_name; const struct device_type *type; struct mutex mutex; struct bus_type *bus; struct device_driver *driver; void *platform_data; struct dev_pm_info power; struct dev_pm_domain *pm_domain; struct dev_pin_info *pins; int numa_node; u64 *dma_mask; u64 coherent_dma_mask; struct device_dma_parameters *dma_parms; struct list_head dma_pools; struct dma_coherent_mem *dma_mem; struct dev_archdata archdata; struct device_node *of_node; struct acpi_dev_node acpi_node; dev_t devt; u32 id; spinlock_t devres_lock; struct list_head devres_head; struct klist_node knode_class; struct class *class; const struct attribute_group **groups; void (*release)(struct device *); struct iommu_group *iommu_group; bool offline_disabled; bool offline; };
795  struct wakeup_source { const char *name; struct list_head entry; spinlock_t lock; struct timer_list timer; unsigned long timer_expires; ktime_t total_time; ktime_t max_time; ktime_t last_time; ktime_t start_prevent_time; ktime_t prevent_sleep_time; unsigned long event_count; unsigned long active_count; unsigned long relax_count; unsigned long expire_count; unsigned long wakeup_count; bool active; bool autosleep_enabled; };
17  struct hotplug_slot;
17  struct pci_slot { struct pci_bus *bus; struct list_head list; struct hotplug_slot *hotplug; unsigned char number; struct kobject kobj; };
110  typedef int pci_power_t;
137  typedef unsigned int pci_channel_state_t;
138  enum pci_channel_state { pci_channel_io_normal = 1, pci_channel_io_frozen = 2, pci_channel_io_perm_failure = 3 };
163  typedef unsigned short pci_dev_flags_t;
180  typedef unsigned short pci_bus_flags_t;
237  struct pcie_link_state;
238  struct pci_vpd;
239  struct pci_sriov;
240  struct pci_ats;
241  struct proc_dir_entry;
241  struct pci_driver;
241  union __anonunion____missing_field_name_143 { struct pci_sriov *sriov; struct pci_dev *physfn; };
241  struct pci_dev { struct list_head bus_list; struct pci_bus *bus; struct pci_bus *subordinate; void *sysdata; struct proc_dir_entry *procent; struct pci_slot *slot; unsigned int devfn; unsigned short vendor; unsigned short device; unsigned short subsystem_vendor; unsigned short subsystem_device; unsigned int class; u8 revision; u8 hdr_type; u8 pcie_cap; u8 msi_cap; u8 msix_cap; unsigned char pcie_mpss; u8 rom_base_reg; u8 pin; u16 pcie_flags_reg; struct pci_driver *driver; u64 dma_mask; struct device_dma_parameters dma_parms; pci_power_t current_state; u8 pm_cap; unsigned char pme_support; unsigned char pme_interrupt; unsigned char pme_poll; unsigned char d1_support; unsigned char d2_support; unsigned char no_d1d2; unsigned char no_d3cold; unsigned char d3cold_allowed; unsigned char mmio_always_on; unsigned char wakeup_prepared; unsigned char runtime_d3cold; unsigned int d3_delay; unsigned int d3cold_delay; struct pcie_link_state *link_state; pci_channel_state_t error_state; struct device dev; int cfg_size; unsigned int irq; struct resource resource[17U]; bool match_driver; unsigned char transparent; unsigned char multifunction; unsigned char is_added; unsigned char is_busmaster; unsigned char no_msi; unsigned char block_cfg_access; unsigned char broken_parity_status; unsigned char irq_reroute_variant; unsigned char msi_enabled; unsigned char msix_enabled; unsigned char ari_enabled; unsigned char is_managed; unsigned char needs_freset; unsigned char state_saved; unsigned char is_physfn; unsigned char is_virtfn; unsigned char reset_fn; unsigned char is_hotplug_bridge; unsigned char __aer_firmware_first_valid; unsigned char __aer_firmware_first; unsigned char broken_intx_masking; unsigned char io_window_1k; pci_dev_flags_t dev_flags; atomic_t enable_cnt; u32 saved_config_space[16U]; struct hlist_head saved_cap_space; struct bin_attribute *rom_attr; int rom_attr_enabled; struct bin_attribute *res_attr[17U]; struct bin_attribute *res_attr_wc[17U]; struct list_head msi_list; const struct attribute_group **msi_irq_groups; struct pci_vpd *vpd; union __anonunion____missing_field_name_143 __annonCompField36; struct pci_ats *ats; phys_addr_t rom; size_t romlen; };
429  struct pci_ops;
429  struct msi_chip;
429  struct pci_bus { struct list_head node; struct pci_bus *parent; struct list_head children; struct list_head devices; struct pci_dev *self; struct list_head slots; struct resource *resource[4U]; struct list_head resources; struct resource busn_res; struct pci_ops *ops; struct msi_chip *msi; void *sysdata; struct proc_dir_entry *procdir; unsigned char number; unsigned char primary; unsigned char max_bus_speed; unsigned char cur_bus_speed; char name[48U]; unsigned short bridge_ctl; pci_bus_flags_t bus_flags; struct device *bridge; struct device dev; struct bin_attribute *legacy_io; struct bin_attribute *legacy_mem; unsigned char is_added; };
534  struct pci_ops { int (*read)(struct pci_bus *, unsigned int, int, int, u32 *); int (*write)(struct pci_bus *, unsigned int, int, int, u32); };
555  struct pci_dynids { spinlock_t lock; struct list_head list; };
569  typedef unsigned int pci_ers_result_t;
579  struct pci_error_handlers { pci_ers_result_t (*error_detected)(struct pci_dev *, enum pci_channel_state); pci_ers_result_t (*mmio_enabled)(struct pci_dev *); pci_ers_result_t (*link_reset)(struct pci_dev *); pci_ers_result_t (*slot_reset)(struct pci_dev *); void (*resume)(struct pci_dev *); };
609  struct pci_driver { struct list_head node; const char *name; const struct pci_device_id *id_table; int (*probe)(struct pci_dev *, const struct pci_device_id *); void (*remove)(struct pci_dev *); int (*suspend)(struct pci_dev *, pm_message_t); int (*suspend_late)(struct pci_dev *, pm_message_t); int (*resume_early)(struct pci_dev *); int (*resume)(struct pci_dev *); void (*shutdown)(struct pci_dev *); int (*sriov_configure)(struct pci_dev *, int); const struct pci_error_handlers *err_handler; struct device_driver driver; struct pci_dynids dynids; };
1131  struct scatterlist { unsigned long sg_magic; unsigned long page_link; unsigned int offset; unsigned int length; dma_addr_t dma_address; unsigned int dma_length; };
45  struct arch_uprobe_task { unsigned long saved_scratch_register; unsigned int saved_trap_nr; unsigned int saved_tf; };
54  enum uprobe_task_state { UTASK_RUNNING = 0, UTASK_SSTEP = 1, UTASK_SSTEP_ACK = 2, UTASK_SSTEP_TRAPPED = 3 };
61  struct __anonstruct____missing_field_name_146 { struct arch_uprobe_task autask; unsigned long vaddr; };
61  struct __anonstruct____missing_field_name_147 { struct callback_head dup_xol_work; unsigned long dup_xol_addr; };
61  union __anonunion____missing_field_name_145 { struct __anonstruct____missing_field_name_146 __annonCompField38; struct __anonstruct____missing_field_name_147 __annonCompField39; };
61  struct uprobe;
61  struct return_instance;
61  struct uprobe_task { enum uprobe_task_state state; union __anonunion____missing_field_name_145 __annonCompField40; struct uprobe *active_uprobe; unsigned long xol_vaddr; struct return_instance *return_instances; unsigned int depth; };
93  struct xol_area;
94  struct uprobes_state { struct xol_area *xol_area; };
129  struct address_space;
130  union __anonunion____missing_field_name_148 { struct address_space *mapping; void *s_mem; };
130  union __anonunion____missing_field_name_150 { unsigned long index; void *freelist; bool pfmemalloc; };
130  struct __anonstruct____missing_field_name_154 { unsigned short inuse; unsigned short objects; unsigned char frozen; };
130  union __anonunion____missing_field_name_153 { atomic_t _mapcount; struct __anonstruct____missing_field_name_154 __annonCompField43; int units; };
130  struct __anonstruct____missing_field_name_152 { union __anonunion____missing_field_name_153 __annonCompField44; atomic_t _count; };
130  union __anonunion____missing_field_name_151 { unsigned long counters; struct __anonstruct____missing_field_name_152 __annonCompField45; unsigned int active; };
130  struct __anonstruct____missing_field_name_149 { union __anonunion____missing_field_name_150 __annonCompField42; union __anonunion____missing_field_name_151 __annonCompField46; };
130  struct __anonstruct____missing_field_name_156 { struct page *next; int pages; int pobjects; };
130  struct slab;
130  union __anonunion____missing_field_name_155 { struct list_head lru; struct __anonstruct____missing_field_name_156 __annonCompField48; struct list_head list; struct slab *slab_page; struct callback_head callback_head; pgtable_t pmd_huge_pte; };
130  union __anonunion____missing_field_name_157 { unsigned long private; spinlock_t *ptl; struct kmem_cache *slab_cache; struct page *first_page; };
130  struct page { unsigned long flags; union __anonunion____missing_field_name_148 __annonCompField41; struct __anonstruct____missing_field_name_149 __annonCompField47; union __anonunion____missing_field_name_155 __annonCompField49; union __anonunion____missing_field_name_157 __annonCompField50; unsigned long debug_flags; };
186  struct page_frag { struct page *page; __u32 offset; __u32 size; };
238  struct __anonstruct_linear_159 { struct rb_node rb; unsigned long rb_subtree_last; };
238  union __anonunion_shared_158 { struct __anonstruct_linear_159 linear; struct list_head nonlinear; };
238  struct anon_vma;
238  struct mempolicy;
238  struct vm_area_struct { unsigned long vm_start; unsigned long vm_end; struct vm_area_struct *vm_next; struct vm_area_struct *vm_prev; struct rb_node vm_rb; unsigned long rb_subtree_gap; struct mm_struct *vm_mm; pgprot_t vm_page_prot; unsigned long vm_flags; union __anonunion_shared_158 shared; struct list_head anon_vma_chain; struct anon_vma *anon_vma; const struct vm_operations_struct *vm_ops; unsigned long vm_pgoff; struct file *vm_file; void *vm_private_data; struct mempolicy *vm_policy; };
310  struct core_thread { struct task_struct *task; struct core_thread *next; };
316  struct core_state { atomic_t nr_threads; struct core_thread dumper; struct completion startup; };
329  struct task_rss_stat { int events; int count[3U]; };
337  struct mm_rss_stat { atomic_long_t count[3U]; };
342  struct kioctx_table;
343  struct linux_binfmt;
343  struct mmu_notifier_mm;
343  struct mm_struct { struct vm_area_struct *mmap; struct rb_root mm_rb; struct vm_area_struct *mmap_cache; unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); unsigned long mmap_base; unsigned long mmap_legacy_base; unsigned long task_size; unsigned long highest_vm_end; pgd_t *pgd; atomic_t mm_users; atomic_t mm_count; atomic_long_t nr_ptes; int map_count; spinlock_t page_table_lock; struct rw_semaphore mmap_sem; struct list_head mmlist; unsigned long hiwater_rss; unsigned long hiwater_vm; unsigned long total_vm; unsigned long locked_vm; unsigned long pinned_vm; unsigned long shared_vm; unsigned long exec_vm; unsigned long stack_vm; unsigned long def_flags; unsigned long start_code; unsigned long end_code; unsigned long start_data; unsigned long end_data; unsigned long start_brk; unsigned long brk; unsigned long start_stack; unsigned long arg_start; unsigned long arg_end; unsigned long env_start; unsigned long env_end; unsigned long saved_auxv[46U]; struct mm_rss_stat rss_stat; struct linux_binfmt *binfmt; cpumask_var_t cpu_vm_mask_var; mm_context_t context; unsigned long flags; struct core_state *core_state; spinlock_t ioctx_lock; struct kioctx_table *ioctx_table; struct task_struct *owner; struct file *exe_file; struct mmu_notifier_mm *mmu_notifier_mm; struct cpumask cpumask_allocation; unsigned long numa_next_scan; unsigned long numa_scan_offset; int numa_scan_seq; bool tlb_flush_pending; struct uprobes_state uprobes_state; };
93  struct shrink_control { gfp_t gfp_mask; unsigned long nr_to_scan; nodemask_t nodes_to_scan; int nid; };
26  struct shrinker { unsigned long int (*count_objects)(struct shrinker *, struct shrink_control *); unsigned long int (*scan_objects)(struct shrinker *, struct shrink_control *); int seeks; long batch; unsigned long flags; struct list_head list; atomic_long_t *nr_deferred; };
71  struct file_ra_state;
72  struct user_struct;
73  struct writeback_control;
185  struct vm_fault { unsigned int flags; unsigned long pgoff; void *virtual_address; struct page *page; };
210  struct vm_operations_struct { void (*open)(struct vm_area_struct *); void (*close)(struct vm_area_struct *); int (*fault)(struct vm_area_struct *, struct vm_fault *); int (*page_mkwrite)(struct vm_area_struct *, struct vm_fault *); int (*access)(struct vm_area_struct *, unsigned long, void *, int, int); int (*set_policy)(struct vm_area_struct *, struct mempolicy *); struct mempolicy * (*get_policy)(struct vm_area_struct *, unsigned long); int (*migrate)(struct vm_area_struct *, const nodemask_t *, const nodemask_t *, unsigned long); int (*remap_pages)(struct vm_area_struct *, unsigned long, unsigned long, unsigned long); };
58  struct mem_cgroup;
355  struct kmem_cache_cpu { void **freelist; unsigned long tid; struct page *page; struct page *partial; unsigned int stat[26U]; };
48  struct kmem_cache_order_objects { unsigned long x; };
58  struct memcg_cache_params;
58  struct kmem_cache_node;
58  struct kmem_cache { struct kmem_cache_cpu *cpu_slab; unsigned long flags; unsigned long min_partial; int size; int object_size; int offset; int cpu_partial; struct kmem_cache_order_objects oo; struct kmem_cache_order_objects max; struct kmem_cache_order_objects min; gfp_t allocflags; int refcount; void (*ctor)(void *); int inuse; int align; int reserved; const char *name; struct list_head list; struct kobject kobj; struct memcg_cache_params *memcg_params; int max_attr_size; int remote_node_defrag_ratio; struct kmem_cache_node *node[1024U]; };
497  struct __anonstruct____missing_field_name_161 { struct callback_head callback_head; struct kmem_cache *memcg_caches[0U]; };
497  struct __anonstruct____missing_field_name_162 { struct mem_cgroup *memcg; struct list_head list; struct kmem_cache *root_cache; bool dead; atomic_t nr_pages; struct work_struct destroy; };
497  union __anonunion____missing_field_name_160 { struct __anonstruct____missing_field_name_161 __annonCompField51; struct __anonstruct____missing_field_name_162 __annonCompField52; };
497  struct memcg_cache_params { bool is_root_cache; union __anonunion____missing_field_name_160 __annonCompField53; };
34  struct dma_attrs { unsigned long flags[1U]; };
70  struct sg_table { struct scatterlist *sgl; unsigned int nents; unsigned int orig_nents; };
351  struct dma_map_ops { void * (*alloc)(struct device *, size_t, dma_addr_t *, gfp_t, struct dma_attrs *); void (*free)(struct device *, size_t, void *, dma_addr_t, struct dma_attrs *); int (*mmap)(struct device *, struct vm_area_struct *, void *, dma_addr_t, size_t, struct dma_attrs *); int (*get_sgtable)(struct device *, struct sg_table *, void *, dma_addr_t, size_t, struct dma_attrs *); dma_addr_t (*map_page)(struct device *, struct page *, unsigned long, size_t, enum dma_data_direction, struct dma_attrs *); void (*unmap_page)(struct device *, dma_addr_t, size_t, enum dma_data_direction, struct dma_attrs *); int (*map_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction, struct dma_attrs *); void (*unmap_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction, struct dma_attrs *); void (*sync_single_for_cpu)(struct device *, dma_addr_t, size_t, enum dma_data_direction); void (*sync_single_for_device)(struct device *, dma_addr_t, size_t, enum dma_data_direction); void (*sync_sg_for_cpu)(struct device *, struct scatterlist *, int, enum dma_data_direction); void (*sync_sg_for_device)(struct device *, struct scatterlist *, int, enum dma_data_direction); int (*mapping_error)(struct device *, dma_addr_t); int (*dma_supported)(struct device *, u64); int (*set_dma_mask)(struct device *, u64); int is_phys; };
54  struct plist_head { struct list_head node_list; };
84  struct plist_node { int prio; struct list_head prio_list; struct list_head node_list; };
83  struct pm_qos_request { struct plist_node node; int pm_qos_class; struct delayed_work work; };
45  struct pm_qos_flags_request { struct list_head node; s32 flags; };
50  enum dev_pm_qos_req_type { DEV_PM_QOS_LATENCY = 1, DEV_PM_QOS_FLAGS = 2 };
55  union __anonunion_data_163 { struct plist_node pnode; struct pm_qos_flags_request flr; };
55  struct dev_pm_qos_request { enum dev_pm_qos_req_type type; union __anonunion_data_163 data; struct device *dev; };
64  enum pm_qos_type { PM_QOS_UNITIALIZED = 0, PM_QOS_MAX = 1, PM_QOS_MIN = 2 };
70  struct pm_qos_constraints { struct plist_head list; s32 target_value; s32 default_value; enum pm_qos_type type; struct blocking_notifier_head *notifiers; };
83  struct pm_qos_flags { struct list_head list; s32 effective_flags; };
88  struct dev_pm_qos { struct pm_qos_constraints latency; struct pm_qos_flags flags; struct dev_pm_qos_request *latency_req; struct dev_pm_qos_request *flags_req; };
460  struct iovec { void *iov_base; __kernel_size_t iov_len; };
38  typedef s32 dma_cookie_t;
1156  struct dql { unsigned int num_queued; unsigned int adj_limit; unsigned int last_obj_cnt; unsigned int limit; unsigned int num_completed; unsigned int prev_ovlimit; unsigned int prev_num_queued; unsigned int prev_last_obj_cnt; unsigned int lowest_slack; unsigned long slack_start_time; unsigned int max_limit; unsigned int min_limit; unsigned int slack_hold_time; };
25  struct sem_undo_list;
25  struct sysv_sem { struct sem_undo_list *undo_list; };
11  typedef unsigned short __kernel_sa_family_t;
23  typedef __kernel_sa_family_t sa_family_t;
24  struct sockaddr { sa_family_t sa_family; char sa_data[14U]; };
43  struct __anonstruct_sync_serial_settings_165 { unsigned int clock_rate; unsigned int clock_type; unsigned short loopback; };
43  typedef struct __anonstruct_sync_serial_settings_165 sync_serial_settings;
50  struct __anonstruct_te1_settings_166 { unsigned int clock_rate; unsigned int clock_type; unsigned short loopback; unsigned int slot_map; };
50  typedef struct __anonstruct_te1_settings_166 te1_settings;
55  struct __anonstruct_raw_hdlc_proto_167 { unsigned short encoding; unsigned short parity; };
55  typedef struct __anonstruct_raw_hdlc_proto_167 raw_hdlc_proto;
65  struct __anonstruct_fr_proto_168 { unsigned int t391; unsigned int t392; unsigned int n391; unsigned int n392; unsigned int n393; unsigned short lmi; unsigned short dce; };
65  typedef struct __anonstruct_fr_proto_168 fr_proto;
69  struct __anonstruct_fr_proto_pvc_169 { unsigned int dlci; };
69  typedef struct __anonstruct_fr_proto_pvc_169 fr_proto_pvc;
74  struct __anonstruct_fr_proto_pvc_info_170 { unsigned int dlci; char master[16U]; };
74  typedef struct __anonstruct_fr_proto_pvc_info_170 fr_proto_pvc_info;
79  struct __anonstruct_cisco_proto_171 { unsigned int interval; unsigned int timeout; };
79  typedef struct __anonstruct_cisco_proto_171 cisco_proto;
95  struct ifmap { unsigned long mem_start; unsigned long mem_end; unsigned short base_addr; unsigned char irq; unsigned char dma; unsigned char port; };
151  union __anonunion_ifs_ifsu_172 { raw_hdlc_proto *raw_hdlc; cisco_proto *cisco; fr_proto *fr; fr_proto_pvc *fr_pvc; fr_proto_pvc_info *fr_pvc_info; sync_serial_settings *sync; te1_settings *te1; };
151  struct if_settings { unsigned int type; unsigned int size; union __anonunion_ifs_ifsu_172 ifs_ifsu; };
169  union __anonunion_ifr_ifrn_173 { char ifrn_name[16U]; };
169  union __anonunion_ifr_ifru_174 { struct sockaddr ifru_addr; struct sockaddr ifru_dstaddr; struct sockaddr ifru_broadaddr; struct sockaddr ifru_netmask; struct sockaddr ifru_hwaddr; short ifru_flags; int ifru_ivalue; int ifru_mtu; struct ifmap ifru_map; char ifru_slave[16U]; char ifru_newname[16U]; void *ifru_data; struct if_settings ifru_settings; };
169  struct ifreq { union __anonunion_ifr_ifrn_173 ifr_ifrn; union __anonunion_ifr_ifru_174 ifr_ifru; };
91  struct hlist_bl_node;
91  struct hlist_bl_head { struct hlist_bl_node *first; };
36  struct hlist_bl_node { struct hlist_bl_node *next; struct hlist_bl_node **pprev; };
114  struct __anonstruct____missing_field_name_177 { spinlock_t lock; unsigned int count; };
114  union __anonunion____missing_field_name_176 { struct __anonstruct____missing_field_name_177 __annonCompField54; };
114  struct lockref { union __anonunion____missing_field_name_176 __annonCompField55; };
49  struct nameidata;
50  struct vfsmount;
51  struct __anonstruct____missing_field_name_179 { u32 hash; u32 len; };
51  union __anonunion____missing_field_name_178 { struct __anonstruct____missing_field_name_179 __annonCompField56; u64 hash_len; };
51  struct qstr { union __anonunion____missing_field_name_178 __annonCompField57; const unsigned char *name; };
90  struct dentry_operations;
90  union __anonunion_d_u_180 { struct list_head d_child; struct callback_head d_rcu; };
90  struct dentry { unsigned int d_flags; seqcount_t d_seq; struct hlist_bl_node d_hash; struct dentry *d_parent; struct qstr d_name; struct inode *d_inode; unsigned char d_iname[32U]; struct lockref d_lockref; const struct dentry_operations *d_op; struct super_block *d_sb; unsigned long d_time; void *d_fsdata; struct list_head d_lru; union __anonunion_d_u_180 d_u; struct list_head d_subdirs; struct hlist_node d_alias; };
142  struct dentry_operations { int (*d_revalidate)(struct dentry *, unsigned int); int (*d_weak_revalidate)(struct dentry *, unsigned int); int (*d_hash)(const struct dentry *, struct qstr *); int (*d_compare)(const struct dentry *, const struct dentry *, unsigned int, const char *, const struct qstr *); int (*d_delete)(const struct dentry *); void (*d_release)(struct dentry *); void (*d_prune)(struct dentry *); void (*d_iput)(struct dentry *, struct inode *); char * (*d_dname)(struct dentry *, char *, int); struct vfsmount * (*d_automount)(struct path *); int (*d_manage)(struct dentry *, bool); };
469  struct path { struct vfsmount *mnt; struct dentry *dentry; };
26  struct list_lru_node { spinlock_t lock; struct list_head list; long nr_items; };
28  struct list_lru { struct list_lru_node *node; nodemask_t active_nodes; };
58  struct radix_tree_node;
58  struct radix_tree_root { unsigned int height; gfp_t gfp_mask; struct radix_tree_node *rnode; };
381  enum pid_type { PIDTYPE_PID = 0, PIDTYPE_PGID = 1, PIDTYPE_SID = 2, PIDTYPE_MAX = 3 };
388  struct pid_namespace;
388  struct upid { int nr; struct pid_namespace *ns; struct hlist_node pid_chain; };
56  struct pid { atomic_t count; unsigned int level; struct hlist_head tasks[3U]; struct callback_head rcu; struct upid numbers[1U]; };
68  struct pid_link { struct hlist_node node; struct pid *pid; };
22  struct kernel_cap_struct { __u32 cap[2U]; };
25  typedef struct kernel_cap_struct kernel_cap_t;
45  struct fiemap_extent { __u64 fe_logical; __u64 fe_physical; __u64 fe_length; __u64 fe_reserved64[2U]; __u32 fe_flags; __u32 fe_reserved[3U]; };
38  enum migrate_mode { MIGRATE_ASYNC = 0, MIGRATE_SYNC_LIGHT = 1, MIGRATE_SYNC = 2 };
30  struct block_device;
31  struct io_context;
32  struct cgroup_subsys_state;
56  struct export_operations;
58  struct kiocb;
59  struct pipe_inode_info;
60  struct poll_table_struct;
61  struct kstatfs;
62  struct swap_info_struct;
68  struct iattr { unsigned int ia_valid; umode_t ia_mode; kuid_t ia_uid; kgid_t ia_gid; loff_t ia_size; struct timespec ia_atime; struct timespec ia_mtime; struct timespec ia_ctime; struct file *ia_file; };
246  struct percpu_counter { raw_spinlock_t lock; s64 count; struct list_head list; s32 *counters; };
176  struct fs_disk_quota { __s8 d_version; __s8 d_flags; __u16 d_fieldmask; __u32 d_id; __u64 d_blk_hardlimit; __u64 d_blk_softlimit; __u64 d_ino_hardlimit; __u64 d_ino_softlimit; __u64 d_bcount; __u64 d_icount; __s32 d_itimer; __s32 d_btimer; __u16 d_iwarns; __u16 d_bwarns; __s32 d_padding2; __u64 d_rtb_hardlimit; __u64 d_rtb_softlimit; __u64 d_rtbcount; __s32 d_rtbtimer; __u16 d_rtbwarns; __s16 d_padding3; char d_padding4[8U]; };
76  struct fs_qfilestat { __u64 qfs_ino; __u64 qfs_nblks; __u32 qfs_nextents; };
151  typedef struct fs_qfilestat fs_qfilestat_t;
152  struct fs_quota_stat { __s8 qs_version; __u16 qs_flags; __s8 qs_pad; fs_qfilestat_t qs_uquota; fs_qfilestat_t qs_gquota; __u32 qs_incoredqs; __s32 qs_btimelimit; __s32 qs_itimelimit; __s32 qs_rtbtimelimit; __u16 qs_bwarnlimit; __u16 qs_iwarnlimit; };
166  struct fs_qfilestatv { __u64 qfs_ino; __u64 qfs_nblks; __u32 qfs_nextents; __u32 qfs_pad; };
196  struct fs_quota_statv { __s8 qs_version; __u8 qs_pad1; __u16 qs_flags; __u32 qs_incoredqs; struct fs_qfilestatv qs_uquota; struct fs_qfilestatv qs_gquota; struct fs_qfilestatv qs_pquota; __s32 qs_btimelimit; __s32 qs_itimelimit; __s32 qs_rtbtimelimit; __u16 qs_bwarnlimit; __u16 qs_iwarnlimit; __u64 qs_pad2[8U]; };
212  struct dquot;
19  typedef __kernel_uid32_t projid_t;
23  struct __anonstruct_kprojid_t_182 { projid_t val; };
23  typedef struct __anonstruct_kprojid_t_182 kprojid_t;
119  struct if_dqinfo { __u64 dqi_bgrace; __u64 dqi_igrace; __u32 dqi_flags; __u32 dqi_valid; };
152  enum quota_type { USRQUOTA = 0, GRPQUOTA = 1, PRJQUOTA = 2 };
60  typedef long long qsize_t;
61  union __anonunion____missing_field_name_183 { kuid_t uid; kgid_t gid; kprojid_t projid; };
61  struct kqid { union __anonunion____missing_field_name_183 __annonCompField58; enum quota_type type; };
178  struct mem_dqblk { qsize_t dqb_bhardlimit; qsize_t dqb_bsoftlimit; qsize_t dqb_curspace; qsize_t dqb_rsvspace; qsize_t dqb_ihardlimit; qsize_t dqb_isoftlimit; qsize_t dqb_curinodes; time_t dqb_btime; time_t dqb_itime; };
200  struct quota_format_type;
201  struct mem_dqinfo { struct quota_format_type *dqi_format; int dqi_fmt_id; struct list_head dqi_dirty_list; unsigned long dqi_flags; unsigned int dqi_bgrace; unsigned int dqi_igrace; qsize_t dqi_maxblimit; qsize_t dqi_maxilimit; void *dqi_priv; };
264  struct dquot { struct hlist_node dq_hash; struct list_head dq_inuse; struct list_head dq_free; struct list_head dq_dirty; struct mutex dq_lock; atomic_t dq_count; wait_queue_head_t dq_wait_unused; struct super_block *dq_sb; struct kqid dq_id; loff_t dq_off; unsigned long dq_flags; struct mem_dqblk dq_dqb; };
291  struct quota_format_ops { int (*check_quota_file)(struct super_block *, int); int (*read_file_info)(struct super_block *, int); int (*write_file_info)(struct super_block *, int); int (*free_file_info)(struct super_block *, int); int (*read_dqblk)(struct dquot *); int (*commit_dqblk)(struct dquot *); int (*release_dqblk)(struct dquot *); };
302  struct dquot_operations { int (*write_dquot)(struct dquot *); struct dquot * (*alloc_dquot)(struct super_block *, int); void (*destroy_dquot)(struct dquot *); int (*acquire_dquot)(struct dquot *); int (*release_dquot)(struct dquot *); int (*mark_dirty)(struct dquot *); int (*write_info)(struct super_block *, int); qsize_t * (*get_reserved_space)(struct inode *); };
316  struct quotactl_ops { int (*quota_on)(struct super_block *, int, int, struct path *); int (*quota_on_meta)(struct
super_block *, int, int);   int (*quota_off)(struct super_block *, int);   int (*quota_sync)(struct super_block *, int);   int (*get_info)(struct super_block *, int, struct if_dqinfo *);   int (*set_info)(struct super_block *, int, struct if_dqinfo *);   int (*get_dqblk)(struct super_block *, struct kqid , struct fs_disk_quota *);   int (*set_dqblk)(struct super_block *, struct kqid , struct fs_disk_quota *);   int (*get_xstate)(struct super_block *, struct fs_quota_stat *);   int (*set_xstate)(struct super_block *, unsigned int, int);   int (*get_xstatev)(struct super_block *, struct fs_quota_statv *); } ;   333     struct quota_format_type {   int qf_fmt_id;   const struct quota_format_ops *qf_ops;   struct module *qf_owner;   struct quota_format_type *qf_next; } ;   379     struct quota_info {   unsigned int flags;   struct mutex dqio_mutex;   struct mutex dqonoff_mutex;   struct rw_semaphore dqptr_sem;   struct inode *files[2U];   struct mem_dqinfo info[2U];   const struct quota_format_ops *ops[2U]; } ;   345     union __anonunion_arg_185 {   char *buf;   void *data; } ;   345     struct __anonstruct_read_descriptor_t_184 {   size_t written;   size_t count;   union __anonunion_arg_185 arg;   int error; } ;   345     typedef struct __anonstruct_read_descriptor_t_184 read_descriptor_t;   348     struct address_space_operations {   int (*writepage)(struct page *, struct writeback_control *);   int (*readpage)(struct file *, struct page *);   int (*writepages)(struct address_space *, struct writeback_control *);   int (*set_page_dirty)(struct page *);   int (*readpages)(struct file *, struct address_space *, struct list_head *, unsigned int);   int (*write_begin)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page **, void **);   int (*write_end)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page *, void *);   sector_t  (*bmap)(struct address_space *, sector_t );   void (*invalidatepage)(struct page *, unsigned int, unsigned int);   int (*releasepage)(struct page *, gfp_t );   void (*freepage)(struct page *);   ssize_t  (*direct_IO)(int, struct kiocb *, const struct iovec *, loff_t , unsigned long);   int (*get_xip_mem)(struct address_space *, unsigned long, int, void **, unsigned long *);   int (*migratepage)(struct address_space *, struct page *, struct page *, enum migrate_mode );   int (*launder_page)(struct page *);   int (*is_partially_uptodate)(struct page *, read_descriptor_t *, unsigned long);   void (*is_dirty_writeback)(struct page *, bool *, bool *);   int (*error_remove_page)(struct address_space *, struct page *);   int (*swap_activate)(struct swap_info_struct *, struct file *, sector_t *);   void (*swap_deactivate)(struct file *); } ;   408     struct backing_dev_info ;   409     struct address_space {   struct inode *host;   struct radix_tree_root page_tree;   spinlock_t tree_lock;   unsigned int i_mmap_writable;   struct rb_root i_mmap;   struct list_head i_mmap_nonlinear;   struct mutex i_mmap_mutex;   unsigned long nrpages;   unsigned long writeback_index;   const struct address_space_operations *a_ops;   unsigned long flags;   struct backing_dev_info *backing_dev_info;   spinlock_t private_lock;   struct list_head private_list;   void *private_data; } ;   430     struct request_queue ;   431     struct hd_struct ;   431     struct gendisk ;   431     struct block_device {   dev_t bd_dev;   int bd_openers;   struct inode *bd_inode;   struct super_block *bd_super;   struct mutex bd_mutex;   struct 
list_head bd_inodes;   void *bd_claiming;   void *bd_holder;   int bd_holders;   bool bd_write_holder;   struct list_head bd_holder_disks;   struct block_device *bd_contains;   unsigned int bd_block_size;   struct hd_struct *bd_part;   unsigned int bd_part_count;   int bd_invalidated;   struct gendisk *bd_disk;   struct request_queue *bd_queue;   struct list_head bd_list;   unsigned long bd_private;   int bd_fsfreeze_count;   struct mutex bd_fsfreeze_mutex; } ;   503     struct posix_acl ;   504     struct inode_operations ;   504     union __anonunion____missing_field_name_186 {   const unsigned int i_nlink;   unsigned int __i_nlink; } ;   504     union __anonunion____missing_field_name_187 {   struct hlist_head i_dentry;   struct callback_head i_rcu; } ;   504     struct file_lock ;   504     struct cdev ;   504     union __anonunion____missing_field_name_188 {   struct pipe_inode_info *i_pipe;   struct block_device *i_bdev;   struct cdev *i_cdev; } ;   504     struct inode {   umode_t i_mode;   unsigned short i_opflags;   kuid_t i_uid;   kgid_t i_gid;   unsigned int i_flags;   struct posix_acl *i_acl;   struct posix_acl *i_default_acl;   const struct inode_operations *i_op;   struct super_block *i_sb;   struct address_space *i_mapping;   void *i_security;   unsigned long i_ino;   union __anonunion____missing_field_name_186 __annonCompField59;   dev_t i_rdev;   loff_t i_size;   struct timespec i_atime;   struct timespec i_mtime;   struct timespec i_ctime;   spinlock_t i_lock;   unsigned short i_bytes;   unsigned int i_blkbits;   blkcnt_t i_blocks;   unsigned long i_state;   struct mutex i_mutex;   unsigned long dirtied_when;   struct hlist_node i_hash;   struct list_head i_wb_list;   struct list_head i_lru;   struct list_head i_sb_list;   union __anonunion____missing_field_name_187 __annonCompField60;   u64 i_version;   atomic_t i_count;   atomic_t i_dio_count;   atomic_t i_writecount;   const struct file_operations *i_fop;   struct file_lock *i_flock;   struct address_space i_data;   struct dquot *i_dquot[2U];   struct list_head i_devices;   union __anonunion____missing_field_name_188 __annonCompField61;   __u32 i_generation;   __u32 i_fsnotify_mask;   struct hlist_head i_fsnotify_marks;   atomic_t i_readcount;   void *i_private; } ;   740     struct fown_struct {   rwlock_t lock;   struct pid *pid;   enum pid_type pid_type;   kuid_t uid;   kuid_t euid;   int signum; } ;   748     struct file_ra_state {   unsigned long start;   unsigned int size;   unsigned int async_size;   unsigned int ra_pages;   unsigned int mmap_miss;   loff_t prev_pos; } ;   771     union __anonunion_f_u_189 {   struct llist_node fu_llist;   struct callback_head fu_rcuhead; } ;   771     struct file {   union __anonunion_f_u_189 f_u;   struct path f_path;   struct inode *f_inode;   const struct file_operations *f_op;   spinlock_t f_lock;   atomic_long_t f_count;   unsigned int f_flags;   fmode_t f_mode;   struct mutex f_pos_lock;   loff_t f_pos;   struct fown_struct f_owner;   const struct cred *f_cred;   struct file_ra_state f_ra;   u64 f_version;   void *f_security;   void *private_data;   struct list_head f_ep_links;   struct list_head f_tfile_llink;   struct address_space *f_mapping;   unsigned long f_mnt_write_state; } ;   909     struct files_struct ;   909     typedef struct files_struct *fl_owner_t;   910     struct file_lock_operations {   void (*fl_copy_lock)(struct file_lock *, struct file_lock *);   void (*fl_release_private)(struct file_lock *); } ;   915     struct lock_manager_operations {   int 
(*lm_compare_owner)(struct file_lock *, struct file_lock *);   unsigned long int (*lm_owner_key)(struct file_lock *);   void (*lm_notify)(struct file_lock *);   int (*lm_grant)(struct file_lock *, struct file_lock *, int);   void (*lm_break)(struct file_lock *);   int (*lm_change)(struct file_lock **, int); } ;   928     struct net ;   933     struct nlm_lockowner ;   934     struct nfs_lock_info {   u32 state;   struct nlm_lockowner *owner;   struct list_head list; } ;    14     struct nfs4_lock_state ;    15     struct nfs4_lock_info {   struct nfs4_lock_state *owner; } ;    19     struct fasync_struct ;    19     struct __anonstruct_afs_191 {   struct list_head link;   int state; } ;    19     union __anonunion_fl_u_190 {   struct nfs_lock_info nfs_fl;   struct nfs4_lock_info nfs4_fl;   struct __anonstruct_afs_191 afs; } ;    19     struct file_lock {   struct file_lock *fl_next;   struct hlist_node fl_link;   struct list_head fl_block;   fl_owner_t fl_owner;   unsigned int fl_flags;   unsigned char fl_type;   unsigned int fl_pid;   int fl_link_cpu;   struct pid *fl_nspid;   wait_queue_head_t fl_wait;   struct file *fl_file;   loff_t fl_start;   loff_t fl_end;   struct fasync_struct *fl_fasync;   unsigned long fl_break_time;   unsigned long fl_downgrade_time;   const struct file_lock_operations *fl_ops;   const struct lock_manager_operations *fl_lmops;   union __anonunion_fl_u_190 fl_u; } ;  1036     struct fasync_struct {   spinlock_t fa_lock;   int magic;   int fa_fd;   struct fasync_struct *fa_next;   struct file *fa_file;   struct callback_head fa_rcu; } ;  1228     struct sb_writers {   struct percpu_counter counter[3U];   wait_queue_head_t wait;   int frozen;   wait_queue_head_t wait_unfrozen;   struct lockdep_map lock_map[3U]; } ;  1244     struct super_operations ;  1244     struct xattr_handler ;  1244     struct mtd_info ;  1244     struct super_block {   struct list_head s_list;   dev_t s_dev;   unsigned char s_blocksize_bits;   unsigned long s_blocksize;   loff_t s_maxbytes;   struct file_system_type *s_type;   const struct super_operations *s_op;   const struct dquot_operations *dq_op;   const struct quotactl_ops *s_qcop;   const struct export_operations *s_export_op;   unsigned long s_flags;   unsigned long s_magic;   struct dentry *s_root;   struct rw_semaphore s_umount;   int s_count;   atomic_t s_active;   void *s_security;   const struct xattr_handler **s_xattr;   struct list_head s_inodes;   struct hlist_bl_head s_anon;   struct list_head s_mounts;   struct block_device *s_bdev;   struct backing_dev_info *s_bdi;   struct mtd_info *s_mtd;   struct hlist_node s_instances;   struct quota_info s_dquot;   struct sb_writers s_writers;   char s_id[32U];   u8 s_uuid[16U];   void *s_fs_info;   unsigned int s_max_links;   fmode_t s_mode;   u32 s_time_gran;   struct mutex s_vfs_rename_mutex;   char *s_subtype;   char *s_options;   const struct dentry_operations *s_d_op;   int cleancache_poolid;   struct shrinker s_shrink;   atomic_long_t s_remove_count;   int s_readonly_remount;   struct workqueue_struct *s_dio_done_wq;   struct list_lru s_dentry_lru;   struct list_lru s_inode_lru;   struct callback_head rcu; } ;  1474     struct fiemap_extent_info {   unsigned int fi_flags;   unsigned int fi_extents_mapped;   unsigned int fi_extents_max;   struct fiemap_extent *fi_extents_start; } ;  1512     struct dir_context {   int (*actor)(void *, const char *, int, loff_t , u64 , unsigned int);   loff_t pos; } ;  1517     struct file_operations {   struct module *owner;   loff_t  
(*llseek)(struct file *, loff_t , int);   ssize_t  (*read)(struct file *, char *, size_t , loff_t *);   ssize_t  (*write)(struct file *, const char *, size_t , loff_t *);   ssize_t  (*aio_read)(struct kiocb *, const struct iovec *, unsigned long, loff_t );   ssize_t  (*aio_write)(struct kiocb *, const struct iovec *, unsigned long, loff_t );   int (*iterate)(struct file *, struct dir_context *);   unsigned int (*poll)(struct file *, struct poll_table_struct *);   long int (*unlocked_ioctl)(struct file *, unsigned int, unsigned long);   long int (*compat_ioctl)(struct file *, unsigned int, unsigned long);   int (*mmap)(struct file *, struct vm_area_struct *);   int (*open)(struct inode *, struct file *);   int (*flush)(struct file *, fl_owner_t );   int (*release)(struct inode *, struct file *);   int (*fsync)(struct file *, loff_t , loff_t , int);   int (*aio_fsync)(struct kiocb *, int);   int (*fasync)(int, struct file *, int);   int (*lock)(struct file *, int, struct file_lock *);   ssize_t  (*sendpage)(struct file *, struct page *, int, size_t , loff_t *, int);   unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);   int (*check_flags)(int);   int (*flock)(struct file *, int, struct file_lock *);   ssize_t  (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t , unsigned int);   ssize_t  (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t , unsigned int);   int (*setlease)(struct file *, long, struct file_lock **);   long int (*fallocate)(struct file *, int, loff_t , loff_t );   int (*show_fdinfo)(struct seq_file *, struct file *); } ;  1555     struct inode_operations {   struct dentry * (*lookup)(struct inode *, struct dentry *, unsigned int);   void * (*follow_link)(struct dentry *, struct nameidata *);   int (*permission)(struct inode *, int);   struct posix_acl * (*get_acl)(struct inode *, int);   int (*readlink)(struct dentry *, char *, int);   void (*put_link)(struct dentry *, struct nameidata *, void *);   int (*create)(struct inode *, struct dentry *, umode_t , bool );   int (*link)(struct dentry *, struct inode *, struct dentry *);   int (*unlink)(struct inode *, struct dentry *);   int (*symlink)(struct inode *, struct dentry *, const char *);   int (*mkdir)(struct inode *, struct dentry *, umode_t );   int (*rmdir)(struct inode *, struct dentry *);   int (*mknod)(struct inode *, struct dentry *, umode_t , dev_t );   int (*rename)(struct inode *, struct dentry *, struct inode *, struct dentry *);   int (*setattr)(struct dentry *, struct iattr *);   int (*getattr)(struct vfsmount *, struct dentry *, struct kstat *);   int (*setxattr)(struct dentry *, const char *, const void *, size_t , int);   ssize_t  (*getxattr)(struct dentry *, const char *, void *, size_t );   ssize_t  (*listxattr)(struct dentry *, char *, size_t );   int (*removexattr)(struct dentry *, const char *);   int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 , u64 );   int (*update_time)(struct inode *, struct timespec *, int);   int (*atomic_open)(struct inode *, struct dentry *, struct file *, unsigned int, umode_t , int *);   int (*tmpfile)(struct inode *, struct dentry *, umode_t );   int (*set_acl)(struct inode *, struct posix_acl *, int); } ;  1600     struct super_operations {   struct inode * (*alloc_inode)(struct super_block *);   void (*destroy_inode)(struct inode *);   void (*dirty_inode)(struct inode *, int);   int (*write_inode)(struct inode *, struct writeback_control *);   int 
(*drop_inode)(struct inode *);   void (*evict_inode)(struct inode *);   void (*put_super)(struct super_block *);   int (*sync_fs)(struct super_block *, int);   int (*freeze_fs)(struct super_block *);   int (*unfreeze_fs)(struct super_block *);   int (*statfs)(struct dentry *, struct kstatfs *);   int (*remount_fs)(struct super_block *, int *, char *);   void (*umount_begin)(struct super_block *);   int (*show_options)(struct seq_file *, struct dentry *);   int (*show_devname)(struct seq_file *, struct dentry *);   int (*show_path)(struct seq_file *, struct dentry *);   int (*show_stats)(struct seq_file *, struct dentry *);   ssize_t  (*quota_read)(struct super_block *, int, char *, size_t , loff_t );   ssize_t  (*quota_write)(struct super_block *, int, const char *, size_t , loff_t );   int (*bdev_try_to_free_page)(struct super_block *, struct page *, gfp_t );   long int (*nr_cached_objects)(struct super_block *, int);   long int (*free_cached_objects)(struct super_block *, long, int); } ;
1814     struct file_system_type {   const char *name;   int fs_flags;   struct dentry * (*mount)(struct file_system_type *, int, const char *, void *);   void (*kill_sb)(struct super_block *);   struct module *owner;   struct file_system_type *next;   struct hlist_head fs_supers;   struct lock_class_key s_lock_key;   struct lock_class_key s_umount_key;   struct lock_class_key s_vfs_rename_key;   struct lock_class_key s_writers_key[3U];   struct lock_class_key i_lock_key;   struct lock_class_key i_mutex_key;   struct lock_class_key i_mutex_dir_key; } ;
4     typedef unsigned long cputime_t;
24     struct __anonstruct_sigset_t_192 {   unsigned long sig[1U]; } ;
24     typedef struct __anonstruct_sigset_t_192 sigset_t;
25     struct siginfo ;
17     typedef void __signalfn_t(int);
18     typedef __signalfn_t *__sighandler_t;
20     typedef void __restorefn_t();
21     typedef __restorefn_t *__sigrestore_t;
34     union sigval {   int sival_int;   void *sival_ptr; } ;
10     typedef union sigval sigval_t;
11     struct __anonstruct__kill_194 {   __kernel_pid_t _pid;   __kernel_uid32_t _uid; } ;
11     struct __anonstruct__timer_195 {   __kernel_timer_t _tid;   int _overrun;   char _pad[0U];   sigval_t _sigval;   int _sys_private; } ;
11     struct __anonstruct__rt_196 {   __kernel_pid_t _pid;   __kernel_uid32_t _uid;   sigval_t _sigval; } ;
11     struct __anonstruct__sigchld_197 {   __kernel_pid_t _pid;   __kernel_uid32_t _uid;   int _status;   __kernel_clock_t _utime;   __kernel_clock_t _stime; } ;
11     struct __anonstruct__sigfault_198 {   void *_addr;   short _addr_lsb; } ;
11     struct __anonstruct__sigpoll_199 {   long _band;   int _fd; } ;
11     struct __anonstruct__sigsys_200 {   void *_call_addr;   int _syscall;   unsigned int _arch; } ;
11     union __anonunion__sifields_193 {   int _pad[28U];   struct __anonstruct__kill_194 _kill;   struct __anonstruct__timer_195 _timer;   struct __anonstruct__rt_196 _rt;   struct __anonstruct__sigchld_197 _sigchld;   struct __anonstruct__sigfault_198 _sigfault;   struct __anonstruct__sigpoll_199 _sigpoll;   struct __anonstruct__sigsys_200 _sigsys; } ;
11     struct siginfo {   int si_signo;   int si_errno;   int si_code;   union __anonunion__sifields_193 _sifields; } ;
109     typedef struct siginfo siginfo_t;
21     struct sigpending {   struct list_head list;   sigset_t signal; } ;
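struct sigpending above and struct sigaction just below are the kernel-side mirror of the userspace signal API: __sighandler_t is exactly the void (*)(int) handler installed by sigaction(2), and sa_mask is the set of signals blocked while that handler runs. For illustration only (this sketch is not part of the generated trace), a minimal userspace counterpart:

    #include <signal.h>
    #include <stdio.h>
    #include <unistd.h>

    static volatile sig_atomic_t got_sigint;

    /* Matches the __sighandler_t shape: void (*)(int). */
    static void on_sigint(int signo)
    {
        (void)signo;
        got_sigint = 1;
    }

    int main(void)
    {
        struct sigaction sa;

        sa.sa_handler = on_sigint;     /* the __sighandler_t slot */
        sigemptyset(&sa.sa_mask);      /* sa_mask: nothing extra blocked in the handler */
        sa.sa_flags = 0;               /* no SA_RESTART, so pause() returns after the handler */
        sigaction(SIGINT, &sa, NULL);

        pause();                       /* sleep until a signal arrives */
        if (got_sigint)
            printf("caught SIGINT\n");
        return 0;
    }

The sa_restorer member is an implementation detail supplied by libc; portable code sets only sa_handler, sa_mask, and sa_flags, as here.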
251     struct sigaction {   __sighandler_t sa_handler;   unsigned long sa_flags;   __sigrestore_t sa_restorer;   sigset_t sa_mask; } ;
265     struct k_sigaction {   struct sigaction sa; } ;
46     struct seccomp_filter ;
47     struct seccomp {   int mode;   struct seccomp_filter *filter; } ;
40     struct rt_mutex_waiter ;
41     struct rlimit {   __kernel_ulong_t rlim_cur;   __kernel_ulong_t rlim_max; } ;
11     struct timerqueue_node {   struct rb_node node;   ktime_t expires; } ;
12     struct timerqueue_head {   struct rb_root head;   struct timerqueue_node *next; } ;
50     struct hrtimer_clock_base ;
51     struct hrtimer_cpu_base ;
60     enum hrtimer_restart {   HRTIMER_NORESTART = 0,   HRTIMER_RESTART = 1 } ;
65     struct hrtimer {   struct timerqueue_node node;   ktime_t _softexpires;   enum hrtimer_restart  (*function)(struct hrtimer *);   struct hrtimer_clock_base *base;   unsigned long state;   int start_pid;   void *start_site;   char start_comm[16U]; } ;
132     struct hrtimer_clock_base {   struct hrtimer_cpu_base *cpu_base;   int index;   clockid_t clockid;   struct timerqueue_head active;   ktime_t resolution;   ktime_t  (*get_time)();   ktime_t softirq_time;   ktime_t offset; } ;
163     struct hrtimer_cpu_base {   raw_spinlock_t lock;   unsigned int active_bases;   unsigned int clock_was_set;   ktime_t expires_next;   int hres_active;   int hang_detected;   unsigned long nr_events;   unsigned long nr_retries;   unsigned long nr_hangs;   ktime_t max_hang_time;   struct hrtimer_clock_base clock_base[4U]; } ;
463     struct task_io_accounting {   u64 rchar;   u64 wchar;   u64 syscr;   u64 syscw;   u64 read_bytes;   u64 write_bytes;   u64 cancelled_write_bytes; } ;
45     struct latency_record {   unsigned long backtrace[12U];   unsigned int count;   unsigned long time;   unsigned long max; } ;
39     struct assoc_array_ptr ;
39     struct assoc_array {   struct assoc_array_ptr *root;   unsigned long nr_leaves_on_tree; } ;
31     typedef int32_t key_serial_t;
34     typedef uint32_t key_perm_t;
35     struct key ;
36     struct signal_struct ;
37     struct key_type ;
41     struct keyring_index_key {   struct key_type *type;   const char *description;   size_t desc_len; } ;
123     union __anonunion____missing_field_name_203 {   struct list_head graveyard_link;   struct rb_node serial_node; } ;
123     struct key_user ;
123     union __anonunion____missing_field_name_204 {   time_t expiry;   time_t revoked_at; } ;
123     struct __anonstruct____missing_field_name_206 {   struct key_type *type;   char *description; } ;
123     union __anonunion____missing_field_name_205 {   struct keyring_index_key index_key;   struct __anonstruct____missing_field_name_206 __annonCompField64; } ;
123     union __anonunion_type_data_207 {   struct list_head link;   unsigned long x[2U];   void *p[2U];   int reject_error; } ;
123     union __anonunion_payload_209 {   unsigned long value;   void *rcudata;   void *data;   void *data2[2U]; } ;
123     union __anonunion____missing_field_name_208 {   union __anonunion_payload_209 payload;   struct assoc_array keys; } ;
123     struct key {   atomic_t usage;   key_serial_t serial;   union __anonunion____missing_field_name_203 __annonCompField62;   struct rw_semaphore sem;   struct key_user *user;   void *security;   union __anonunion____missing_field_name_204 __annonCompField63;   time_t last_used_at;   kuid_t uid;   kgid_t gid;   key_perm_t perm;   unsigned short quotalen;   unsigned short datalen;   unsigned long flags;   union __anonunion____missing_field_name_205
__annonCompField65;   union __anonunion_type_data_207 type_data;   union __anonunion____missing_field_name_208 __annonCompField66; } ;   345     struct audit_context ;    27     struct group_info {   atomic_t usage;   int ngroups;   int nblocks;   kgid_t small_block[32U];   kgid_t *blocks[0U]; } ;    78     struct cred {   atomic_t usage;   atomic_t subscribers;   void *put_addr;   unsigned int magic;   kuid_t uid;   kgid_t gid;   kuid_t suid;   kgid_t sgid;   kuid_t euid;   kgid_t egid;   kuid_t fsuid;   kgid_t fsgid;   unsigned int securebits;   kernel_cap_t cap_inheritable;   kernel_cap_t cap_permitted;   kernel_cap_t cap_effective;   kernel_cap_t cap_bset;   unsigned char jit_keyring;   struct key *session_keyring;   struct key *process_keyring;   struct key *thread_keyring;   struct key *request_key_auth;   void *security;   struct user_struct *user;   struct user_namespace *user_ns;   struct group_info *group_info;   struct callback_head rcu; } ;   123     struct futex_pi_state ;   124     struct robust_list_head ;   125     struct bio_list ;   126     struct fs_struct ;   127     struct perf_event_context ;   128     struct blk_plug ;   180     struct cfs_rq ;   181     struct task_group ;   421     struct sighand_struct {   atomic_t count;   struct k_sigaction action[64U];   spinlock_t siglock;   wait_queue_head_t signalfd_wqh; } ;   460     struct pacct_struct {   int ac_flag;   long ac_exitcode;   unsigned long ac_mem;   cputime_t ac_utime;   cputime_t ac_stime;   unsigned long ac_minflt;   unsigned long ac_majflt; } ;   468     struct cpu_itimer {   cputime_t expires;   cputime_t incr;   u32 error;   u32 incr_error; } ;   475     struct cputime {   cputime_t utime;   cputime_t stime; } ;   487     struct task_cputime {   cputime_t utime;   cputime_t stime;   unsigned long long sum_exec_runtime; } ;   507     struct thread_group_cputimer {   struct task_cputime cputime;   int running;   raw_spinlock_t lock; } ;   549     struct autogroup ;   550     struct tty_struct ;   550     struct taskstats ;   550     struct tty_audit_buf ;   550     struct signal_struct {   atomic_t sigcnt;   atomic_t live;   int nr_threads;   struct list_head thread_head;   wait_queue_head_t wait_chldexit;   struct task_struct *curr_target;   struct sigpending shared_pending;   int group_exit_code;   int notify_count;   struct task_struct *group_exit_task;   int group_stop_count;   unsigned int flags;   unsigned char is_child_subreaper;   unsigned char has_child_subreaper;   int posix_timer_id;   struct list_head posix_timers;   struct hrtimer real_timer;   struct pid *leader_pid;   ktime_t it_real_incr;   struct cpu_itimer it[2U];   struct thread_group_cputimer cputimer;   struct task_cputime cputime_expires;   struct list_head cpu_timers[3U];   struct pid *tty_old_pgrp;   int leader;   struct tty_struct *tty;   struct autogroup *autogroup;   cputime_t utime;   cputime_t stime;   cputime_t cutime;   cputime_t cstime;   cputime_t gtime;   cputime_t cgtime;   struct cputime prev_cputime;   unsigned long nvcsw;   unsigned long nivcsw;   unsigned long cnvcsw;   unsigned long cnivcsw;   unsigned long min_flt;   unsigned long maj_flt;   unsigned long cmin_flt;   unsigned long cmaj_flt;   unsigned long inblock;   unsigned long oublock;   unsigned long cinblock;   unsigned long coublock;   unsigned long maxrss;   unsigned long cmaxrss;   struct task_io_accounting ioac;   unsigned long long sum_sched_runtime;   struct rlimit rlim[16U];   struct pacct_struct pacct;   struct taskstats *stats;   unsigned int 
audit_tty;   unsigned int audit_tty_log_passwd;   struct tty_audit_buf *tty_audit_buf;   struct rw_semaphore group_rwsem;   oom_flags_t oom_flags;   short oom_score_adj;   short oom_score_adj_min;   struct mutex cred_guard_mutex; } ;   730     struct user_struct {   atomic_t __count;   atomic_t processes;   atomic_t files;   atomic_t sigpending;   atomic_t inotify_watches;   atomic_t inotify_devs;   atomic_t fanotify_listeners;   atomic_long_t epoll_watches;   unsigned long mq_bytes;   unsigned long locked_shm;   struct key *uid_keyring;   struct key *session_keyring;   struct hlist_node uidhash_node;   kuid_t uid;   atomic_long_t locked_vm; } ;   774     struct reclaim_state ;   775     struct sched_info {   unsigned long pcount;   unsigned long long run_delay;   unsigned long long last_arrival;   unsigned long long last_queued; } ;   790     struct task_delay_info {   spinlock_t lock;   unsigned int flags;   struct timespec blkio_start;   struct timespec blkio_end;   u64 blkio_delay;   u64 swapin_delay;   u32 blkio_count;   u32 swapin_count;   struct timespec freepages_start;   struct timespec freepages_end;   u64 freepages_delay;   u32 freepages_count; } ;  1004     struct uts_namespace ;  1005     struct load_weight {   unsigned long weight;   u32 inv_weight; } ;  1013     struct sched_avg {   u32 runnable_avg_sum;   u32 runnable_avg_period;   u64 last_runnable_update;   s64 decay_count;   unsigned long load_avg_contrib; } ;  1025     struct sched_statistics {   u64 wait_start;   u64 wait_max;   u64 wait_count;   u64 wait_sum;   u64 iowait_count;   u64 iowait_sum;   u64 sleep_start;   u64 sleep_max;   s64 sum_sleep_runtime;   u64 block_start;   u64 block_max;   u64 exec_max;   u64 slice_max;   u64 nr_migrations_cold;   u64 nr_failed_migrations_affine;   u64 nr_failed_migrations_running;   u64 nr_failed_migrations_hot;   u64 nr_forced_migrations;   u64 nr_wakeups;   u64 nr_wakeups_sync;   u64 nr_wakeups_migrate;   u64 nr_wakeups_local;   u64 nr_wakeups_remote;   u64 nr_wakeups_affine;   u64 nr_wakeups_affine_attempts;   u64 nr_wakeups_passive;   u64 nr_wakeups_idle; } ;  1060     struct sched_entity {   struct load_weight load;   struct rb_node run_node;   struct list_head group_node;   unsigned int on_rq;   u64 exec_start;   u64 sum_exec_runtime;   u64 vruntime;   u64 prev_sum_exec_runtime;   u64 nr_migrations;   struct sched_statistics statistics;   struct sched_entity *parent;   struct cfs_rq *cfs_rq;   struct cfs_rq *my_q;   struct sched_avg avg; } ;  1091     struct rt_rq ;  1091     struct sched_rt_entity {   struct list_head run_list;   unsigned long timeout;   unsigned long watchdog_stamp;   unsigned int time_slice;   struct sched_rt_entity *back;   struct sched_rt_entity *parent;   struct rt_rq *rt_rq;   struct rt_rq *my_q; } ;  1107     struct sched_dl_entity {   struct rb_node rb_node;   u64 dl_runtime;   u64 dl_deadline;   u64 dl_period;   u64 dl_bw;   s64 runtime;   u64 deadline;   unsigned int flags;   int dl_throttled;   int dl_new;   int dl_boosted;   struct hrtimer dl_timer; } ;  1162     struct memcg_batch_info {   int do_batch;   struct mem_cgroup *memcg;   unsigned long nr_pages;   unsigned long memsw_nr_pages; } ;  1569     struct memcg_oom_info {   struct mem_cgroup *memcg;   gfp_t gfp_mask;   int order;   unsigned char may_oom; } ;  1576     struct sched_class ;  1576     struct css_set ;  1576     struct compat_robust_list_head ;  1576     struct numa_group ;  1576     struct task_struct {   volatile long state;   void *stack;   atomic_t usage;   unsigned int 
flags;   unsigned int ptrace;   struct llist_node wake_entry;   int on_cpu;   struct task_struct *last_wakee;   unsigned long wakee_flips;   unsigned long wakee_flip_decay_ts;   int wake_cpu;   int on_rq;   int prio;   int static_prio;   int normal_prio;   unsigned int rt_priority;   const struct sched_class *sched_class;   struct sched_entity se;   struct sched_rt_entity rt;   struct task_group *sched_task_group;   struct sched_dl_entity dl;   struct hlist_head preempt_notifiers;   unsigned int policy;   int nr_cpus_allowed;   cpumask_t cpus_allowed;   struct sched_info sched_info;   struct list_head tasks;   struct plist_node pushable_tasks;   struct rb_node pushable_dl_tasks;   struct mm_struct *mm;   struct mm_struct *active_mm;   unsigned char brk_randomized;   struct task_rss_stat rss_stat;   int exit_state;   int exit_code;   int exit_signal;   int pdeath_signal;   unsigned int jobctl;   unsigned int personality;   unsigned char in_execve;   unsigned char in_iowait;   unsigned char no_new_privs;   unsigned char sched_reset_on_fork;   unsigned char sched_contributes_to_load;   pid_t pid;   pid_t tgid;   struct task_struct *real_parent;   struct task_struct *parent;   struct list_head children;   struct list_head sibling;   struct task_struct *group_leader;   struct list_head ptraced;   struct list_head ptrace_entry;   struct pid_link pids[3U];   struct list_head thread_group;   struct list_head thread_node;   struct completion *vfork_done;   int *set_child_tid;   int *clear_child_tid;   cputime_t utime;   cputime_t stime;   cputime_t utimescaled;   cputime_t stimescaled;   cputime_t gtime;   struct cputime prev_cputime;   unsigned long nvcsw;   unsigned long nivcsw;   struct timespec start_time;   struct timespec real_start_time;   unsigned long min_flt;   unsigned long maj_flt;   struct task_cputime cputime_expires;   struct list_head cpu_timers[3U];   const struct cred *real_cred;   const struct cred *cred;   char comm[16U];   int link_count;   int total_link_count;   struct sysv_sem sysvsem;   unsigned long last_switch_count;   struct thread_struct thread;   struct fs_struct *fs;   struct files_struct *files;   struct nsproxy *nsproxy;   struct signal_struct *signal;   struct sighand_struct *sighand;   sigset_t blocked;   sigset_t real_blocked;   sigset_t saved_sigmask;   struct sigpending pending;   unsigned long sas_ss_sp;   size_t sas_ss_size;   int (*notifier)(void *);   void *notifier_data;   sigset_t *notifier_mask;   struct callback_head *task_works;   struct audit_context *audit_context;   kuid_t loginuid;   unsigned int sessionid;   struct seccomp seccomp;   u32 parent_exec_id;   u32 self_exec_id;   spinlock_t alloc_lock;   raw_spinlock_t pi_lock;   struct rb_root pi_waiters;   struct rb_node *pi_waiters_leftmost;   struct rt_mutex_waiter *pi_blocked_on;   struct task_struct *pi_top_task;   struct mutex_waiter *blocked_on;   unsigned int irq_events;   unsigned long hardirq_enable_ip;   unsigned long hardirq_disable_ip;   unsigned int hardirq_enable_event;   unsigned int hardirq_disable_event;   int hardirqs_enabled;   int hardirq_context;   unsigned long softirq_disable_ip;   unsigned long softirq_enable_ip;   unsigned int softirq_disable_event;   unsigned int softirq_enable_event;   int softirqs_enabled;   int softirq_context;   u64 curr_chain_key;   int lockdep_depth;   unsigned int lockdep_recursion;   struct held_lock held_locks[48U];   gfp_t lockdep_reclaim_gfp;   void *journal_info;   struct bio_list *bio_list;   struct blk_plug *plug;   struct reclaim_state 
*reclaim_state;   struct backing_dev_info *backing_dev_info;   struct io_context *io_context;   unsigned long ptrace_message;   siginfo_t *last_siginfo;   struct task_io_accounting ioac;   u64 acct_rss_mem1;   u64 acct_vm_mem1;   cputime_t acct_timexpd;   nodemask_t mems_allowed;   seqcount_t mems_allowed_seq;   int cpuset_mem_spread_rotor;   int cpuset_slab_spread_rotor;   struct css_set *cgroups;   struct list_head cg_list;   struct robust_list_head *robust_list;   struct compat_robust_list_head *compat_robust_list;   struct list_head pi_state_list;   struct futex_pi_state *pi_state_cache;   struct perf_event_context *perf_event_ctxp[2U];   struct mutex perf_event_mutex;   struct list_head perf_event_list;   struct mempolicy *mempolicy;   short il_next;   short pref_node_fork;   int numa_scan_seq;   unsigned int numa_scan_period;   unsigned int numa_scan_period_max;   int numa_preferred_nid;   int numa_migrate_deferred;   unsigned long numa_migrate_retry;   u64 node_stamp;   struct callback_head numa_work;   struct list_head numa_entry;   struct numa_group *numa_group;   unsigned long *numa_faults;   unsigned long total_numa_faults;   unsigned long *numa_faults_buffer;   unsigned long numa_faults_locality[2U];   unsigned long numa_pages_migrated;   struct callback_head rcu;   struct pipe_inode_info *splice_pipe;   struct page_frag task_frag;   struct task_delay_info *delays;   int make_it_fail;   int nr_dirtied;   int nr_dirtied_pause;   unsigned long dirty_paused_when;   int latency_record_count;   struct latency_record latency_record[32U];   unsigned long timer_slack_ns;   unsigned long default_timer_slack_ns;   unsigned long trace;   unsigned long trace_recursion;   struct memcg_batch_info memcg_batch;   unsigned int memcg_kmem_skip_account;   struct memcg_oom_info memcg_oom;   struct uprobe_task *utask;   unsigned int sequential_io;   unsigned int sequential_io_avg; } ;    39     typedef s32 compat_long_t;    44     typedef u32 compat_uptr_t;   273     struct compat_robust_list {   compat_uptr_t next; } ;   277     struct compat_robust_list_head {   struct compat_robust_list list;   compat_long_t futex_offset;   compat_uptr_t list_op_pending; } ;    62     struct exception_table_entry {   int insn;   int fixup; } ;   119     struct sk_buff ;    15     typedef u64 netdev_features_t;    18     struct nf_conntrack {   atomic_t use; } ;   136     struct nf_bridge_info {   atomic_t use;   unsigned int mask;   struct net_device *physindev;   struct net_device *physoutdev;   unsigned long data[4U]; } ;   146     struct sk_buff_head {   struct sk_buff *next;   struct sk_buff *prev;   __u32 qlen;   spinlock_t lock; } ;   172     struct skb_frag_struct ;   172     typedef struct skb_frag_struct skb_frag_t;   173     struct __anonstruct_page_226 {   struct page *p; } ;   173     struct skb_frag_struct {   struct __anonstruct_page_226 page;   __u32 page_offset;   __u32 size; } ;   206     struct skb_shared_hwtstamps {   ktime_t hwtstamp;   ktime_t syststamp; } ;   275     struct skb_shared_info {   unsigned char nr_frags;   __u8 tx_flags;   unsigned short gso_size;   unsigned short gso_segs;   unsigned short gso_type;   struct sk_buff *frag_list;   struct skb_shared_hwtstamps hwtstamps;   __be32 ip6_frag_id;   atomic_t dataref;   void *destructor_arg;   skb_frag_t frags[17U]; } ;   354     typedef unsigned int sk_buff_data_t;   355     struct sec_path ;   355     struct __anonstruct____missing_field_name_228 {   __u16 csum_start;   __u16 csum_offset; } ;   355     union 
__anonunion____missing_field_name_227 {   __wsum csum;   struct __anonstruct____missing_field_name_228 __annonCompField69; } ;   355     union __anonunion____missing_field_name_229 {   unsigned int napi_id;   dma_cookie_t dma_cookie; } ;   355     union __anonunion____missing_field_name_230 {   __u32 mark;   __u32 dropcount;   __u32 reserved_tailroom; } ;   355     struct sk_buff {   struct sk_buff *next;   struct sk_buff *prev;   ktime_t tstamp;   struct sock *sk;   struct net_device *dev;   char cb[48U];   unsigned long _skb_refdst;   struct sec_path *sp;   unsigned int len;   unsigned int data_len;   __u16 mac_len;   __u16 hdr_len;   union __anonunion____missing_field_name_227 __annonCompField70;   __u32 priority;   unsigned char local_df;   unsigned char cloned;   unsigned char ip_summed;   unsigned char nohdr;   unsigned char nfctinfo;   unsigned char pkt_type;   unsigned char fclone;   unsigned char ipvs_property;   unsigned char peeked;   unsigned char nf_trace;   __be16 protocol;   void (*destructor)(struct sk_buff *);   struct nf_conntrack *nfct;   struct nf_bridge_info *nf_bridge;   int skb_iif;   __u32 rxhash;   __be16 vlan_proto;   __u16 vlan_tci;   __u16 tc_index;   __u16 tc_verd;   __u16 queue_mapping;   unsigned char ndisc_nodetype;   unsigned char pfmemalloc;   unsigned char ooo_okay;   unsigned char l4_rxhash;   unsigned char wifi_acked_valid;   unsigned char wifi_acked;   unsigned char no_fcs;   unsigned char head_frag;   unsigned char encapsulation;   union __anonunion____missing_field_name_229 __annonCompField71;   __u32 secmark;   union __anonunion____missing_field_name_230 __annonCompField72;   __be16 inner_protocol;   __u16 inner_transport_header;   __u16 inner_network_header;   __u16 inner_mac_header;   __u16 transport_header;   __u16 network_header;   __u16 mac_header;   sk_buff_data_t tail;   sk_buff_data_t end;   unsigned char *head;   unsigned char *data;   unsigned int truesize;   atomic_t users; } ;   578     struct dst_entry ;  2939     struct ethhdr {   unsigned char h_dest[6U];   unsigned char h_source[6U];   __be16 h_proto; } ;    34     struct ethtool_cmd {   __u32 cmd;   __u32 supported;   __u32 advertising;   __u16 speed;   __u8 duplex;   __u8 port;   __u8 phy_address;   __u8 transceiver;   __u8 autoneg;   __u8 mdio_support;   __u32 maxtxpkt;   __u32 maxrxpkt;   __u16 speed_hi;   __u8 eth_tp_mdix;   __u8 eth_tp_mdix_ctrl;   __u32 lp_advertising;   __u32 reserved[2U]; } ;    65     struct ethtool_drvinfo {   __u32 cmd;   char driver[32U];   char version[32U];   char fw_version[32U];   char bus_info[32U];   char reserved1[32U];   char reserved2[12U];   __u32 n_priv_flags;   __u32 n_stats;   __u32 testinfo_len;   __u32 eedump_len;   __u32 regdump_len; } ;   105     struct ethtool_wolinfo {   __u32 cmd;   __u32 supported;   __u32 wolopts;   __u8 sopass[6U]; } ;   120     struct ethtool_regs {   __u32 cmd;   __u32 version;   __u32 len;   __u8 data[0U]; } ;   128     struct ethtool_eeprom {   __u32 cmd;   __u32 magic;   __u32 offset;   __u32 len;   __u8 data[0U]; } ;   137     struct ethtool_eee {   __u32 cmd;   __u32 supported;   __u32 advertised;   __u32 lp_advertised;   __u32 eee_active;   __u32 eee_enabled;   __u32 tx_lpi_enabled;   __u32 tx_lpi_timer;   __u32 reserved[2U]; } ;   166     struct ethtool_modinfo {   __u32 cmd;   __u32 type;   __u32 eeprom_len;   __u32 reserved[8U]; } ;   183     struct ethtool_coalesce {   __u32 cmd;   __u32 rx_coalesce_usecs;   __u32 rx_max_coalesced_frames;   __u32 rx_coalesce_usecs_irq;   __u32 
rx_max_coalesced_frames_irq;   __u32 tx_coalesce_usecs;   __u32 tx_max_coalesced_frames;   __u32 tx_coalesce_usecs_irq;   __u32 tx_max_coalesced_frames_irq;   __u32 stats_block_coalesce_usecs;   __u32 use_adaptive_rx_coalesce;   __u32 use_adaptive_tx_coalesce;   __u32 pkt_rate_low;   __u32 rx_coalesce_usecs_low;   __u32 rx_max_coalesced_frames_low;   __u32 tx_coalesce_usecs_low;   __u32 tx_max_coalesced_frames_low;   __u32 pkt_rate_high;   __u32 rx_coalesce_usecs_high;   __u32 rx_max_coalesced_frames_high;   __u32 tx_coalesce_usecs_high;   __u32 tx_max_coalesced_frames_high;   __u32 rate_sample_interval; } ;   281     struct ethtool_ringparam {   __u32 cmd;   __u32 rx_max_pending;   __u32 rx_mini_max_pending;   __u32 rx_jumbo_max_pending;   __u32 tx_max_pending;   __u32 rx_pending;   __u32 rx_mini_pending;   __u32 rx_jumbo_pending;   __u32 tx_pending; } ;   303     struct ethtool_channels {   __u32 cmd;   __u32 max_rx;   __u32 max_tx;   __u32 max_other;   __u32 max_combined;   __u32 rx_count;   __u32 tx_count;   __u32 other_count;   __u32 combined_count; } ;   331     struct ethtool_pauseparam {   __u32 cmd;   __u32 autoneg;   __u32 rx_pause;   __u32 tx_pause; } ;   382     struct ethtool_test {   __u32 cmd;   __u32 flags;   __u32 reserved;   __u32 len;   __u64 data[0U]; } ;   404     struct ethtool_stats {   __u32 cmd;   __u32 n_stats;   __u64 data[0U]; } ;   425     struct ethtool_tcpip4_spec {   __be32 ip4src;   __be32 ip4dst;   __be16 psrc;   __be16 pdst;   __u8 tos; } ;   458     struct ethtool_ah_espip4_spec {   __be32 ip4src;   __be32 ip4dst;   __be32 spi;   __u8 tos; } ;   474     struct ethtool_usrip4_spec {   __be32 ip4src;   __be32 ip4dst;   __be32 l4_4_bytes;   __u8 tos;   __u8 ip_ver;   __u8 proto; } ;   494     union ethtool_flow_union {   struct ethtool_tcpip4_spec tcp_ip4_spec;   struct ethtool_tcpip4_spec udp_ip4_spec;   struct ethtool_tcpip4_spec sctp_ip4_spec;   struct ethtool_ah_espip4_spec ah_ip4_spec;   struct ethtool_ah_espip4_spec esp_ip4_spec;   struct ethtool_usrip4_spec usr_ip4_spec;   struct ethhdr ether_spec;   __u8 hdata[52U]; } ;   505     struct ethtool_flow_ext {   __u8 padding[2U];   unsigned char h_dest[6U];   __be16 vlan_etype;   __be16 vlan_tci;   __be32 data[2U]; } ;   524     struct ethtool_rx_flow_spec {   __u32 flow_type;   union ethtool_flow_union h_u;   struct ethtool_flow_ext h_ext;   union ethtool_flow_union m_u;   struct ethtool_flow_ext m_ext;   __u64 ring_cookie;   __u32 location; } ;   550     struct ethtool_rxnfc {   __u32 cmd;   __u32 flow_type;   __u64 data;   struct ethtool_rx_flow_spec fs;   __u32 rule_cnt;   __u32 rule_locs[0U]; } ;   684     struct ethtool_flash {   __u32 cmd;   __u32 region;   char data[128U]; } ;   692     struct ethtool_dump {   __u32 cmd;   __u32 version;   __u32 flag;   __u32 len;   __u8 data[0U]; } ;   768     struct ethtool_ts_info {   __u32 cmd;   __u32 so_timestamping;   __s32 phc_index;   __u32 tx_types;   __u32 tx_reserved[3U];   __u32 rx_filters;   __u32 rx_reserved[3U]; } ;    44     enum ethtool_phys_id_state {   ETHTOOL_ID_INACTIVE = 0,   ETHTOOL_ID_ACTIVE = 1,   ETHTOOL_ID_ON = 2,   ETHTOOL_ID_OFF = 3 } ;    79     struct ethtool_ops {   int (*get_settings)(struct net_device *, struct ethtool_cmd *);   int (*set_settings)(struct net_device *, struct ethtool_cmd *);   void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *);   int (*get_regs_len)(struct net_device *);   void (*get_regs)(struct net_device *, struct ethtool_regs *, void *);   void (*get_wol)(struct net_device *, struct 
ethtool_wolinfo *);   int (*set_wol)(struct net_device *, struct ethtool_wolinfo *);   u32  (*get_msglevel)(struct net_device *);   void (*set_msglevel)(struct net_device *, u32 );   int (*nway_reset)(struct net_device *);   u32  (*get_link)(struct net_device *);   int (*get_eeprom_len)(struct net_device *);   int (*get_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *);   int (*set_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *);   int (*get_coalesce)(struct net_device *, struct ethtool_coalesce *);   int (*set_coalesce)(struct net_device *, struct ethtool_coalesce *);   void (*get_ringparam)(struct net_device *, struct ethtool_ringparam *);   int (*set_ringparam)(struct net_device *, struct ethtool_ringparam *);   void (*get_pauseparam)(struct net_device *, struct ethtool_pauseparam *);   int (*set_pauseparam)(struct net_device *, struct ethtool_pauseparam *);   void (*self_test)(struct net_device *, struct ethtool_test *, u64 *);   void (*get_strings)(struct net_device *, u32 , u8 *);   int (*set_phys_id)(struct net_device *, enum ethtool_phys_id_state );   void (*get_ethtool_stats)(struct net_device *, struct ethtool_stats *, u64 *);   int (*begin)(struct net_device *);   void (*complete)(struct net_device *);   u32  (*get_priv_flags)(struct net_device *);   int (*set_priv_flags)(struct net_device *, u32 );   int (*get_sset_count)(struct net_device *, int);   int (*get_rxnfc)(struct net_device *, struct ethtool_rxnfc *, u32 *);   int (*set_rxnfc)(struct net_device *, struct ethtool_rxnfc *);   int (*flash_device)(struct net_device *, struct ethtool_flash *);   int (*reset)(struct net_device *, u32 *);   u32  (*get_rxfh_indir_size)(struct net_device *);   int (*get_rxfh_indir)(struct net_device *, u32 *);   int (*set_rxfh_indir)(struct net_device *, const u32 *);   void (*get_channels)(struct net_device *, struct ethtool_channels *);   int (*set_channels)(struct net_device *, struct ethtool_channels *);   int (*get_dump_flag)(struct net_device *, struct ethtool_dump *);   int (*get_dump_data)(struct net_device *, struct ethtool_dump *, void *);   int (*set_dump)(struct net_device *, struct ethtool_dump *);   int (*get_ts_info)(struct net_device *, struct ethtool_ts_info *);   int (*get_module_info)(struct net_device *, struct ethtool_modinfo *);   int (*get_module_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *);   int (*get_eee)(struct net_device *, struct ethtool_eee *);   int (*set_eee)(struct net_device *, struct ethtool_eee *); } ;   249     struct prot_inuse ;   250     struct netns_core {   struct ctl_table_header *sysctl_hdr;   int sysctl_somaxconn;   struct prot_inuse *inuse; } ;    38     struct u64_stats_sync { } ;   145     struct ipstats_mib {   u64 mibs[36U];   struct u64_stats_sync syncp; } ;    61     struct icmp_mib {   unsigned long mibs[28U]; } ;    67     struct icmpmsg_mib {   atomic_long_t mibs[512U]; } ;    72     struct icmpv6_mib {   unsigned long mibs[6U]; } ;    83     struct icmpv6msg_mib {   atomic_long_t mibs[512U]; } ;    93     struct tcp_mib {   unsigned long mibs[16U]; } ;   100     struct udp_mib {   unsigned long mibs[8U]; } ;   106     struct linux_mib {   unsigned long mibs[97U]; } ;   112     struct linux_xfrm_mib {   unsigned long mibs[29U]; } ;   118     struct netns_mib {   struct tcp_mib *tcp_statistics[1U];   struct ipstats_mib *ip_statistics[1U];   struct linux_mib *net_statistics[1U];   struct udp_mib *udp_statistics[1U];   struct udp_mib *udplite_statistics[1U];   struct icmp_mib *icmp_statistics[1U];   
struct icmpmsg_mib *icmpmsg_statistics;   struct proc_dir_entry *proc_net_devsnmp6;   struct udp_mib *udp_stats_in6[1U];   struct udp_mib *udplite_stats_in6[1U];   struct ipstats_mib *ipv6_statistics[1U];   struct icmpv6_mib *icmpv6_statistics[1U];   struct icmpv6msg_mib *icmpv6msg_statistics;   struct linux_xfrm_mib *xfrm_statistics[1U]; } ;    26     struct netns_unix {   int sysctl_max_dgram_qlen;   struct ctl_table_header *ctl; } ;    12     struct netns_packet {   struct mutex sklist_lock;   struct hlist_head sklist; } ;    14     struct netns_frags {   int nqueues;   struct list_head lru_list;   spinlock_t lru_lock;   struct percpu_counter mem;   int timeout;   int high_thresh;   int low_thresh; } ;   180     struct tcpm_hash_bucket ;   181     struct ipv4_devconf ;   182     struct fib_rules_ops ;   183     struct fib_table ;   184     struct local_ports {   seqlock_t lock;   int range[2U]; } ;    22     struct inet_peer_base ;    22     struct xt_table ;    22     struct netns_ipv4 {   struct ctl_table_header *forw_hdr;   struct ctl_table_header *frags_hdr;   struct ctl_table_header *ipv4_hdr;   struct ctl_table_header *route_hdr;   struct ctl_table_header *xfrm4_hdr;   struct ipv4_devconf *devconf_all;   struct ipv4_devconf *devconf_dflt;   struct fib_rules_ops *rules_ops;   bool fib_has_custom_rules;   struct fib_table *fib_local;   struct fib_table *fib_main;   struct fib_table *fib_default;   int fib_num_tclassid_users;   struct hlist_head *fib_table_hash;   struct sock *fibnl;   struct sock **icmp_sk;   struct inet_peer_base *peers;   struct tcpm_hash_bucket *tcp_metrics_hash;   unsigned int tcp_metrics_hash_log;   struct netns_frags frags;   struct xt_table *iptable_filter;   struct xt_table *iptable_mangle;   struct xt_table *iptable_raw;   struct xt_table *arptable_filter;   struct xt_table *iptable_security;   struct xt_table *nat_table;   int sysctl_icmp_echo_ignore_all;   int sysctl_icmp_echo_ignore_broadcasts;   int sysctl_icmp_ignore_bogus_error_responses;   int sysctl_icmp_ratelimit;   int sysctl_icmp_ratemask;   int sysctl_icmp_errors_use_inbound_ifaddr;   struct local_ports sysctl_local_ports;   int sysctl_tcp_ecn;   int sysctl_ip_no_pmtu_disc;   int sysctl_ip_fwd_use_pmtu;   kgid_t sysctl_ping_group_range[2U];   atomic_t dev_addr_genid;   struct list_head mr_tables;   struct fib_rules_ops *mr_rules_ops;   atomic_t rt_genid; } ;    90     struct neighbour ;    90     struct dst_ops {   unsigned short family;   __be16 protocol;   unsigned int gc_thresh;   int (*gc)(struct dst_ops *);   struct dst_entry * (*check)(struct dst_entry *, __u32 );   unsigned int (*default_advmss)(const struct dst_entry *);   unsigned int (*mtu)(const struct dst_entry *);   u32 * (*cow_metrics)(struct dst_entry *, unsigned long);   void (*destroy)(struct dst_entry *);   void (*ifdown)(struct dst_entry *, struct net_device *, int);   struct dst_entry * (*negative_advice)(struct dst_entry *);   void (*link_failure)(struct sk_buff *);   void (*update_pmtu)(struct dst_entry *, struct sock *, struct sk_buff *, u32 );   void (*redirect)(struct dst_entry *, struct sock *, struct sk_buff *);   int (*local_out)(struct sk_buff *);   struct neighbour * (*neigh_lookup)(const struct dst_entry *, struct sk_buff *, const void *);   struct kmem_cache *kmem_cachep;   struct percpu_counter pcpuc_entries; } ;    73     struct netns_sysctl_ipv6 {   struct ctl_table_header *hdr;   struct ctl_table_header *route_hdr;   struct ctl_table_header *icmp_hdr;   struct ctl_table_header *frags_hdr;   struct 
ctl_table_header *xfrm6_hdr;   int bindv6only;   int flush_delay;   int ip6_rt_max_size;   int ip6_rt_gc_min_interval;   int ip6_rt_gc_timeout;   int ip6_rt_gc_interval;   int ip6_rt_gc_elasticity;   int ip6_rt_mtu_expires;   int ip6_rt_min_advmss;   int flowlabel_consistency;   int icmpv6_time;   int anycast_src_echo_reply; } ;    34     struct ipv6_devconf ;    34     struct rt6_info ;    34     struct rt6_statistics ;    34     struct fib6_table ;    34     struct netns_ipv6 {   struct netns_sysctl_ipv6 sysctl;   struct ipv6_devconf *devconf_all;   struct ipv6_devconf *devconf_dflt;   struct inet_peer_base *peers;   struct netns_frags frags;   struct xt_table *ip6table_filter;   struct xt_table *ip6table_mangle;   struct xt_table *ip6table_raw;   struct xt_table *ip6table_security;   struct xt_table *ip6table_nat;   struct rt6_info *ip6_null_entry;   struct rt6_statistics *rt6_stats;   struct timer_list ip6_fib_timer;   struct hlist_head *fib_table_hash;   struct fib6_table *fib6_main_tbl;   struct dst_ops ip6_dst_ops;   unsigned int ip6_rt_gc_expire;   unsigned long ip6_rt_last_gc;   struct rt6_info *ip6_prohibit_entry;   struct rt6_info *ip6_blk_hole_entry;   struct fib6_table *fib6_local_tbl;   struct fib_rules_ops *fib6_rules_ops;   struct sock **icmp_sk;   struct sock *ndisc_sk;   struct sock *tcp_sk;   struct sock *igmp_sk;   struct list_head mr6_tables;   struct fib_rules_ops *mr6_rules_ops;   atomic_t dev_addr_genid;   atomic_t rt_genid; } ;    79     struct netns_nf_frag {   struct netns_sysctl_ipv6 sysctl;   struct netns_frags frags; } ;    85     struct sctp_mib ;    86     struct netns_sctp {   struct sctp_mib *sctp_statistics[1U];   struct proc_dir_entry *proc_net_sctp;   struct ctl_table_header *sysctl_header;   struct sock *ctl_sock;   struct list_head local_addr_list;   struct list_head addr_waitq;   struct timer_list addr_wq_timer;   struct list_head auto_asconf_splist;   spinlock_t addr_wq_lock;   spinlock_t local_addr_lock;   unsigned int rto_initial;   unsigned int rto_min;   unsigned int rto_max;   int rto_alpha;   int rto_beta;   int max_burst;   int cookie_preserve_enable;   char *sctp_hmac_alg;   unsigned int valid_cookie_life;   unsigned int sack_timeout;   unsigned int hb_interval;   int max_retrans_association;   int max_retrans_path;   int max_retrans_init;   int pf_retrans;   int sndbuf_policy;   int rcvbuf_policy;   int default_auto_asconf;   int addip_enable;   int addip_noauth;   int prsctp_enable;   int auth_enable;   int scope_policy;   int rwnd_upd_shift;   unsigned long max_autoclose; } ;   133     struct netns_dccp {   struct sock *v4_ctl_sk;   struct sock *v6_ctl_sk; } ;   324     struct nlattr ;   337     struct nf_logger ;   338     struct netns_nf {   struct proc_dir_entry *proc_netfilter;   const struct nf_logger *nf_loggers[13U];   struct ctl_table_header *nf_log_dir_header; } ;    17     struct ebt_table ;    18     struct netns_xt {   struct list_head tables[13U];   bool notrack_deprecated_warning;   struct ebt_table *broute_table;   struct ebt_table *frame_filter;   struct ebt_table *frame_nat;   bool ulog_warn_deprecated;   bool ebt_ulog_warn_deprecated; } ;    24     struct hlist_nulls_node ;    24     struct hlist_nulls_head {   struct hlist_nulls_node *first; } ;    20     struct hlist_nulls_node {   struct hlist_nulls_node *next;   struct hlist_nulls_node **pprev; } ;    32     struct nf_proto_net {   struct ctl_table_header *ctl_table_header;   struct ctl_table *ctl_table;   struct ctl_table_header *ctl_compat_header;   struct 
ctl_table *ctl_compat_table;   unsigned int users; } ;    23     struct nf_generic_net {   struct nf_proto_net pn;   unsigned int timeout; } ;    28     struct nf_tcp_net {   struct nf_proto_net pn;   unsigned int timeouts[14U];   unsigned int tcp_loose;   unsigned int tcp_be_liberal;   unsigned int tcp_max_retrans; } ;    42     struct nf_udp_net {   struct nf_proto_net pn;   unsigned int timeouts[2U]; } ;    47     struct nf_icmp_net {   struct nf_proto_net pn;   unsigned int timeout; } ;    52     struct nf_ip_net {   struct nf_generic_net generic;   struct nf_tcp_net tcp;   struct nf_udp_net udp;   struct nf_icmp_net icmp;   struct nf_icmp_net icmpv6;   struct ctl_table_header *ctl_table_header;   struct ctl_table *ctl_table; } ;    63     struct ip_conntrack_stat ;    63     struct nf_ct_event_notifier ;    63     struct nf_exp_event_notifier ;    63     struct netns_ct {   atomic_t count;   unsigned int expect_count;   struct ctl_table_header *sysctl_header;   struct ctl_table_header *acct_sysctl_header;   struct ctl_table_header *tstamp_sysctl_header;   struct ctl_table_header *event_sysctl_header;   struct ctl_table_header *helper_sysctl_header;   char *slabname;   unsigned int sysctl_log_invalid;   unsigned int sysctl_events_retry_timeout;   int sysctl_events;   int sysctl_acct;   int sysctl_auto_assign_helper;   bool auto_assign_helper_warned;   int sysctl_tstamp;   int sysctl_checksum;   unsigned int htable_size;   struct kmem_cache *nf_conntrack_cachep;   struct hlist_nulls_head *hash;   struct hlist_head *expect_hash;   struct hlist_nulls_head unconfirmed;   struct hlist_nulls_head dying;   struct hlist_nulls_head tmpl;   struct ip_conntrack_stat *stat;   struct nf_ct_event_notifier *nf_conntrack_event_cb;   struct nf_exp_event_notifier *nf_expect_event_cb;   struct nf_ip_net nf_ct_proto;   unsigned int labels_used;   u8 label_words;   struct hlist_head *nat_bysource;   unsigned int nat_htable_size; } ;   104     struct nft_af_info ;   105     struct netns_nftables {   struct list_head af_info;   struct list_head commit_list;   struct nft_af_info *ipv4;   struct nft_af_info *ipv6;   struct nft_af_info *inet;   struct nft_af_info *arp;   struct nft_af_info *bridge;   u8 gencursor;   u8 genctr; } ;   489     struct xfrm_policy_hash {   struct hlist_head *table;   unsigned int hmask; } ;    16     struct netns_xfrm {   struct list_head state_all;   struct hlist_head *state_bydst;   struct hlist_head *state_bysrc;   struct hlist_head *state_byspi;   unsigned int state_hmask;   unsigned int state_num;   struct work_struct state_hash_work;   struct hlist_head state_gc_list;   struct work_struct state_gc_work;   struct list_head policy_all;   struct hlist_head *policy_byidx;   unsigned int policy_idx_hmask;   struct hlist_head policy_inexact[6U];   struct xfrm_policy_hash policy_bydst[6U];   unsigned int policy_count[6U];   struct work_struct policy_hash_work;   struct sock *nlsk;   struct sock *nlsk_stash;   u32 sysctl_aevent_etime;   u32 sysctl_aevent_rseqth;   int sysctl_larval_drop;   u32 sysctl_acq_expires;   struct ctl_table_header *sysctl_hdr;   struct dst_ops xfrm4_dst_ops;   struct dst_ops xfrm6_dst_ops;   spinlock_t xfrm_state_lock;   spinlock_t xfrm_policy_sk_bundle_lock;   rwlock_t xfrm_policy_lock;   struct mutex xfrm_cfg_mutex; } ;    65     struct net_generic ;    66     struct netns_ipvs ;    67     struct net {   atomic_t passive;   atomic_t count;   spinlock_t rules_mod_lock;   struct list_head list;   struct list_head cleanup_list;   struct list_head exit_list;   
struct user_namespace *user_ns;   unsigned int proc_inum;   struct proc_dir_entry *proc_net;   struct proc_dir_entry *proc_net_stat;   struct ctl_table_set sysctls;   struct sock *rtnl;   struct sock *genl_sock;   struct list_head dev_base_head;   struct hlist_head *dev_name_head;   struct hlist_head *dev_index_head;   unsigned int dev_base_seq;   int ifindex;   unsigned int dev_unreg_count;   struct list_head rules_ops;   struct net_device *loopback_dev;   struct netns_core core;   struct netns_mib mib;   struct netns_packet packet;   struct netns_unix unx;   struct netns_ipv4 ipv4;   struct netns_ipv6 ipv6;   struct netns_sctp sctp;   struct netns_dccp dccp;   struct netns_nf nf;   struct netns_xt xt;   struct netns_ct ct;   struct netns_nftables nft;   struct netns_nf_frag nf_frag;   struct sock *nfnl;   struct sock *nfnl_stash;   struct sk_buff_head wext_nlevents;   struct net_generic *gen;   struct netns_xfrm xfrm;   struct netns_ipvs *ipvs;   struct sock *diag_nlsk;   atomic_t fnhe_genid; } ;   395     struct dsa_chip_data {   struct device *mii_bus;   int sw_addr;   char *port_names[12U];   s8 *rtable; } ;    46     struct dsa_platform_data {   struct device *netdev;   int nr_chips;   struct dsa_chip_data *chip; } ;    61     struct dsa_switch ;    61     struct dsa_switch_tree {   struct dsa_platform_data *pd;   struct net_device *master_netdev;   __be16 tag_protocol;   s8 cpu_switch;   s8 cpu_port;   int link_poll_needed;   struct work_struct link_poll_work;   struct timer_list link_poll_timer;   struct dsa_switch *ds[4U]; } ;    94     struct dsa_switch_driver ;    94     struct mii_bus ;    94     struct dsa_switch {   struct dsa_switch_tree *dst;   int index;   struct dsa_chip_data *pd;   struct dsa_switch_driver *drv;   struct mii_bus *master_mii_bus;   u32 dsa_port_mask;   u32 phys_port_mask;   struct mii_bus *slave_mii_bus;   struct net_device *ports[12U]; } ;   146     struct dsa_switch_driver {   struct list_head list;   __be16 tag_protocol;   int priv_size;   char * (*probe)(struct mii_bus *, int);   int (*setup)(struct dsa_switch *);   int (*set_addr)(struct dsa_switch *, u8 *);   int (*phy_read)(struct dsa_switch *, int, int);   int (*phy_write)(struct dsa_switch *, int, int, u16 );   void (*poll_link)(struct dsa_switch *);   void (*get_strings)(struct dsa_switch *, int, uint8_t *);   void (*get_ethtool_stats)(struct dsa_switch *, int, uint64_t *);   int (*get_sset_count)(struct dsa_switch *); } ;   200     struct ieee_ets {   __u8 willing;   __u8 ets_cap;   __u8 cbs;   __u8 tc_tx_bw[8U];   __u8 tc_rx_bw[8U];   __u8 tc_tsa[8U];   __u8 prio_tc[8U];   __u8 tc_reco_bw[8U];   __u8 tc_reco_tsa[8U];   __u8 reco_prio_tc[8U]; } ;    69     struct ieee_maxrate {   __u64 tc_maxrate[8U]; } ;    80     struct ieee_pfc {   __u8 pfc_cap;   __u8 pfc_en;   __u8 mbc;   __u16 delay;   __u64 requests[8U];   __u64 indications[8U]; } ;   100     struct cee_pg {   __u8 willing;   __u8 error;   __u8 pg_en;   __u8 tcs_supported;   __u8 pg_bw[8U];   __u8 prio_pg[8U]; } ;   123     struct cee_pfc {   __u8 willing;   __u8 error;   __u8 pfc_en;   __u8 tcs_supported; } ;   138     struct dcb_app {   __u8 selector;   __u8 priority;   __u16 protocol; } ;   167     struct dcb_peer_app_info {   __u8 willing;   __u8 error; } ;    40     struct dcbnl_rtnl_ops {   int (*ieee_getets)(struct net_device *, struct ieee_ets *);   int (*ieee_setets)(struct net_device *, struct ieee_ets *);   int (*ieee_getmaxrate)(struct net_device *, struct ieee_maxrate *);   int (*ieee_setmaxrate)(struct net_device *, struct 
ieee_maxrate *);   int (*ieee_getpfc)(struct net_device *, struct ieee_pfc *);   int (*ieee_setpfc)(struct net_device *, struct ieee_pfc *);   int (*ieee_getapp)(struct net_device *, struct dcb_app *);   int (*ieee_setapp)(struct net_device *, struct dcb_app *);   int (*ieee_delapp)(struct net_device *, struct dcb_app *);   int (*ieee_peer_getets)(struct net_device *, struct ieee_ets *);   int (*ieee_peer_getpfc)(struct net_device *, struct ieee_pfc *);   u8  (*getstate)(struct net_device *);   u8  (*setstate)(struct net_device *, u8 );   void (*getpermhwaddr)(struct net_device *, u8 *);   void (*setpgtccfgtx)(struct net_device *, int, u8 , u8 , u8 , u8 );   void (*setpgbwgcfgtx)(struct net_device *, int, u8 );   void (*setpgtccfgrx)(struct net_device *, int, u8 , u8 , u8 , u8 );   void (*setpgbwgcfgrx)(struct net_device *, int, u8 );   void (*getpgtccfgtx)(struct net_device *, int, u8 *, u8 *, u8 *, u8 *);   void (*getpgbwgcfgtx)(struct net_device *, int, u8 *);   void (*getpgtccfgrx)(struct net_device *, int, u8 *, u8 *, u8 *, u8 *);   void (*getpgbwgcfgrx)(struct net_device *, int, u8 *);   void (*setpfccfg)(struct net_device *, int, u8 );   void (*getpfccfg)(struct net_device *, int, u8 *);   u8  (*setall)(struct net_device *);   u8  (*getcap)(struct net_device *, int, u8 *);   int (*getnumtcs)(struct net_device *, int, u8 *);   int (*setnumtcs)(struct net_device *, int, u8 );   u8  (*getpfcstate)(struct net_device *);   void (*setpfcstate)(struct net_device *, u8 );   void (*getbcncfg)(struct net_device *, int, u32 *);   void (*setbcncfg)(struct net_device *, int, u32 );   void (*getbcnrp)(struct net_device *, int, u8 *);   void (*setbcnrp)(struct net_device *, int, u8 );   u8  (*setapp)(struct net_device *, u8 , u16 , u8 );   u8  (*getapp)(struct net_device *, u8 , u16 );   u8  (*getfeatcfg)(struct net_device *, int, u8 *);   u8  (*setfeatcfg)(struct net_device *, int, u8 );   u8  (*getdcbx)(struct net_device *);   u8  (*setdcbx)(struct net_device *, u8 );   int (*peer_getappinfo)(struct net_device *, struct dcb_peer_app_info *, u16 *);   int (*peer_getapptable)(struct net_device *, struct dcb_app *);   int (*cee_peer_getpg)(struct net_device *, struct cee_pg *);   int (*cee_peer_getpfc)(struct net_device *, struct cee_pfc *); } ;   102     struct taskstats {   __u16 version;   __u32 ac_exitcode;   __u8 ac_flag;   __u8 ac_nice;   __u64 cpu_count;   __u64 cpu_delay_total;   __u64 blkio_count;   __u64 blkio_delay_total;   __u64 swapin_count;   __u64 swapin_delay_total;   __u64 cpu_run_real_total;   __u64 cpu_run_virtual_total;   char ac_comm[32U];   __u8 ac_sched;   __u8 ac_pad[3U];   __u32 ac_uid;   __u32 ac_gid;   __u32 ac_pid;   __u32 ac_ppid;   __u32 ac_btime;   __u64 ac_etime;   __u64 ac_utime;   __u64 ac_stime;   __u64 ac_minflt;   __u64 ac_majflt;   __u64 coremem;   __u64 virtmem;   __u64 hiwater_rss;   __u64 hiwater_vm;   __u64 read_char;   __u64 write_char;   __u64 read_syscalls;   __u64 write_syscalls;   __u64 read_bytes;   __u64 write_bytes;   __u64 cancelled_write_bytes;   __u64 nvcsw;   __u64 nivcsw;   __u64 ac_utimescaled;   __u64 ac_stimescaled;   __u64 cpu_scaled_run_real_total;   __u64 freepages_count;   __u64 freepages_delay_total; } ;    55     struct xattr_handler {   const char *prefix;   int flags;   size_t  (*list)(struct dentry *, char *, size_t , const char *, size_t , int);   int (*get)(struct dentry *, const char *, void *, size_t , int);   int (*set)(struct dentry *, const char *, const void *, size_t , int, int); } ;    53     struct simple_xattrs {   
struct list_head head;   spinlock_t lock; } ;    98     struct percpu_ref ;    54     typedef void percpu_ref_func_t(struct percpu_ref *);    55     struct percpu_ref {   atomic_t count;   unsigned int *pcpu_count;   percpu_ref_func_t *release;   percpu_ref_func_t *confirm_kill;   struct callback_head rcu; } ;   173     struct cgroupfs_root ;   174     struct cgroup_subsys ;   175     struct cgroup ;    62     struct cgroup_subsys_state {   struct cgroup *cgroup;   struct cgroup_subsys *ss;   struct percpu_ref refcnt;   struct cgroup_subsys_state *parent;   unsigned long flags;   struct callback_head callback_head;   struct work_struct destroy_work; } ;   142     struct cgroup_name {   struct callback_head callback_head;   char name[]; } ;   160     struct cgroup {   unsigned long flags;   int id;   int nr_css;   struct list_head sibling;   struct list_head children;   struct list_head files;   struct cgroup *parent;   struct dentry *dentry;   u64 serial_nr;   struct cgroup_name *name;   struct cgroup_subsys_state *subsys[12U];   struct cgroupfs_root *root;   struct list_head cset_links;   struct list_head release_list;   struct list_head pidlists;   struct mutex pidlist_mutex;   struct cgroup_subsys_state dummy_css;   struct callback_head callback_head;   struct work_struct destroy_work;   struct simple_xattrs xattrs; } ;   252     struct cgroupfs_root {   struct super_block *sb;   unsigned long subsys_mask;   int hierarchy_id;   struct cgroup top_cgroup;   int number_of_cgroups;   struct list_head root_list;   unsigned long flags;   struct idr cgroup_idr;   char release_agent_path[4096U];   char name[64U]; } ;   342     struct css_set {   atomic_t refcount;   struct hlist_node hlist;   struct list_head tasks;   struct list_head cgrp_links;   struct cgroup_subsys_state *subsys[12U];   struct callback_head callback_head; } ;   392     struct cftype {   char name[64U];   int private;   umode_t mode;   size_t max_write_len;   unsigned int flags;   struct cgroup_subsys *ss;   u64  (*read_u64)(struct cgroup_subsys_state *, struct cftype *);   s64  (*read_s64)(struct cgroup_subsys_state *, struct cftype *);   int (*seq_show)(struct seq_file *, void *);   void * (*seq_start)(struct seq_file *, loff_t *);   void * (*seq_next)(struct seq_file *, void *, loff_t *);   void (*seq_stop)(struct seq_file *, void *);   int (*write_u64)(struct cgroup_subsys_state *, struct cftype *, u64 );   int (*write_s64)(struct cgroup_subsys_state *, struct cftype *, s64 );   int (*write_string)(struct cgroup_subsys_state *, struct cftype *, const char *);   int (*trigger)(struct cgroup_subsys_state *, unsigned int); } ;   479     struct cftype_set {   struct list_head node;   struct cftype *cfts; } ;   546     struct cgroup_taskset ;   557     struct cgroup_subsys {   struct cgroup_subsys_state * (*css_alloc)(struct cgroup_subsys_state *);   int (*css_online)(struct cgroup_subsys_state *);   void (*css_offline)(struct cgroup_subsys_state *);   void (*css_free)(struct cgroup_subsys_state *);   int (*can_attach)(struct cgroup_subsys_state *, struct cgroup_taskset *);   void (*cancel_attach)(struct cgroup_subsys_state *, struct cgroup_taskset *);   void (*attach)(struct cgroup_subsys_state *, struct cgroup_taskset *);   void (*fork)(struct task_struct *);   void (*exit)(struct cgroup_subsys_state *, struct cgroup_subsys_state *, struct task_struct *);   void (*bind)(struct cgroup_subsys_state *);   int subsys_id;   int disabled;   int early_init;   bool broken_hierarchy;   bool warned_broken_hierarchy;   const char 
*name;   struct cgroupfs_root *root;   struct list_head cftsets;   struct cftype *base_cftypes;   struct cftype_set base_cftset;   struct module *module; } ;    60     struct netprio_map {   struct callback_head rcu;   u32 priomap_len;   u32 priomap[]; } ;  3157     struct mnt_namespace ;  3158     struct ipc_namespace ;  3159     struct nsproxy {   atomic_t count;   struct uts_namespace *uts_ns;   struct ipc_namespace *ipc_ns;   struct mnt_namespace *mnt_ns;   struct pid_namespace *pid_ns_for_children;   struct net *net_ns; } ;    41     struct nlmsghdr {   __u32 nlmsg_len;   __u16 nlmsg_type;   __u16 nlmsg_flags;   __u32 nlmsg_seq;   __u32 nlmsg_pid; } ;   145     struct nlattr {   __u16 nla_len;   __u16 nla_type; } ;   102     struct netlink_callback {   struct sk_buff *skb;   const struct nlmsghdr *nlh;   int (*dump)(struct sk_buff *, struct netlink_callback *);   int (*done)(struct netlink_callback *);   void *data;   struct module *module;   u16 family;   u16 min_dump_alloc;   unsigned int prev_seq;   unsigned int seq;   long args[6U]; } ;   171     struct ndmsg {   __u8 ndm_family;   __u8 ndm_pad1;   __u16 ndm_pad2;   __s32 ndm_ifindex;   __u16 ndm_state;   __u8 ndm_flags;   __u8 ndm_type; } ;    39     struct rtnl_link_stats64 {   __u64 rx_packets;   __u64 tx_packets;   __u64 rx_bytes;   __u64 tx_bytes;   __u64 rx_errors;   __u64 tx_errors;   __u64 rx_dropped;   __u64 tx_dropped;   __u64 multicast;   __u64 collisions;   __u64 rx_length_errors;   __u64 rx_over_errors;   __u64 rx_crc_errors;   __u64 rx_frame_errors;   __u64 rx_fifo_errors;   __u64 rx_missed_errors;   __u64 tx_aborted_errors;   __u64 tx_carrier_errors;   __u64 tx_fifo_errors;   __u64 tx_heartbeat_errors;   __u64 tx_window_errors;   __u64 rx_compressed;   __u64 tx_compressed; } ;   536     struct ifla_vf_info {   __u32 vf;   __u8 mac[32U];   __u32 vlan;   __u32 qos;   __u32 tx_rate;   __u32 spoofchk;   __u32 linkstate; } ;    27     struct netpoll_info ;    28     struct phy_device ;    29     struct wireless_dev ;    64     enum netdev_tx {   __NETDEV_TX_MIN = -2147483648,   NETDEV_TX_OK = 0,   NETDEV_TX_BUSY = 16,   NETDEV_TX_LOCKED = 32 } ;   116     typedef enum netdev_tx netdev_tx_t;   135     struct net_device_stats {   unsigned long rx_packets;   unsigned long tx_packets;   unsigned long rx_bytes;   unsigned long tx_bytes;   unsigned long rx_errors;   unsigned long tx_errors;   unsigned long rx_dropped;   unsigned long tx_dropped;   unsigned long multicast;   unsigned long collisions;   unsigned long rx_length_errors;   unsigned long rx_over_errors;   unsigned long rx_crc_errors;   unsigned long rx_frame_errors;   unsigned long rx_fifo_errors;   unsigned long rx_missed_errors;   unsigned long tx_aborted_errors;   unsigned long tx_carrier_errors;   unsigned long tx_fifo_errors;   unsigned long tx_heartbeat_errors;   unsigned long tx_window_errors;   unsigned long rx_compressed;   unsigned long tx_compressed; } ;   196     struct neigh_parms ;   217     struct netdev_hw_addr_list {   struct list_head list;   int count; } ;   222     struct hh_cache {   u16 hh_len;   u16 __pad;   seqlock_t hh_lock;   unsigned long hh_data[16U]; } ;   251     struct header_ops {   int (*create)(struct sk_buff *, struct net_device *, unsigned short, const void *, const void *, unsigned int);   int (*parse)(const struct sk_buff *, unsigned char *);   int (*rebuild)(struct sk_buff *);   int (*cache)(const struct neighbour *, struct hh_cache *, __be16 );   void (*cache_update)(struct hh_cache *, const struct net_device *, const unsigned 
char *); } ;   302     struct napi_struct {   struct list_head poll_list;   unsigned long state;   int weight;   unsigned int gro_count;   int (*poll)(struct napi_struct *, int);   spinlock_t poll_lock;   int poll_owner;   struct net_device *dev;   struct sk_buff *gro_list;   struct sk_buff *skb;   struct list_head dev_list;   struct hlist_node napi_hash_node;   unsigned int napi_id; } ;   346     enum rx_handler_result {   RX_HANDLER_CONSUMED = 0,   RX_HANDLER_ANOTHER = 1,   RX_HANDLER_EXACT = 2,   RX_HANDLER_PASS = 3 } ;   394     typedef enum rx_handler_result rx_handler_result_t;   395     typedef rx_handler_result_t  rx_handler_func_t(struct sk_buff **);   532     struct Qdisc ;   532     struct netdev_queue {   struct net_device *dev;   struct Qdisc *qdisc;   struct Qdisc *qdisc_sleeping;   struct kobject kobj;   int numa_node;   spinlock_t _xmit_lock;   int xmit_lock_owner;   unsigned long trans_start;   unsigned long trans_timeout;   unsigned long state;   struct dql dql; } ;   594     struct rps_map {   unsigned int len;   struct callback_head rcu;   u16 cpus[0U]; } ;   606     struct rps_dev_flow {   u16 cpu;   u16 filter;   unsigned int last_qtail; } ;   618     struct rps_dev_flow_table {   unsigned int mask;   struct callback_head rcu;   struct rps_dev_flow flows[0U]; } ;   669     struct netdev_rx_queue {   struct rps_map *rps_map;   struct rps_dev_flow_table *rps_flow_table;   struct kobject kobj;   struct net_device *dev; } ;   692     struct xps_map {   unsigned int len;   unsigned int alloc_len;   struct callback_head rcu;   u16 queues[0U]; } ;   705     struct xps_dev_maps {   struct callback_head rcu;   struct xps_map *cpu_map[0U]; } ;   716     struct netdev_tc_txq {   u16 count;   u16 offset; } ;   727     struct netdev_fcoe_hbainfo {   char manufacturer[64U];   char serial_number[64U];   char hardware_version[64U];   char driver_version[64U];   char optionrom_version[64U];   char firmware_version[64U];   char model[256U];   char model_description[256U]; } ;   743     struct netdev_phys_port_id {   unsigned char id[32U];   unsigned char id_len; } ;   756     struct net_device_ops {   int (*ndo_init)(struct net_device *);   void (*ndo_uninit)(struct net_device *);   int (*ndo_open)(struct net_device *);   int (*ndo_stop)(struct net_device *);   netdev_tx_t  (*ndo_start_xmit)(struct sk_buff *, struct net_device *);   u16  (*ndo_select_queue)(struct net_device *, struct sk_buff *, void *, u16  (*)(struct net_device *, struct sk_buff *));   void (*ndo_change_rx_flags)(struct net_device *, int);   void (*ndo_set_rx_mode)(struct net_device *);   int (*ndo_set_mac_address)(struct net_device *, void *);   int (*ndo_validate_addr)(struct net_device *);   int (*ndo_do_ioctl)(struct net_device *, struct ifreq *, int);   int (*ndo_set_config)(struct net_device *, struct ifmap *);   int (*ndo_change_mtu)(struct net_device *, int);   int (*ndo_neigh_setup)(struct net_device *, struct neigh_parms *);   void (*ndo_tx_timeout)(struct net_device *);   struct rtnl_link_stats64 * (*ndo_get_stats64)(struct net_device *, struct rtnl_link_stats64 *);   struct net_device_stats * (*ndo_get_stats)(struct net_device *);   int (*ndo_vlan_rx_add_vid)(struct net_device *, __be16 , u16 );   int (*ndo_vlan_rx_kill_vid)(struct net_device *, __be16 , u16 );   void (*ndo_poll_controller)(struct net_device *);   int (*ndo_netpoll_setup)(struct net_device *, struct netpoll_info *, gfp_t );   void (*ndo_netpoll_cleanup)(struct net_device *);   int (*ndo_busy_poll)(struct napi_struct *);   int 
(*ndo_set_vf_mac)(struct net_device *, int, u8 *);   int (*ndo_set_vf_vlan)(struct net_device *, int, u16 , u8 );   int (*ndo_set_vf_tx_rate)(struct net_device *, int, int);   int (*ndo_set_vf_spoofchk)(struct net_device *, int, bool );   int (*ndo_get_vf_config)(struct net_device *, int, struct ifla_vf_info *);   int (*ndo_set_vf_link_state)(struct net_device *, int, int);   int (*ndo_set_vf_port)(struct net_device *, int, struct nlattr **);   int (*ndo_get_vf_port)(struct net_device *, int, struct sk_buff *);   int (*ndo_setup_tc)(struct net_device *, u8 );   int (*ndo_fcoe_enable)(struct net_device *);   int (*ndo_fcoe_disable)(struct net_device *);   int (*ndo_fcoe_ddp_setup)(struct net_device *, u16 , struct scatterlist *, unsigned int);   int (*ndo_fcoe_ddp_done)(struct net_device *, u16 );   int (*ndo_fcoe_ddp_target)(struct net_device *, u16 , struct scatterlist *, unsigned int);   int (*ndo_fcoe_get_hbainfo)(struct net_device *, struct netdev_fcoe_hbainfo *);   int (*ndo_fcoe_get_wwn)(struct net_device *, u64 *, int);   int (*ndo_rx_flow_steer)(struct net_device *, const struct sk_buff *, u16 , u32 );   int (*ndo_add_slave)(struct net_device *, struct net_device *);   int (*ndo_del_slave)(struct net_device *, struct net_device *);   netdev_features_t  (*ndo_fix_features)(struct net_device *, netdev_features_t );   int (*ndo_set_features)(struct net_device *, netdev_features_t );   int (*ndo_neigh_construct)(struct neighbour *);   void (*ndo_neigh_destroy)(struct neighbour *);   int (*ndo_fdb_add)(struct ndmsg *, struct nlattr **, struct net_device *, const unsigned char *, u16 );   int (*ndo_fdb_del)(struct ndmsg *, struct nlattr **, struct net_device *, const unsigned char *);   int (*ndo_fdb_dump)(struct sk_buff *, struct netlink_callback *, struct net_device *, int);   int (*ndo_bridge_setlink)(struct net_device *, struct nlmsghdr *);   int (*ndo_bridge_getlink)(struct sk_buff *, u32 , u32 , struct net_device *, u32 );   int (*ndo_bridge_dellink)(struct net_device *, struct nlmsghdr *);   int (*ndo_change_carrier)(struct net_device *, bool );   int (*ndo_get_phys_port_id)(struct net_device *, struct netdev_phys_port_id *);   void (*ndo_add_vxlan_port)(struct net_device *, sa_family_t , __be16 );   void (*ndo_del_vxlan_port)(struct net_device *, sa_family_t , __be16 );   void * (*ndo_dfwd_add_station)(struct net_device *, struct net_device *);   void (*ndo_dfwd_del_station)(struct net_device *, void *);   netdev_tx_t  (*ndo_dfwd_start_xmit)(struct sk_buff *, struct net_device *, void *); } ;  1161     struct __anonstruct_adj_list_244 {   struct list_head upper;   struct list_head lower; } ;  1161     struct __anonstruct_all_adj_list_245 {   struct list_head upper;   struct list_head lower; } ;  1161     struct iw_handler_def ;  1161     struct iw_public_data ;  1161     struct forwarding_accel_ops ;  1161     struct vlan_info ;  1161     struct tipc_bearer ;  1161     struct in_device ;  1161     struct dn_dev ;  1161     struct inet6_dev ;  1161     struct cpu_rmap ;  1161     struct pcpu_lstats ;  1161     struct pcpu_sw_netstats ;  1161     struct pcpu_dstats ;  1161     struct pcpu_vstats ;  1161     union __anonunion____missing_field_name_246 {   void *ml_priv;   struct pcpu_lstats *lstats;   struct pcpu_sw_netstats *tstats;   struct pcpu_dstats *dstats;   struct pcpu_vstats *vstats; } ;  1161     struct garp_port ;  1161     struct mrp_port ;  1161     struct rtnl_link_ops ;  1161     struct net_device {   char name[16U];   struct hlist_node name_hlist;   char *ifalias;   
unsigned long mem_end;   unsigned long mem_start;   unsigned long base_addr;   int irq;   unsigned long state;   struct list_head dev_list;   struct list_head napi_list;   struct list_head unreg_list;   struct list_head close_list;   struct __anonstruct_adj_list_244 adj_list;   struct __anonstruct_all_adj_list_245 all_adj_list;   netdev_features_t features;   netdev_features_t hw_features;   netdev_features_t wanted_features;   netdev_features_t vlan_features;   netdev_features_t hw_enc_features;   netdev_features_t mpls_features;   int ifindex;   int iflink;   struct net_device_stats stats;   atomic_long_t rx_dropped;   const struct iw_handler_def *wireless_handlers;   struct iw_public_data *wireless_data;   const struct net_device_ops *netdev_ops;   const struct ethtool_ops *ethtool_ops;   const struct forwarding_accel_ops *fwd_ops;   const struct header_ops *header_ops;   unsigned int flags;   unsigned int priv_flags;   unsigned short gflags;   unsigned short padded;   unsigned char operstate;   unsigned char link_mode;   unsigned char if_port;   unsigned char dma;   unsigned int mtu;   unsigned short type;   unsigned short hard_header_len;   unsigned short needed_headroom;   unsigned short needed_tailroom;   unsigned char perm_addr[32U];   unsigned char addr_assign_type;   unsigned char addr_len;   unsigned short neigh_priv_len;   unsigned short dev_id;   spinlock_t addr_list_lock;   struct netdev_hw_addr_list uc;   struct netdev_hw_addr_list mc;   struct netdev_hw_addr_list dev_addrs;   struct kset *queues_kset;   bool uc_promisc;   unsigned int promiscuity;   unsigned int allmulti;   struct vlan_info *vlan_info;   struct dsa_switch_tree *dsa_ptr;   struct tipc_bearer *tipc_ptr;   void *atalk_ptr;   struct in_device *ip_ptr;   struct dn_dev *dn_ptr;   struct inet6_dev *ip6_ptr;   void *ax25_ptr;   struct wireless_dev *ieee80211_ptr;   unsigned long last_rx;   unsigned char *dev_addr;   struct netdev_rx_queue *_rx;   unsigned int num_rx_queues;   unsigned int real_num_rx_queues;   rx_handler_func_t *rx_handler;   void *rx_handler_data;   struct netdev_queue *ingress_queue;   unsigned char broadcast[32U];   struct netdev_queue *_tx;   unsigned int num_tx_queues;   unsigned int real_num_tx_queues;   struct Qdisc *qdisc;   unsigned long tx_queue_len;   spinlock_t tx_global_lock;   struct xps_dev_maps *xps_maps;   struct cpu_rmap *rx_cpu_rmap;   unsigned long trans_start;   int watchdog_timeo;   struct timer_list watchdog_timer;   int *pcpu_refcnt;   struct list_head todo_list;   struct hlist_node index_hlist;   struct list_head link_watch_list;   unsigned char reg_state;   bool dismantle;   unsigned short rtnl_link_state;   void (*destructor)(struct net_device *);   struct netpoll_info *npinfo;   struct net *nd_net;   union __anonunion____missing_field_name_246 __annonCompField76;   struct garp_port *garp_port;   struct mrp_port *mrp_port;   struct device dev;   const struct attribute_group *sysfs_groups[4U];   const struct attribute_group *sysfs_rx_queue_group;   const struct rtnl_link_ops *rtnl_link_ops;   unsigned int gso_max_size;   u16 gso_max_segs;   const struct dcbnl_rtnl_ops *dcbnl_ops;   u8 num_tc;   struct netdev_tc_txq tc_to_txq[16U];   u8 prio_tc_map[16U];   unsigned int fcoe_ddp_xid;   struct netprio_map *priomap;   struct phy_device *phydev;   struct lock_class_key *qdisc_tx_busylock;   int group;   struct pm_qos_request pm_qos_req; } ;  1722     struct pcpu_sw_netstats {   u64 rx_packets;   u64 rx_bytes;   u64 tx_packets;   u64 tx_bytes;   struct u64_stats_sync syncp; } ;  
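The net_device and net_device_ops definitions above are the interface this driver implements; netdev_priv() (declared further down in the trace) returns the driver-private area allocated behind struct net_device. A minimal sketch of the usual wiring follows; the demo_* names are illustrative only and not part of this trace:

struct demo_priv {
        struct net_device_stats stats;   /* per-device counters, as defined above */
};

static netdev_tx_t demo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
        struct demo_priv *priv = netdev_priv(ndev);

        priv->stats.tx_packets++;
        priv->stats.tx_bytes += skb->len;
        dev_kfree_skb_irq(skb);          /* sketch: pretend the hardware consumed it */
        return NETDEV_TX_OK;             /* from enum netdev_tx above */
}

static const struct net_device_ops demo_netdev_ops = {
        .ndo_start_xmit = demo_start_xmit,
};

islpci_eth_transmit(), declared later in the trace, has exactly the ndo_start_xmit signature and fills that slot in the real driver.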
2465     enum skb_free_reason {   SKB_REASON_CONSUMED = 0,   SKB_REASON_DROPPED = 1 } ;   143     struct __anonstruct_isl38xx_fragment_247 {   __le32 address;   __le16 size;   __le16 flags; } ;   143     typedef struct __anonstruct_isl38xx_fragment_247 isl38xx_fragment;   144     struct isl38xx_cb {   __le32 driver_curr_frag[6U];   __le32 device_curr_frag[6U];   isl38xx_fragment rx_data_low[8U];   isl38xx_fragment tx_data_low[32U];   isl38xx_fragment rx_data_high[8U];   isl38xx_fragment tx_data_high[32U];   isl38xx_fragment rx_data_mgmt[4U];   isl38xx_fragment tx_data_mgmt[4U]; } ;   156     typedef struct isl38xx_cb isl38xx_control_block;   169     struct iw_param {   __s32 value;   __u8 fixed;   __u8 disabled;   __u16 flags; } ;   680     struct iw_point {   void *pointer;   __u16 length;   __u16 flags; } ;   691     struct iw_freq {   __s32 m;   __s16 e;   __u8 i;   __u8 flags; } ;   708     struct iw_quality {   __u8 qual;   __u8 level;   __u8 noise;   __u8 updated; } ;   720     struct iw_discarded {   __u32 nwid;   __u32 code;   __u32 fragment;   __u32 retries;   __u32 misc; } ;   736     struct iw_missed {   __u32 beacon; } ;   882     struct iw_statistics {   __u16 status;   struct iw_quality qual;   struct iw_discarded discard;   struct iw_missed miss; } ;   897     union iwreq_data {   char name[16U];   struct iw_point essid;   struct iw_param nwid;   struct iw_freq freq;   struct iw_param sens;   struct iw_param bitrate;   struct iw_param txpower;   struct iw_param rts;   struct iw_param frag;   __u32 mode;   struct iw_param retry;   struct iw_point encoding;   struct iw_param power;   struct iw_quality qual;   struct sockaddr ap_addr;   struct sockaddr addr;   struct iw_param param;   struct iw_point data; } ;  1068     struct iw_priv_args {   __u32 cmd;   __u16 set_args;   __u16 get_args;   char name[16U]; } ;    30     struct iw_request_info {   __u16 cmd;   __u16 flags; } ;   314     typedef int (*iw_handler)(struct net_device *, struct iw_request_info *, union iwreq_data *, char *);   315     struct iw_handler_def {   const iw_handler  (**standard)(struct net_device *, struct iw_request_info *, union iwreq_data *, char *);   __u16 num_standard;   __u16 num_private;   __u16 num_private_args;   const iw_handler  (**private)(struct net_device *, struct iw_request_info *, union iwreq_data *, char *);   const struct iw_priv_args *private_args;   struct iw_statistics * (*get_wireless_stats)(struct net_device *); } ;   381     struct iw_spy_data {   int spy_number;   u_char spy_address[8U][6U];   struct iw_quality spy_stat[8U];   struct iw_quality spy_thr_low;   struct iw_quality spy_thr_high;   u_char spy_thr_under[8U]; } ;   405     struct libipw_device ;   406     struct iw_public_data {   struct iw_spy_data *spy_data;   struct libipw_device *libipw; } ;   104     struct __anonstruct_pimfor_header_t_249 {   u8 version;   u8 operation;   u32 oid;   u8 device_id;   u8 flags;   u32 length; } ;   104     typedef struct __anonstruct_pimfor_header_t_249 pimfor_header_t;   104     struct islpci_mgmtframe {   struct net_device *ndev;   pimfor_header_t *header;   void *data;   struct work_struct ws;   char buf[0U]; } ;   137     enum ldv_30720 {   PRV_STATE_OFF = 0,   PRV_STATE_PREBOOT = 1,   PRV_STATE_BOOT = 2,   PRV_STATE_POSTBOOT = 3,   PRV_STATE_PREINIT = 4,   PRV_STATE_INIT = 5,   PRV_STATE_READY = 6,   PRV_STATE_SLEEP = 7 } ;    46     typedef enum ldv_30720 islpci_state_t;    53     enum ldv_30726 {   MAC_POLICY_OPEN = 0,   MAC_POLICY_ACCEPT = 1,   MAC_POLICY_REJECT = 2 } ;
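isl38xx_cb above is the control block shared with the device behind the DMA rings: for each queue the driver advances driver_curr_frag[q] as it posts fragments and the device advances device_curr_frag[q] as it consumes them, so their difference is the ring occupancy. A hedged sketch of that calculation (le32_to_cpu is assumed here, it is not among the declarations in this trace; isl38xx_in_queue(), declared further down, reports this quantity):

static int demo_ring_occupancy(const isl38xx_control_block *cb, int queue)
{
        u32 produced = le32_to_cpu(cb->driver_curr_frag[queue]);   /* driver write index */
        u32 consumed = le32_to_cpu(cb->device_curr_frag[queue]);   /* device read index */

        return (int)(produced - consumed);   /* entries still queued; u32 arithmetic handles wrap */
}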
    struct islpci_acl {   enum ldv_30726 policy;   struct list_head mac_list;   int size;   struct mutex lock; } ;    60     struct islpci_membuf {   int size;   void *mem;   dma_addr_t pci_addr; } ;   184     struct __anonstruct_islpci_private_250 {   spinlock_t slock;   u32 priv_oid;   u32 iw_mode;   struct rw_semaphore mib_sem;   void **mib;   char nickname[33U];   struct work_struct stats_work;   struct mutex stats_lock;   unsigned long stats_timestamp;   struct iw_statistics local_iwstatistics;   struct iw_statistics iwstatistics;   struct iw_spy_data spy_data;   struct iw_public_data wireless_data;   int monitor_type;   struct islpci_acl acl;   struct pci_dev *pdev;   char firmware[33U];   void *device_base;   void *driver_mem_address;   dma_addr_t device_host_address;   dma_addr_t device_psm_buffer;   struct net_device *ndev;   struct isl38xx_cb *control_block;   u32 index_mgmt_rx;   u32 index_mgmt_tx;   u32 free_data_rx;   u32 free_data_tx;   u32 data_low_tx_full;   struct islpci_membuf mgmt_tx[4U];   struct islpci_membuf mgmt_rx[4U];   struct sk_buff *data_low_tx[32U];   struct sk_buff *data_low_rx[8U];   dma_addr_t pci_map_tx_address[32U];   dma_addr_t pci_map_rx_address[8U];   wait_queue_head_t reset_done;   struct mutex mgmt_lock;   struct islpci_mgmtframe *mgmt_received;   wait_queue_head_t mgmt_wqueue;   islpci_state_t state;   int state_off;   int wpa;   struct list_head bss_wpa_list;   int num_bss_wpa;   struct mutex wpa_lock;   u8 wpa_ie[64U];   size_t wpa_ie_len;   struct work_struct reset_task;   int reset_task_pending; } ;   184     typedef struct __anonstruct_islpci_private_250 islpci_private;   212     struct rfmon_header {   __le16 unk0;   __le16 length;   __le32 clock;   u8 flags;   u8 unk1;   u8 rate;   u8 unk2;   __le16 freq;   __le16 unk3;   u8 rssi;   u8 padding[3U]; } ;    37     struct rx_annex_header {   u8 addr1[6U];   u8 addr2[6U];   struct rfmon_header rfmon; } ;    43     struct avs_80211_1_header {   __be32 version;   __be32 length;   __be64 mactime;   __be64 hosttime;   __be32 phytype;   __be32 channel;   __be32 datarate;   __be32 antenna;   __be32 priority;   __be32 ssi_type;   __be32 ssi_signal;   __be32 ssi_noise;   __be32 preamble;   __be32 encoding; } ;    12     struct __wait_queue ;    12     typedef struct __wait_queue wait_queue_t;    15     struct __wait_queue {   unsigned int flags;   void *private;   int (*func)(wait_queue_t *, unsigned int, int, void *);   struct list_head task_list; } ;   803     struct iw_encode_ext {   __u32 ext_flags;   __u8 tx_seq[8U];   __u8 rx_seq[8U];   struct sockaddr addr;   __u16 alg;   __u16 key_len;   __u8 key[0U]; } ;   957     struct iw_range {   __u32 throughput;   __u32 min_nwid;   __u32 max_nwid;   __u16 old_num_channels;   __u8 old_num_frequency;   __u8 scan_capa;   __u32 event_capa[6U];   __s32 sensitivity;   struct iw_quality max_qual;   struct iw_quality avg_qual;   __u8 num_bitrates;   __s32 bitrate[32U];   __s32 min_rts;   __s32 max_rts;   __s32 min_frag;   __s32 max_frag;   __s32 min_pmp;   __s32 max_pmp;   __s32 min_pmt;   __s32 max_pmt;   __u16 pmp_flags;   __u16 pmt_flags;   __u16 pm_capa;   __u16 encoding_size[8U];   __u8 num_encoding_sizes;   __u8 max_encoding_tokens;   __u8 encoding_login_index;   __u16 txpower_capa;   __u8 num_txpower;   __s32 txpower[8U];   __u8 we_version_compiled;   __u8 we_version_source;   __u16 retry_capa;   __u16 retry_flags;   __u16 r_time_flags;   __s32 min_retry;   __s32 max_retry;   __s32 min_r_time;   __s32 max_r_time;   __u16 num_channels;   __u8 num_frequency;   
struct iw_freq freq[32U];   __u32 enc_capa; } ;  1080     struct iw_event {   __u16 len;   __u16 cmd;   union iwreq_data u; } ;   169     struct obj_ssid {   u8 length;   char octets[33U]; } ;    32     struct obj_key {   u8 type;   u8 length;   char key[32U]; } ;    38     struct obj_mlme {   u8 address[6U];   u16 id;   u16 state;   u16 code; } ;    45     struct obj_mlmeex {   u8 address[6U];   u16 id;   u16 state;   u16 code;   u16 size;   u8 data[0U]; } ;    54     struct obj_buffer {   u32 size;   u32 addr; } ;    59     struct obj_bss {   u8 address[6U];   short;   char state;   char reserved;   short age;   char quality;   char rssi;   struct obj_ssid ssid;   short channel;   char beacon_period;   char dtim_period;   short capinfo;   short rates;   short basic_rates;   short; } ;    80     struct obj_bsslist {   u32 nr;   struct obj_bss bsslist[0U]; } ;    85     struct obj_frequencies {   u16 nr;   u16 mhz[0U]; } ;    90     struct obj_attachment {   char type;   char reserved;   short id;   short size;   char data[0U]; } ;   212     enum oid_num_t {   GEN_OID_MACADDRESS = 0,   GEN_OID_LINKSTATE = 1,   GEN_OID_WATCHDOG = 2,   GEN_OID_MIBOP = 3,   GEN_OID_OPTIONS = 4,   GEN_OID_LEDCONFIG = 5,   DOT11_OID_BSSTYPE = 6,   DOT11_OID_BSSID = 7,   DOT11_OID_SSID = 8,   DOT11_OID_STATE = 9,   DOT11_OID_AID = 10,   DOT11_OID_COUNTRYSTRING = 11,   DOT11_OID_SSIDOVERRIDE = 12,   DOT11_OID_MEDIUMLIMIT = 13,   DOT11_OID_BEACONPERIOD = 14,   DOT11_OID_DTIMPERIOD = 15,   DOT11_OID_ATIMWINDOW = 16,   DOT11_OID_LISTENINTERVAL = 17,   DOT11_OID_CFPPERIOD = 18,   DOT11_OID_CFPDURATION = 19,   DOT11_OID_AUTHENABLE = 20,   DOT11_OID_PRIVACYINVOKED = 21,   DOT11_OID_EXUNENCRYPTED = 22,   DOT11_OID_DEFKEYID = 23,   DOT11_OID_DEFKEYX = 24,   DOT11_OID_STAKEY = 25,   DOT11_OID_REKEYTHRESHOLD = 26,   DOT11_OID_STASC = 27,   DOT11_OID_PRIVTXREJECTED = 28,   DOT11_OID_PRIVRXPLAIN = 29,   DOT11_OID_PRIVRXFAILED = 30,   DOT11_OID_PRIVRXNOKEY = 31,   DOT11_OID_RTSTHRESH = 32,   DOT11_OID_FRAGTHRESH = 33,   DOT11_OID_SHORTRETRIES = 34,   DOT11_OID_LONGRETRIES = 35,   DOT11_OID_MAXTXLIFETIME = 36,   DOT11_OID_MAXRXLIFETIME = 37,   DOT11_OID_AUTHRESPTIMEOUT = 38,   DOT11_OID_ASSOCRESPTIMEOUT = 39,   DOT11_OID_ALOFT_TABLE = 40,   DOT11_OID_ALOFT_CTRL_TABLE = 41,   DOT11_OID_ALOFT_RETREAT = 42,   DOT11_OID_ALOFT_PROGRESS = 43,   DOT11_OID_ALOFT_FIXEDRATE = 44,   DOT11_OID_ALOFT_RSSIGRAPH = 45,   DOT11_OID_ALOFT_CONFIG = 46,   DOT11_OID_VDCFX = 47,   DOT11_OID_MAXFRAMEBURST = 48,   DOT11_OID_PSM = 49,   DOT11_OID_CAMTIMEOUT = 50,   DOT11_OID_RECEIVEDTIMS = 51,   DOT11_OID_ROAMPREFERENCE = 52,   DOT11_OID_BRIDGELOCAL = 53,   DOT11_OID_CLIENTS = 54,   DOT11_OID_CLIENTSASSOCIATED = 55,   DOT11_OID_CLIENTX = 56,   DOT11_OID_CLIENTFIND = 57,   DOT11_OID_WDSLINKADD = 58,   DOT11_OID_WDSLINKREMOVE = 59,   DOT11_OID_EAPAUTHSTA = 60,   DOT11_OID_EAPUNAUTHSTA = 61,   DOT11_OID_DOT1XENABLE = 62,   DOT11_OID_MICFAILURE = 63,   DOT11_OID_REKEYINDICATE = 64,   DOT11_OID_MPDUTXSUCCESSFUL = 65,   DOT11_OID_MPDUTXONERETRY = 66,   DOT11_OID_MPDUTXMULTIPLERETRIES = 67,   DOT11_OID_MPDUTXFAILED = 68,   DOT11_OID_MPDURXSUCCESSFUL = 69,   DOT11_OID_MPDURXDUPS = 70,   DOT11_OID_RTSSUCCESSFUL = 71,   DOT11_OID_RTSFAILED = 72,   DOT11_OID_ACKFAILED = 73,   DOT11_OID_FRAMERECEIVES = 74,   DOT11_OID_FRAMEERRORS = 75,   DOT11_OID_FRAMEABORTS = 76,   DOT11_OID_FRAMEABORTSPHY = 77,   DOT11_OID_SLOTTIME = 78,   DOT11_OID_CWMIN = 79,   DOT11_OID_CWMAX = 80,   DOT11_OID_ACKWINDOW = 81,   DOT11_OID_ANTENNARX = 82,   DOT11_OID_ANTENNATX = 83,   
DOT11_OID_ANTENNADIVERSITY = 84,   DOT11_OID_CHANNEL = 85,   DOT11_OID_EDTHRESHOLD = 86,   DOT11_OID_PREAMBLESETTINGS = 87,   DOT11_OID_RATES = 88,   DOT11_OID_CCAMODESUPPORTED = 89,   DOT11_OID_CCAMODE = 90,   DOT11_OID_RSSIVECTOR = 91,   DOT11_OID_OUTPUTPOWERTABLE = 92,   DOT11_OID_OUTPUTPOWER = 93,   DOT11_OID_SUPPORTEDRATES = 94,   DOT11_OID_FREQUENCY = 95,   DOT11_OID_SUPPORTEDFREQUENCIES = 96,   DOT11_OID_NOISEFLOOR = 97,   DOT11_OID_FREQUENCYACTIVITY = 98,   DOT11_OID_IQCALIBRATIONTABLE = 99,   DOT11_OID_NONERPPROTECTION = 100,   DOT11_OID_SLOTSETTINGS = 101,   DOT11_OID_NONERPTIMEOUT = 102,   DOT11_OID_PROFILES = 103,   DOT11_OID_EXTENDEDRATES = 104,   DOT11_OID_DEAUTHENTICATE = 105,   DOT11_OID_AUTHENTICATE = 106,   DOT11_OID_DISASSOCIATE = 107,   DOT11_OID_ASSOCIATE = 108,   DOT11_OID_SCAN = 109,   DOT11_OID_BEACON = 110,   DOT11_OID_PROBE = 111,   DOT11_OID_DEAUTHENTICATEEX = 112,   DOT11_OID_AUTHENTICATEEX = 113,   DOT11_OID_DISASSOCIATEEX = 114,   DOT11_OID_ASSOCIATEEX = 115,   DOT11_OID_REASSOCIATE = 116,   DOT11_OID_REASSOCIATEEX = 117,   DOT11_OID_NONERPSTATUS = 118,   DOT11_OID_STATIMEOUT = 119,   DOT11_OID_MLMEAUTOLEVEL = 120,   DOT11_OID_BSSTIMEOUT = 121,   DOT11_OID_ATTACHMENT = 122,   DOT11_OID_PSMBUFFER = 123,   DOT11_OID_BSSS = 124,   DOT11_OID_BSSX = 125,   DOT11_OID_BSSFIND = 126,   DOT11_OID_BSSLIST = 127,   OID_INL_TUNNEL = 128,   OID_INL_MEMADDR = 129,   OID_INL_MEMORY = 130,   OID_INL_MODE = 131,   OID_INL_COMPONENT_NR = 132,   OID_INL_VERSION = 133,   OID_INL_INTERFACE_ID = 134,   OID_INL_COMPONENT_ID = 135,   OID_INL_CONFIG = 136,   OID_INL_DOT11D_CONFORMANCE = 137,   OID_INL_PHYCAPABILITIES = 138,   OID_INL_OUTPUTPOWER = 139,   OID_NUM_LAST = 140 } ;   356     struct oid_t {   enum oid_num_t oid;   short range;   short size;   char flags; } ;   492     union oid_res_t {   void *ptr;   u32 u; } ;    47     struct mac_entry {   struct list_head _list;   char addr[6U]; } ;    66     struct islpci_bss_wpa_ie {   struct list_head list;   unsigned long last_update;   u8 bssid[6U];   u8 wpa_ie[64U];   size_t wpa_ie_len; } ;  2100     struct ieee80211_beacon_phdr {   u8 timestamp[8U];   u16 beacon_int;   u16 capab_info; } ;   394     struct paravirt_callee_save {   void *func; } ;   196     struct pv_irq_ops {   struct paravirt_callee_save save_fl;   struct paravirt_callee_save restore_fl;   struct paravirt_callee_save irq_disable;   struct paravirt_callee_save irq_enable;   void (*safe_halt)();   void (*halt)();   void (*adjust_exception_frame)(); } ;   419     enum irqreturn {   IRQ_NONE = 0,   IRQ_HANDLED = 1,   IRQ_WAKE_THREAD = 2 } ;    16     typedef enum irqreturn irqreturn_t;    47     struct firmware {   size_t size;   const u8 *data;   struct page **pages;   void *priv; } ;    38     typedef int Set;     1     long int __builtin_expect(long, long);     1     void * __builtin_memcpy(void *, const void *, unsigned long);     7     dma_addr_t  ldv_dma_map_single_attrs(struct device *dev, void *ptr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);    71     void set_bit(long nr, volatile unsigned long *addr);   109     void clear_bit(long nr, volatile unsigned long *addr);   252     int test_and_clear_bit(long nr, volatile unsigned long *addr);     7     __u32  __arch_swab32(__u32 val);    14     __u64  __arch_swab64(__u64 val);    57     __u32  __fswab32(__u32 val);    68     __u64  __fswab64(__u64 val);   148     void le32_add_cpu(__le32 *var, u32 val);   132     int printk(const char *, ...);    71     void warn_slowpath_null(const char 
*, const int);    58     void * memmove(void *, const void *, size_t );    24     int atomic_read(const atomic_t *v);    32     unsigned long int _raw_spin_lock_irqsave(raw_spinlock_t *);    43     void _raw_spin_unlock_irqrestore(raw_spinlock_t *, unsigned long);   290     raw_spinlock_t * spinlock_check(spinlock_t *lock);   356     void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags);    77     extern volatile unsigned long jiffies;   375     extern struct workqueue_struct *system_wq;   464     bool  queue_work_on(int, struct workqueue_struct *, struct work_struct *);   504     bool  queue_work(struct workqueue_struct *wq, struct work_struct *work);   563     bool  schedule_work(struct work_struct *work);    55     unsigned int readl(const volatile void *addr);    63     void writel(unsigned int val, volatile void *addr);    63     int valid_dma_direction(int dma_direction);    44     void debug_dma_unmap_page(struct device *, dma_addr_t , size_t , int, bool );    30     extern struct dma_map_ops *dma_ops;    32     struct dma_map_ops * get_dma_ops(struct device *dev);    32     dma_addr_t  ldv_dma_map_single_attrs_1(struct device *dev, void *ptr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);    33     void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);    30     dma_addr_t  pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction);    36     void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int direction);    10     void __const_udelay(unsigned long);   654     void consume_skb(struct sk_buff *);   692     struct sk_buff * skb_copy_expand(const struct sk_buff *, int, int, gfp_t );   798     unsigned char * skb_end_pointer(const struct sk_buff *skb);   926     int skb_cloned(const struct sk_buff *skb);  1369     bool  skb_is_nonlinear(const struct sk_buff *skb);  1494     unsigned char * skb_put(struct sk_buff *, unsigned int);  1504     unsigned char * skb_push(struct sk_buff *, unsigned int);  1512     unsigned char * skb_pull(struct sk_buff *, unsigned int);  1556     unsigned int skb_headroom(const struct sk_buff *skb);  1567     int skb_tailroom(const struct sk_buff *skb);  1595     void skb_reserve(struct sk_buff *skb, int len);  1712     void skb_reset_mac_header(struct sk_buff *skb);  1933     struct sk_buff * __netdev_alloc_skb(struct net_device *, unsigned int, gfp_t );  1949     struct sk_buff * netdev_alloc_skb(struct net_device *dev, unsigned int length);  1963     struct sk_buff * dev_alloc_skb(unsigned int length);  2504     void skb_copy_from_linear_data(const struct sk_buff *skb, void *to, const unsigned int len);  1537     struct netdev_queue * netdev_get_tx_queue(const struct net_device *dev, unsigned int index);  1603     void * netdev_priv(const struct net_device *dev);  1888     int netpoll_trap();  2050     void __netif_schedule(struct Qdisc *);  2066     void netif_tx_start_queue(struct netdev_queue *dev_queue);  2092     void netif_tx_wake_queue(struct netdev_queue *dev_queue);  2111     void netif_wake_queue(struct net_device *dev);  2126     void netif_tx_stop_queue(struct netdev_queue *dev_queue);  2142     void netif_stop_queue(struct net_device *dev);  2471     void __dev_kfree_skb_irq(struct sk_buff *, enum skb_free_reason );  2493     void dev_kfree_skb_irq(struct sk_buff *skb);  2513     int netif_rx(struct sk_buff *);    32     __be16  eth_type_trans(struct sk_buff *, struct net_device *);    
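The sk_buff helpers just listed are the building blocks of the receive hand-off. A minimal sketch composed only of those helpers, where hw_buf and len stand in for device-specific DMA details not shown here:

static int demo_rx_one(struct net_device *ndev, const void *hw_buf, unsigned int len)
{
        struct sk_buff *skb = netdev_alloc_skb(ndev, len + 2);

        if (!skb)
                return -ENOMEM;                      /* caller would account rx_dropped */
        skb_reserve(skb, 2);                         /* align the IP header past the 14-byte MAC header */
        memcpy(skb_put(skb, len), hw_buf, len);      /* skb_put() grows the data area and returns its tail */
        skb->protocol = eth_type_trans(skb, ndev);   /* sets the MAC header and protocol */
        return netif_rx(skb);                        /* queue the frame for the upper layers */
}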
75     void isl38xx_w32_flush(void *base, u32 val, unsigned long offset);   167     void isl38xx_trigger_device(int asleep, void *device_base);   459     void wireless_spy_update(struct net_device *, unsigned char *, struct iw_quality *);    33     int pc_debug;   187     islpci_state_t  islpci_get_state(islpci_private *priv);   201     int islpci_reset(islpci_private *priv, int reload_firmware);   204     void islpci_trigger(islpci_private *priv);    65     void islpci_eth_cleanup_transmit(islpci_private *priv, isl38xx_control_block *control_block);    66     netdev_tx_t  islpci_eth_transmit(struct sk_buff *skb, struct net_device *ndev);    67     int islpci_eth_receive(islpci_private *priv);    68     void islpci_eth_tx_timeout(struct net_device *ndev);    69     void islpci_do_reset_and_wake(struct work_struct *work);    32     int channel_of_freq(int f);   245     int islpci_monitor_rx(islpci_private *priv, struct sk_buff **skb);   167     __u32  __swab32p(const __u32 *p);   235     void __swab32s(__u32 *p);    24     void INIT_LIST_HEAD(struct list_head *list);    88     void __bad_percpu_size();    10     extern struct task_struct *current_task;    12     struct task_struct * get_current();    55     void * memset(void *, int, size_t );    11     void __xchg_wrong_size();   279     void lockdep_init_map(struct lockdep_map *, const char *, struct lock_class_key *, int);   141     int mutex_lock_interruptible_nested(struct mutex *, unsigned int);   174     void mutex_unlock(struct mutex *);   144     void __wake_up(wait_queue_head_t *, unsigned int, int, void *);   820     void prepare_to_wait(wait_queue_head_t *, wait_queue_t *, int);   823     void finish_wait(wait_queue_head_t *, wait_queue_t *);   825     int autoremove_wake_function(wait_queue_t *, unsigned int, int, void *);   303     unsigned long int msecs_to_jiffies(const unsigned int);   192     void __init_work(struct work_struct *, int);   375     long int schedule_timeout_uninterruptible(long);   142     void kfree(const void *);   302     void * __kmalloc(size_t , gfp_t );   441     void * kmalloc(size_t size, gfp_t flags);    59     void debug_dma_sync_single_for_cpu(struct device *, dma_addr_t , size_t , int);    33     void dma_unmap_single_attrs___0(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);   109     void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir);    30     dma_addr_t  pci_map_single___0(struct pci_dev *hwdev, void *ptr, size_t size, int direction);    36     void pci_unmap_single___0(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int direction);    71     void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction);   118     int islpci_mgt_receive(struct net_device *ndev);   121     int islpci_mgmt_rx_fill(struct net_device *ndev);   124     void islpci_mgt_cleanup_transmit(struct net_device *ndev);   127     int islpci_mgt_transaction(struct net_device *ndev, int operation, unsigned long oid, void *senddata, int sendlen, struct islpci_mgmtframe **recvframe);    38     void prism54_process_trap(struct work_struct *work);    39     int pc_debug = 1;    70     void pimfor_encode_header(int operation, u32 oid, u32 length, pimfor_header_t *h);    84     pimfor_header_t * pimfor_decode_header(void *data, int len);   163     int islpci_mgt_transmit(struct net_device *ndev, int operation, unsigned long oid, void *data, int length);
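islpci_mgt_transaction() above is the synchronous PIMFOR request/response entry point built on islpci_mgt_transmit() and the mgmt_wqueue wait queue in islpci_private. A hedged usage sketch; PIMFOR_OP_GET is the driver's GET opcode and its numeric value is not visible in this trace:

static int demo_query_oid(struct net_device *ndev, unsigned long oid)
{
        struct islpci_mgmtframe *recv = NULL;
        int err = islpci_mgt_transaction(ndev, PIMFOR_OP_GET, oid, NULL, 0, &recv);

        if (err)
                return err;
        /* recv->header is the decoded pimfor_header_t, recv->data the payload */
        islpci_mgt_release(recv);   /* declared below; frees the received frame */
        return 0;
}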
159     int isl38xx_in_queue(isl38xx_control_block *cb, int queue);   161     void isl38xx_disable_interrupts(void *device);   162     void isl38xx_enable_common_interrupts(void *device_base);   164     void isl38xx_handle_sleep_request(isl38xx_control_block *control_block, int *powerstate, void *device_base);   166     void isl38xx_handle_wakeup(isl38xx_control_block *control_block, int *powerstate, void *device_base);   168     void isl38xx_interface_reset(void *device_base, dma_addr_t host_address);    47     void __list_add(struct list_head *, struct list_head *, struct list_head *);    60     void list_add(struct list_head *new, struct list_head *head);    74     void list_add_tail(struct list_head *new, struct list_head *head);   111     void __list_del_entry(struct list_head *);   112     void list_del(struct list_head *);   153     void list_move(struct list_head *list, struct list_head *head);   391     int snprintf(char *, size_t , const char *, ...);    34     void * __memcpy(void *, const void *, size_t );    60     int memcmp(const void *, const void *, size_t );    61     size_t  strlen(const char *);    62     char * strcpy(char *, const char *);    23     char * strncpy(char *, const char *, __kernel_size_t );   119     void __mutex_init(struct mutex *, const char *, struct lock_class_key *);   138     void mutex_lock_nested(struct mutex *, unsigned int);   173     int mutex_trylock(struct mutex *);    91     void down_read(struct rw_semaphore *);   101     void down_write(struct rw_semaphore *);   111     void up_read(struct rw_semaphore *);   116     void up_write(struct rw_semaphore *);   638     void * kzalloc(size_t size, gfp_t flags);  2610     void netif_carrier_on(struct net_device *);  2612     void netif_carrier_off(struct net_device *);   266     bool  ether_addr_equal(const u8 *addr1, const u8 *addr2);   133     void islpci_mgt_release(struct islpci_mgmtframe *frame);   440     void wireless_send_event(struct net_device *, unsigned int, union iwreq_data *, const char *);   447     int iw_handler_set_spy(struct net_device *, struct iw_request_info *, union iwreq_data *, char *);   450     int iw_handler_get_spy(struct net_device *, struct iw_request_info *, union iwreq_data *, char *);   453     int iw_handler_set_thrspy(struct net_device *, struct iw_request_info *, union iwreq_data *, char *);   456     int iw_handler_get_thrspy(struct net_device *, struct iw_request_info *, union iwreq_data *, char *);   467     int iwe_stream_lcp_len(struct iw_request_info *info);   476     int iwe_stream_point_len(struct iw_request_info *info);   485     int iwe_stream_event_len_adjust(struct iw_request_info *info, int event_len);   503     char * iwe_stream_add_event(struct iw_request_info *info, char *stream, char *ends, struct iw_event *iwe, int event_len);   528     char * iwe_stream_add_point(struct iw_request_info *info, char *stream, char *ends, struct iw_event *iwe, char *extra);   555     char * iwe_stream_add_value(struct iw_request_info *info, char *event, char *value, char *ends, struct iw_event *iwe, int event_len);    30     void prism54_mib_init(islpci_private *priv);    32     struct iw_statistics * prism54_get_wireless_stats(struct net_device *ndev);    33     void prism54_update_stats(struct work_struct *work);    35     void prism54_acl_init(struct islpci_acl *acl);    36     void prism54_acl_clean(struct islpci_acl *acl);    40     void prism54_wpa_bss_ie_init(islpci_private *priv);    41     void prism54_wpa_bss_ie_clean(islpci_private *priv);
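The list and mutex primitives above, together with struct mac_entry and the mutex inside struct islpci_acl (both earlier in this trace), make up the driver's MAC access-control list. A sketch of the append path, roughly what prism54_add_mac (declared further down) must do; the body is assumed rather than copied from the driver, and GFP_KERNEL / -ERESTARTSYS are standard kernel values:

static int demo_acl_add(struct islpci_acl *acl, const u8 *addr)
{
        struct mac_entry *entry = kmalloc(sizeof(*entry), GFP_KERNEL);

        if (!entry)
                return -ENOMEM;
        memcpy(entry->addr, addr, 6);                   /* 6-byte MAC, matching struct mac_entry */
        if (mutex_lock_interruptible_nested(&acl->lock, 0)) {
                kfree(entry);
                return -ERESTARTSYS;
        }
        list_add_tail(&entry->_list, &acl->mac_list);   /* preserve insertion order */
        acl->size++;
        mutex_unlock(&acl->lock);
        return 0;
}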
43     int prism54_set_mac_address(struct net_device *ndev, void *addr);    45     const struct iw_handler_def prism54_handler_def;    24     struct oid_t isl_oid[140U];    34     void mgt_le_to_cpu(int type, void *data);    36     int mgt_set_request(islpci_private *priv, enum oid_num_t n, int extra, void *data);    37     int mgt_set_varlen(islpci_private *priv, enum oid_num_t n, void *data, int extra_len);    40     int mgt_get_request(islpci_private *priv, enum oid_num_t n, int extra, void *data, union oid_res_t *res);    43     int mgt_commit_list(islpci_private *priv, enum oid_num_t *l, int n);    45     void mgt_set(islpci_private *priv, enum oid_num_t n, void *data);    47     void mgt_get(islpci_private *priv, enum oid_num_t n, void *res);    49     int mgt_commit(islpci_private *priv);    51     int mgt_mlme_answer(islpci_private *priv);    53     enum oid_num_t  mgt_oidtonum(u32 oid);    55     int mgt_response_to_str(enum oid_num_t n, union oid_res_t *r, char *str);    45     void prism54_wpa_bss_ie_add(islpci_private *priv, u8 *bssid, u8 *wpa_ie, size_t wpa_ie_len);    47     size_t  prism54_wpa_bss_ie_get(islpci_private *priv, u8 *bssid, u8 *wpa_ie);    48     int prism54_set_wpa(struct net_device *ndev, struct iw_request_info *info, __u32 *uwrq, char *extra);    52     const unsigned char scan_rate_list[12U] = { 2U, 4U, 11U, 22U, 12U, 18U, 24U, 36U, 48U, 72U, 96U, 108U };    68     int prism54_mib_mode_helper(islpci_private *priv, u32 iw_mode);   243     int prism54_commit(struct net_device *ndev, struct iw_request_info *info, char *cwrq, char *extra);   259     int prism54_get_name(struct net_device *ndev, struct iw_request_info *info, char *cwrq, char *extra);   290     int prism54_set_freq(struct net_device *ndev, struct iw_request_info *info, struct iw_freq *fwrq, char *extra);   310     int prism54_get_freq(struct net_device *ndev, struct iw_request_info *info, struct iw_freq *fwrq, char *extra);   327     int prism54_set_mode(struct net_device *ndev, struct iw_request_info *info, __u32 *uwrq, char *extra);   371     int prism54_get_mode(struct net_device *ndev, struct iw_request_info *info, __u32 *uwrq, char *extra);   389     int prism54_set_sens(struct net_device *ndev, struct iw_request_info *info, struct iw_param *vwrq, char *extra);   402     int prism54_get_sens(struct net_device *ndev, struct iw_request_info *info, struct iw_param *vwrq, char *extra);   419     int prism54_get_range(struct net_device *ndev, struct iw_request_info *info, struct iw_point *dwrq, char *extra);   524     int prism54_set_wap(struct net_device *ndev, struct iw_request_info *info, struct sockaddr *awrq, char *extra);   546     int prism54_get_wap(struct net_device *ndev, struct iw_request_info *info, struct sockaddr *awrq, char *extra);   562     int prism54_set_scan(struct net_device *dev, struct iw_request_info *info, struct iw_param *vwrq, char *extra);   575     char * prism54_translate_bss(struct net_device *ndev, struct iw_request_info *info, char *current_ev, char *end_buf, struct obj_bss *bss, char noise);   682     int prism54_get_scan(struct net_device *ndev, struct iw_request_info *info, struct iw_point *dwrq, char *extra);   736     int prism54_set_essid(struct net_device *ndev, struct iw_request_info *info, struct iw_point *dwrq, char *extra);   763     int prism54_get_essid(struct net_device *ndev, struct iw_request_info *info, struct iw_point *dwrq, char *extra);   793     int prism54_set_nick(struct net_device *ndev, struct iw_request_info *info, struct iw_point *dwrq, char
*extra);
 810  int prism54_get_nick(struct net_device *ndev, struct iw_request_info *info, struct iw_point *dwrq, char *extra);
 828  int prism54_set_rate(struct net_device *ndev, struct iw_request_info *info, struct iw_param *vwrq, char *extra);
 901  int prism54_get_rate(struct net_device *ndev, struct iw_request_info *info, struct iw_param *vwrq, char *extra);
 929  int prism54_set_rts(struct net_device *ndev, struct iw_request_info *info, struct iw_param *vwrq, char *extra);
 938  int prism54_get_rts(struct net_device *ndev, struct iw_request_info *info, struct iw_param *vwrq, char *extra);
 953  int prism54_set_frag(struct net_device *ndev, struct iw_request_info *info, struct iw_param *vwrq, char *extra);
 962  int prism54_get_frag(struct net_device *ndev, struct iw_request_info *info, struct iw_param *vwrq, char *extra);
 983  int prism54_set_retry(struct net_device *ndev, struct iw_request_info *info, struct iw_param *vwrq, char *extra);
1025  int prism54_get_retry(struct net_device *ndev, struct iw_request_info *info, struct iw_param *vwrq, char *extra);
1057  int prism54_set_encode(struct net_device *ndev, struct iw_request_info *info, struct iw_point *dwrq, char *extra);
1158  int prism54_get_encode(struct net_device *ndev, struct iw_request_info *info, struct iw_point *dwrq, char *extra);
1206  int prism54_get_txpower(struct net_device *ndev, struct iw_request_info *info, struct iw_param *vwrq, char *extra);
1226  int prism54_set_txpower(struct net_device *ndev, struct iw_request_info *info, struct iw_param *vwrq, char *extra);
1251  int prism54_set_genie(struct net_device *ndev, struct iw_request_info *info, struct iw_point *data, char *extra);
1300  int prism54_get_genie(struct net_device *ndev, struct iw_request_info *info, struct iw_point *data, char *extra);
1321  int prism54_set_auth(struct net_device *ndev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra);
1448  int prism54_get_auth(struct net_device *ndev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra);
1538  int prism54_set_encodeext(struct net_device *ndev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra);
1647  int prism54_get_encodeext(struct net_device *ndev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra);
1742  int prism54_reset(struct net_device *ndev, struct iw_request_info *info, __u32 *uwrq, char *extra);
1751  int prism54_get_oid(struct net_device *ndev, struct iw_request_info *info, struct iw_point *dwrq, char *extra);
1766  int prism54_set_u32(struct net_device *ndev, struct iw_request_info *info, __u32 *uwrq, char *extra);
1775  int prism54_set_raw(struct net_device *ndev, struct iw_request_info *info, struct iw_point *dwrq, char *extra);
1793  void prism54_clear_mac(struct islpci_acl *acl);
1822  int prism54_add_mac(struct net_device *ndev, struct iw_request_info *info, struct sockaddr *awrq, char *extra);
1851  int prism54_del_mac(struct net_device *ndev, struct iw_request_info *info, struct sockaddr *awrq, char *extra);
1878  int prism54_get_mac(struct net_device *ndev, struct iw_request_info *info, struct iw_point *dwrq, char *extra);
1906  int prism54_set_policy(struct net_device *ndev, struct iw_request_info *info, __u32 *uwrq, char *extra);
1942  int prism54_get_policy(struct net_device *ndev, struct iw_request_info *info, __u32 *uwrq, char *extra);
1956  int prism54_mac_accept(struct islpci_acl *acl, char *mac);
1982  int prism54_kick_all(struct net_device *ndev, struct iw_request_info *info, struct iw_point *dwrq, char *extra);
2002  int prism54_kick_mac(struct net_device *ndev, struct iw_request_info *info, struct sockaddr *awrq, char *extra);
2030  void format_event(islpci_private *priv, char *dest, const char *str, const struct obj_mlme *mlme, u16 *length, int error);
2045  void send_formatted_event(islpci_private *priv, const char *str, const struct obj_mlme *mlme, int error);
2063  void send_simple_event(islpci_private *priv, const char *str);
2081  void link_changed(struct net_device *ndev, u32 bitrate);
2109  u8 wpa_oid[4U] = { 0U, 80U, 242U, 1U };	/* 00:50:F2:01, the WPA OUI */
2217  void prism54_process_bss_data(islpci_private *priv, u32 oid, u8 *addr, u8 *payload, size_t len);
2245  void handle_request(islpci_private *priv, struct obj_mlme *mlme, enum oid_num_t oid);
2260  int prism54_process_trap_helper(islpci_private *priv, enum oid_num_t oid, char *data);
2545  int prism54_get_wpa(struct net_device *ndev, struct iw_request_info *info, __u32 *uwrq, char *extra);
2554  int prism54_set_prismhdr(struct net_device *ndev, struct iw_request_info *info, __u32 *uwrq, char *extra);
2567  int prism54_get_prismhdr(struct net_device *ndev, struct iw_request_info *info, __u32 *uwrq, char *extra);
2576  int prism54_debug_oid(struct net_device *ndev, struct iw_request_info *info, __u32 *uwrq, char *extra);
2588  int prism54_debug_get_oid(struct net_device *ndev, struct iw_request_info *info, struct iw_point *data, char *extra);
2624  int prism54_debug_set_oid(struct net_device *ndev, struct iw_request_info *info, struct iw_point *data, char *extra);
2660  int prism54_set_spy(struct net_device *ndev, struct iw_request_info *info, union iwreq_data *uwrq, char *extra);
2685  iw_handler prism54_handler[55U] = {
	/* the dump spells every entry with the explicit cast
	   (int (*)(struct net_device *, struct iw_request_info *, union iwreq_data *, char *));
	   that is the iw_handler type itself and is abbreviated below */
	(iw_handler)&prism54_commit,      (iw_handler)&prism54_get_name,
	0, 0,
	(iw_handler)&prism54_set_freq,    (iw_handler)&prism54_get_freq,
	(iw_handler)&prism54_set_mode,    (iw_handler)&prism54_get_mode,
	(iw_handler)&prism54_set_sens,    (iw_handler)&prism54_get_sens,
	0,                                (iw_handler)&prism54_get_range,
	0, 0, 0, 0,
	&prism54_set_spy,                 &iw_handler_get_spy,
	&iw_handler_set_thrspy,           &iw_handler_get_thrspy,
	(iw_handler)&prism54_set_wap,     (iw_handler)&prism54_get_wap,
	0, 0,
	(iw_handler)&prism54_set_scan,    (iw_handler)&prism54_get_scan,
	(iw_handler)&prism54_set_essid,   (iw_handler)&prism54_get_essid,
	(iw_handler)&prism54_set_nick,    (iw_handler)&prism54_get_nick,
	0, 0,
	(iw_handler)&prism54_set_rate,    (iw_handler)&prism54_get_rate,
	(iw_handler)&prism54_set_rts,     (iw_handler)&prism54_get_rts,
	(iw_handler)&prism54_set_frag,    (iw_handler)&prism54_get_frag,
	(iw_handler)&prism54_set_txpower, (iw_handler)&prism54_get_txpower,
	(iw_handler)&prism54_set_retry,   (iw_handler)&prism54_get_retry,
	(iw_handler)&prism54_set_encode,  (iw_handler)&prism54_get_encode,
	0, 0, 0, 0,
	(iw_handler)&prism54_set_genie,   (iw_handler)&prism54_get_genie,
	&prism54_set_auth,                &prism54_get_auth,
	&prism54_set_encodeext,           &prism54_get_encodeext,
	0 };
2783  const struct iw_priv_args prism54_private_args[100U] = {
	/* the dump's character-array initializers are rewritten as string literals */
	{ 35808U, 0U, 0U, "reset" },         { 35831U, 0U, 18433U, "get_prismhdr" },
	{ 35832U, 18433U, 0U, "set_prismhdr" }, { 35809U, 0U, 18433U, "getPolicy" },
	{ 35810U, 18433U, 0U, "setPolicy" },  { 35811U, 0U, 24640U, "getMac" },
	{ 35812U, 26625U, 0U, "addMac" },     { 35814U, 26625U, 0U, "delMac" },
	{ 35816U, 26625U, 0U, "kickMac" },    { 35818U, 0U, 0U, "kickAll" },
	{ 35819U, 0U, 18433U, "get_wpa" },    { 35820U, 18433U, 0U, "set_wpa" },
	{ 35822U, 18433U, 0U, "dbg_oid" },    { 35823U, 0U, 4352U, "dbg_get_oid" },
	{ 35824U, 4352U, 0U, "dbg_set_oid" },
	{ 35825U, 0U, 11264U, "" },  { 35826U, 18433U, 0U, "" },
	{ 35828U, 10241U, 0U, "" },  { 35830U, 26625U, 0U, "" },
	{ 0U, 26625U, 0U, "s_addr" },         { 0U, 0U, 11264U, "g_addr" },
	{ 1U, 0U, 11264U, "g_linkstate" },
	{ 6U, 18433U, 0U, "s_bsstype" },      { 6U, 0U, 11264U, "g_bsstype" },
	{ 7U, 26625U, 0U, "s_bssid" },        { 7U, 0U, 11264U, "g_bssid" },
	{ 9U, 18433U, 0U, "s_state" },        { 9U, 0U, 11264U, "g_state" },
	{ 10U, 18433U, 0U, "s_aid" },         { 10U, 0U, 11264U, "g_aid" },
	{ 12U, 10241U, 0U, "s_ssidoverride" }, { 12U, 0U, 11264U, "g_ssidoverride" },
	{ 13U, 18433U, 0U, "s_medlimit" },    { 13U, 0U, 11264U, "g_medlimit" },
	{ 14U, 18433U, 0U, "s_beacon" },      { 14U, 0U, 11264U, "g_beacon" },
	{ 15U, 18433U, 0U, "s_dtimperiod" },  { 15U, 0U, 11264U, "g_dtimperiod" },
	{ 20U, 18433U, 0U, "s_authenable" },  { 20U, 0U, 11264U, "g_authenable" },
	{ 21U, 18433U, 0U, "s_privinvok" },   { 21U, 0U, 11264U, "g_privinvok" },
	{ 22U, 18433U, 0U, "s_exunencrypt" }, { 22U, 0U, 11264U, "g_exunencrypt" },
	{ 26U, 18433U, 0U, "s_rekeythresh" }, { 26U, 0U, 11264U, "g_rekeythresh" },
	{ 36U, 18433U, 0U, "s_maxtxlife" },   { 36U, 0U, 11264U, "g_maxtxlife" },
	{ 37U, 18433U, 0U, "s_maxrxlife" },   { 37U, 0U, 11264U, "g_maxrxlife" },
	{ 44U, 18433U, 0U, "s_fixedrate" },   { 44U, 0U, 11264U, "g_fixedrate" },
	{ 48U, 18433U, 0U, "s_frameburst" },  { 48U, 0U, 11264U, "g_frameburst" },
	{ 49U, 18433U, 0U, "s_psm" },         { 49U, 0U, 11264U, "g_psm" },
	{ 53U, 18433U, 0U, "s_bridge" },      { 53U, 0U, 11264U, "g_bridge" },
	{ 54U, 18433U, 0U, "s_clients" },     { 54U, 0U, 11264U, "g_clients" },
	{ 55U, 18433U, 0U, "s_clientassoc" }, { 55U, 0U, 11264U, "g_clientassoc" },
	{ 62U, 18433U, 0U, "s_dot1xenable" }, { 62U, 0U, 11264U, "g_dot1xenable" },
	{ 82U, 18433U, 0U, "s_rxant" },       { 82U, 0U, 11264U, "g_rxant" },
	{ 83U, 18433U, 0U, "s_txant" },       { 83U, 0U, 11264U, "g_txant" },
	{ 84U, 18433U, 0U, "s_antdivers" },   { 84U, 0U, 11264U, "g_antdivers" },
	{ 86U, 18433U, 0U, "s_edthresh" },    { 86U, 0U, 11264U, "g_edthresh" },
	{ 87U, 18433U, 0U, "s_preamble" },    { 87U, 0U, 11264U, "g_preamble" },
	{ 88U, 0U, 11264U, "g_rates" },
	{ 93U, 18433U, 0U, "s_.11outpower" }, { 93U, 0U, 11264U, "g_.11outpower" },
	{ 94U, 0U, 11264U, "g_supprates" },   { 96U, 0U, 11264U, "g_suppfreq" },
	{ 97U, 18433U, 0U, "s_noisefloor" },  { 97U, 0U, 11264U, "g_noisefloor" },
	{ 98U, 0U, 11264U, "g_freqactivity" },
	{ 100U, 18433U, 0U, "s_nonerpprotec" }, { 100U, 0U, 11264U, "g_nonerpprotec" },
	{ 103U, 18433U, 0U, "s_profile" },    { 103U, 0U, 11264U, "g_profile" },
	{ 104U, 0U, 11264U, "g_extrates" },
	{ 120U, 18433U, 0U, "s_mlmelevel" },  { 120U, 0U, 11264U, "g_mlmelevel" },
	{ 124U, 0U, 11264U, "g_bsss" },       { 127U, 0U, 11264U, "g_bsslist" },
	{ 131U, 18433U, 0U, "s_mode" },       { 131U, 0U, 11264U, "g_mode" },
	{ 136U, 18433U, 0U, "s_config" },     { 136U, 0U, 11264U, "g_config" },
	{ 137U, 18433U, 0U, "s_.11dconform" }, { 137U, 0U, 11264U, "g_.11dconform" },
	{ 138U, 0U, 11264U, "g_phycapa" },
	{ 139U, 18433U, 0U, "s_outpower" },   { 139U, 0U, 11264U, "g_outpower" } };
2875  iw_handler prism54_private_handler[25U] = {
	(iw_handler)&prism54_reset,
	(iw_handler)&prism54_get_policy,    (iw_handler)&prism54_set_policy,
	(iw_handler)&prism54_get_mac,       (iw_handler)&prism54_add_mac,
	0,                                  (iw_handler)&prism54_del_mac,
	0,                                  (iw_handler)&prism54_kick_mac,
	0,                                  (iw_handler)&prism54_kick_all,
	(iw_handler)&prism54_get_wpa,       (iw_handler)&prism54_set_wpa,
	0,
	(iw_handler)&prism54_debug_oid,
	(iw_handler)&prism54_debug_get_oid, (iw_handler)&prism54_debug_set_oid,
	(iw_handler)&prism54_get_oid,       (iw_handler)&prism54_set_u32,
	0,                                  (iw_handler)&prism54_set_raw,
	0,                                  (iw_handler)&prism54_set_raw,
	(iw_handler)&prism54_get_prismhdr,  (iw_handler)&prism54_set_prismhdr };
2903  const struct iw_handler_def prism54_handler_def = {
	(const iw_handler *)&prism54_handler,            /* standard,     55 entries */
	55U, 25U, 100U,
	(const iw_handler *)&prism54_private_handler,    /* private,      25 entries */
	(const struct iw_priv_args *)&prism54_private_args, /* private_args, 100 entries */
	&prism54_get_wireless_stats };
2929  void ldv_check_final_state();
2938  void ldv_initialize();
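As a reading aid for the two tables above: the wireless-extensions core selects a handler by offsetting the ioctl number into exactly these arrays. A minimal sketch, assuming the struct iw_handler_def field names from include/net/iw_handler.h and omitting the extra validation the real WE core performs:

	#include <linux/netdevice.h>
	#include <net/iw_handler.h>

	/* SIOCIWFIRST is 0x8B00 and SIOCIWFIRSTPRIV is 0x8BE0 (35808U, the
	 * "reset" command number in prism54_private_args above). */
	static iw_handler get_handler(struct net_device *dev, unsigned int cmd)
	{
		const struct iw_handler_def *d = dev->wireless_handlers;
		unsigned int idx;

		if (cmd >= SIOCIWFIRSTPRIV) {
			idx = cmd - SIOCIWFIRSTPRIV;	/* into prism54_private_handler */
			return idx < d->num_private ? d->private[idx] : NULL;
		}
		idx = cmd - SIOCIWFIRST;		/* into prism54_handler */
		return idx < d->num_standard ? d->standard[idx] : NULL;
	}

The null slots in both tables therefore correspond to ioctl numbers the driver simply does not implement.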
2941  void ldv_handler_precall();
2944  int nondet_int();
2947  int LDV_IN_INTERRUPT = 0;
2950  void ldv_main3_sequence_infinite_withcheck_stateful();
 358  extern struct pv_irq_ops pv_irq_ops;
  12  struct task_struct *get_current___0();
  26  size_t strlcpy(char *, const char *, size_t);
 802  unsigned long int arch_local_save_flags();
 155  int arch_irqs_disabled_flags(unsigned long flags);
  93  void __raw_spin_lock_init(raw_spinlock_t *, const char *, struct lock_class_key *);
  22  void _raw_spin_lock(raw_spinlock_t *);
  39  void _raw_spin_unlock(raw_spinlock_t *);
 301  void spin_lock(spinlock_t *lock);
 341  void spin_unlock(spinlock_t *lock);
  68  void __init_waitqueue_head(wait_queue_head_t *, const char *, struct lock_class_key *);
  67  void __writel(unsigned int val, volatile void *addr);
 174  void *ioremap_nocache(resource_size_t, unsigned long);
 182  void *ioremap(resource_size_t offset, unsigned long size);
 187  void iounmap(volatile void *);
  11  void synchronize_irq(unsigned int);
 914  int dev_set_drvdata(struct device *, void *);
  70  int is_device_dma_capable(struct device *dev);
  53  void debug_dma_alloc_coherent(struct device *, size_t, dma_addr_t, void *);
  56  void debug_dma_free_coherent(struct device *, size_t, void *, dma_addr_t);
  27  extern struct device x86_dma_fallback_dev;
  33  void dma_unmap_single_attrs___1(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
 107  unsigned long int dma_alloc_coherent_mask(struct device *dev, gfp_t gfp);
 119  gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp);
 135  void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs);
 164  void dma_free_attrs(struct device *dev, size_t size, void *vaddr, dma_addr_t bus, struct dma_attrs *attrs);
1877  void free_netdev(struct net_device *);
2077  void netif_start_queue(struct net_device *dev);
2903  int register_netdev(struct net_device *);
  16  void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle);
  23  void pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle);
  30  dma_addr_t pci_map_single___1(struct pci_dev *hwdev, void *ptr, size_t size, int direction);
  36  void pci_unmap_single___1(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int direction);
1412  void pci_set_drvdata(struct pci_dev *pdev, void *data);
  46  int eth_change_mtu(struct net_device *, int);
  47  int eth_validate_addr(struct net_device *);
  49  struct net_device *alloc_etherdev_mqs(int, unsigned int, unsigned int);
  42  int request_firmware(const struct firmware **, const char *, struct device *);
  49  void release_firmware(const struct firmware *);
 194  islpci_state_t islpci_set_state(islpci_private *priv, islpci_state_t new_state);
 198  irqreturn_t islpci_interrupt(int irq, void *config);
 210  int islpci_free_memory(islpci_private *priv);
 211  struct net_device *islpci_setup(struct pci_dev *pdev);
  26  int mgt_init(islpci_private *priv);
  28  void mgt_clean(islpci_private *priv);
  50  int prism54_bring_down(islpci_private *priv);
  51  int islpci_alloc_memory(islpci_private *priv);
  60  const unsigned char dummy_mac[6U] = { 0U, 48U, 180U, 0U, 0U, 0U };	/* 00:30:B4:00:00:00 */
  63  int isl_upload_firmware(islpci_private *priv);
 382  int islpci_open(struct net_device *ndev);
 410  int islpci_close(struct net_device *ndev);
 461  int islpci_upload_fw(islpci_private *priv);
 489  int islpci_reset_if(islpci_private *priv);
 793  void islpci_ethtool_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info);
 800  const struct ethtool_ops islpci_ethtool_ops = { .get_drvinfo = &islpci_ethtool_get_drvinfo };	/* every other callback is 0 in the dump */
 804  const struct net_device_ops islpci_netdev_ops = {
	/* non-zero callbacks only; the dump lists all remaining slots as 0 and
	   renders "&eth_..." as the mojibake "ð_..." through a stray HTML entity */
	.ndo_open            = &islpci_open,
	.ndo_stop            = &islpci_close,
	.ndo_start_xmit      = &islpci_eth_transmit,
	.ndo_set_mac_address = &prism54_set_mac_address,
	.ndo_validate_addr   = &eth_validate_addr,
	.ndo_change_mtu      = &eth_change_mtu,
	.ndo_tx_timeout      = &islpci_eth_tx_timeout };
 814  struct device_type wlan_type = { "wlan", 0, 0, 0, 0, 0 };
 985  void ldv_check_return_value(int);
1003  void ldv_main4_sequence_infinite_withcheck_stateful();
  33  extern struct module __this_module;
 123  int request_threaded_irq(unsigned int, irqreturn_t (*)(int, void *), irqreturn_t (*)(int, void *), unsigned long, const char *, void *);
 128  int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *), unsigned long flags, const char *name, void *dev);
 142  void free_irq(unsigned int, void *);
 913  void *dev_get_drvdata(const struct device *);
 820  int pci_bus_read_config_byte(struct pci_bus *, unsigned int, int, u8 *);
 824  int pci_bus_read_config_dword(struct pci_bus *, unsigned int, int, u32 *);
 826  int pci_bus_write_config_byte(struct pci_bus *, unsigned int, int, u8);
 834  int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val);
 842  int pci_read_config_dword(const struct pci_dev *dev, int where, u32 *val);
 847  int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val);
 902  int pci_enable_device(struct pci_dev *);
 919  void pci_disable_device(struct pci_dev *);
 922  void pci_set_master(struct pci_dev *);
 929  int pci_try_set_mwi(struct pci_dev *);
 930  void pci_clear_mwi(struct pci_dev *);
 975  int pci_save_state(struct pci_dev *);
 976  void pci_restore_state(struct pci_dev *);
1040  int pci_request_regions(struct pci_dev *, const char *);
1042  void pci_release_regions(struct pci_dev *);
1085  int __pci_register_driver(struct pci_driver *, struct module *, const char *);
1094  void pci_unregister_driver(struct pci_driver *);
  65  int dma_set_mask(struct device *, u64);
 105  int pci_set_dma_mask(struct pci_dev *dev, u64 mask);
1407  void *pci_get_drvdata(struct pci_dev *pdev);
2680  void netif_device_detach(struct net_device *);
2682  void netif_device_attach(struct net_device *);
2904  void unregister_netdev(struct net_device *);
 104  void __bug_on_wrong_struct_sizes();
  36  int init_pcitm = 0;
  43  const struct pci_device_id prism54_id_tbl[5U] = {
	/* vendor, device, subvendor, subdevice, class, class_mask, driver_data;
	   4294967295U is 0xFFFFFFFF, i.e. PCI_ANY_ID */
	{ 4704U /* 0x1260 */, 14480U /* 0x3890 */, 4294967295U, 4294967295U, 0U, 0U, 0UL },
	{ 4279U /* 0x10B7 */, 24577U /* 0x6001 */, 4294967295U, 4294967295U, 0U, 0U, 0UL },
	{ 4704U /* 0x1260 */, 14455U /* 0x3877 */, 4294967295U, 4294967295U, 0U, 0U, 0UL },
	{ 4704U /* 0x1260 */, 14470U /* 0x3886 */, 4294967295U, 4294967295U, 0U, 0U, 0UL },
	{ 0U, 0U, 0U, 0U, 0U, 0U, 0UL } };
  75  const struct pci_device_id __mod_pci_device_table = {  };
  77  int prism54_probe(struct pci_dev *pdev, const struct pci_device_id *id);
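pci_map_single___1 and pci_unmap_single___1 above are the verifier's renamed copies of the PCI DMA-compat helpers; the property this rule class checks is that every mapping they return is validated before use. A minimal sketch of the expected pattern, with a hypothetical helper name and buffer that are not taken from the driver source:

	#include <linux/pci.h>
	#include <linux/errno.h>

	/* Illustrative only: map a buffer for device access and check the
	 * result; the pci_dma_mapping_error() call is exactly the check
	 * whose absence this rule reports. */
	static int map_frame(struct pci_dev *pdev, void *buf, size_t len,
			     dma_addr_t *handle)
	{
		dma_addr_t addr = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);

		if (pci_dma_mapping_error(pdev, addr))	/* must precede any use of addr */
			return -ENOMEM;
		*handle = addr;
		return 0;
	}

On the matching teardown path the mapping is released with pci_unmap_single(pdev, *handle, len, PCI_DMA_TODEVICE).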
  78  void prism54_remove(struct pci_dev *pdev);
  79  int prism54_suspend(struct pci_dev *pdev, pm_message_t state);
  80  int prism54_resume(struct pci_dev *pdev);
  82  struct pci_driver prism54_driver = {
	{ 0, 0 },			/* node */
	"prism54",			/* name */
	(const struct pci_device_id *)(&prism54_id_tbl),
	&prism54_probe, &prism54_remove,
	&prism54_suspend, 0, 0, &prism54_resume,
	0, 0, 0,			/* remaining callbacks unset */
	{ 0, 0, 0, 0, (_Bool)0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },	/* embedded struct device_driver, zero-filled */
	{ { { { { { 0U } }, 0U, 0U, 0, { 0, { 0, 0 }, 0, 0, 0UL } } } }, { 0, 0 } } };	/* dynids */
 214  volatile int __in_cleanup_module = 0;
 312  int prism54_module_init();
 326  void prism54_module_exit();
 364  void ldv_check_return_value_probe(int);
 379  void ldv_main5_sequence_infinite_withcheck_stateful();
  67  void __init_rwsem(struct rw_semaphore *, const char *, struct lock_class_key *);
 560  void *kmalloc_array(size_t n, size_t size, gfp_t flags);
 573  void *kcalloc(size_t n, size_t size, gfp_t flags);
  29  const int frequency_list_bg[14U] = { 2412, 2417, 2422, 2427, 2432, 2437, 2442, 2447, 2452, 2457, 2462, 2467, 2472, 2484 };	/* 2.4 GHz channels 1-14, in MHz */
  57  struct oid_t isl_oid[140U] = {
	{ 0, 0, 6, 10 }, { 1, 0, 4, 1 }, { 2, 0, 4, 0 }, { 3, 0, 4, 0 }, { 4, 0, 4, 0 }, { 5, 0, 4, 0 },
	{ 268435456, 0, 4, -127 }, { 268435457, 0, 6, -117 }, { 268435458, 0, 34, -126 }, { 268435459, 0, 4, 1 },
	{ 268435460, 0, 4, 1 }, { 268435461, 0, 4, 11 }, { 268435462, 0, 34, -126 },
	{ 285212672, 0, 4, 1 }, { 285212673, 0, 4, -127 }, { 285212674, 0, 4, 1 }, { 285212675, 0, 4, 1 },
	{ 285212676, 0, 4, 1 }, { 285212677, 0, 4, 1 }, { 285212678, 0, 4, 1 },
	{ 301989888, 0, 4, -127 }, { 301989889, 0, 4, -127 }, { 301989890, 0, 4, -127 }, { 301989891, 0, 4, -127 },
	{ 301989892, 3, 34, -125 }, { 301989896, 0, 4, 0 }, { 301989897, 0, 4, 1 }, { 301989898, 0, 4, 0 },
	{ 436207616, 0, 4, 1 }, { 436207617, 0, 4, 1 }, { 436207618, 0, 4, 1 }, { 436207619, 0, 4, 1 },
	{ 318767104, 0, 4, -127 }, { 318767105, 0, 4, -127 }, { 318767106, 0, 4, -127 }, { 318767107, 0, 4, -127 },
	{ 318767108, 0, 4, -127 }, { 318767109, 0, 4, 1 }, { 318767110, 0, 4, 1 }, { 318767111, 0, 4, 1 },
	{ 486539264, 0, 4, 0 }, { 486539265, 0, 4, 0 }, { 486539266, 0, 4, 0 }, { 486539267, 0, 4, 0 },
	{ 486539268, 0, 4, 1 }, { 486539269, 0, 4, 0 }, { 486539270, 0, 4, 0 },
	{ 452984832, 7, 0, 0 }, { 452984840, 0, 4, 1 },
	{ 335544320, 0, 4, 1 }, { 335544321, 0, 4, 1 }, { 335544322, 0, 4, 1 }, { 335544323, 0, 4, 1 },
	{ 352321536, 0, 4, 1 }, { 352321537, 0, 4, 1 }, { 352321538, 0, 4, 1 }, { 352321539, 2006, 0, 0 },
	{ 352323547, 0, 6, 10 }, { 352323548, 0, 6, 10 }, { 352323549, 0, 6, 10 }, { 352323550, 0, 6, 10 },
	{ 352323551, 0, 6, 10 }, { 352323552, 0, 4, -127 }, { 352323553, 0, 4, 0 }, { 352323554, 0, 4, 0 },
	{ 369098752, 0, 4, 1 }, { 369098753, 0, 4, 1 }, { 369098754, 0, 4, 1 }, { 369098755, 0, 4, 1 },
	{ 369098756, 0, 4, 1 }, { 369098757, 0, 4, 1 }, { 369098758, 0, 4, 1 }, { 369098759, 0, 4, 1 },
	{ 369098760, 0, 4, 1 }, { 369098761, 0, 4, 1 }, { 369098762, 0, 4, 1 }, { 369098763, 0, 4, 1 },
	{ 369098764, 0, 4, 1 },
	{ 385875968, 0, 4, 1 }, { 385875969, 0, 4, 1 }, { 385875970, 0, 4, 1 }, { 385875971, 0, 4, 1 },
	{ 385875972, 0, 4, 1 }, { 385875973, 0, 4, 1 }, { 385875974, 0, 4, 1 }, { 385875975, 0, 4, -127 },
	{ 385875976, 0, 4, -127 }, { 385875977, 0, 4, 1 }, { 385875978, 0, 21, 11 }, { 385875979, 0, 4, 1 },
	{ 385875980, 0, 4, 1 }, { 385875981, 0, 4, 0 }, { 385875982, 0, 4, 0 }, { 385875983, 0, 4, 1 },
	{ 385875984, 0, 21, 11 }, { 385875985, 0, 4, -127 }, { 385875986, 0, 62, 7 }, { 385875987, 0, 4, 1 },
	{ 385875988, 0, 31, 11 }, { 385875989, 0, 4, 0 }, { 385875990, 0, 4, 1 }, { 385875991, 0, 4, 1 },
	{ 385875992, 0, 4, 1 }, { 385875993, 0, 4, 1 }, { 385876000, 0, 21, 11 },
	{ 402653184, 0, 12, 8 }, { 402653185, 0, 12, 8 }, { 402653186, 0, 12, 8 }, { 402653187, 0, 12, 8 },
	{ 402653188, 0, 4, 0 }, { 402653189, 0, 14, 9 }, { 402653190, 0, 14, 9 }, { 402653191, 0, 14, 9 },
	{ 402653192, 0, 14, 9 }, { 402653193, 0, 14, 9 }, { 402653194, 0, 14, 9 }, { 402653195, 0, 14, 9 },
	{ 402653196, 0, 14, 9 },
	{ 503316480, 0, 4, 1 },
	{ 419430400, 0, 4, 1 }, { 419430401, 0, 4, -127 }, { 419430402, 0, 4, 1 }, { 419430403, 0, 6, 12 },
	{ 419430404, 0, 8, -124 },
	{ 469762048, 0, 4, 1 }, { 469762049, 63, 60, 5 }, { 469762114, 0, 60, 5 }, { 469762115, 0, 1444, 6 },
	{ 4278321152U, 0, 4, 0 }, { 4278321153U, 0, 4, 0 }, { 4278321154U, 0, 4, 0 }, { 4278321155U, 0, 4, -127 },
	{ 4278321156U, 0, 4, 0 }, { 4278321157U, 0, 8, 11 }, { 4278321158U, 0, 4, 0 }, { 4278321159U, 0, 4, 0 },
	{ 4278321160U, 0, 4, -127 }, { 4278321164U, 0, 4, -127 }, { 4278321165U, 0, 4, 1 }, { 4278321167U, 0, 4, -127 } };
 347  void mgt_cpu_to_le(int type, void *data);
 649  enum oid_num_t commit_part1[5U] = { 136, 131, 6, 85, 120 };
 657  enum oid_num_t commit_part2[9U] = { 8, 123, 20, 21, 22, 24, 23, 62, 137 };
 674  int mgt_update_addr(islpci_private *priv);
  10  void ldv_error();
  25  int ldv_undef_int();
  26  void *ldv_undef_ptr();
   8  int LDV_DMA_MAP_CALLS = 0;
  11  dma_addr_t ldv_dma_map_page(struct device *dev, struct page *page, size_t offset, size_t size, enum dma_data_direction dir);
  25  int ldv_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
  41  dma_addr_t ldv_dma_map_single(struct device *dev, void *cpu_addr, size_t size, enum dma_data_direction dir);
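The four ldv_* declarations just above are the instrumented model behind this rule; the model's body is not part of the trace. A plausible reconstruction, an assumption based only on the declarations and on ldv_check_final_state() earlier, of how the LDV_DMA_MAP_CALLS counter is meant to work:

	/* Reconstruction (assumed, not from the trace): map raises the
	 * counter, the error check lowers it, and the final-state check
	 * flags a mapping that was never validated. */
	int LDV_DMA_MAP_CALLS = 0;

	dma_addr_t ldv_dma_map_single(struct device *dev, void *cpu_addr,
				      size_t size, enum dma_data_direction dir)
	{
		if (LDV_DMA_MAP_CALLS != 0)
			ldv_error();	/* previous mapping still unchecked */
		LDV_DMA_MAP_CALLS++;
		return (dma_addr_t)ldv_undef_int();	/* nondeterministic handle */
	}

	int ldv_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
	{
		if (LDV_DMA_MAP_CALLS == 0)
			ldv_error();	/* error check without a mapping */
		LDV_DMA_MAP_CALLS--;
		return ldv_undef_int();
	}

	void ldv_check_final_state(void)
	{
		if (LDV_DMA_MAP_CALLS != 0)
			ldv_error();	/* mapped, but never checked */
	}

With that in mind, the trace below follows the harness into prism54_resume() and onward, looking for a path on which the counter is left non-zero.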
	return ;
      }
      {
 381	struct pci_dev *var_group1;
 382	const struct pci_device_id *var_prism54_probe_0_p1;
 383	int res_prism54_probe_0;
 384	struct pm_message var_prism54_suspend_2_p1;
 385	int ldv_s_prism54_driver_pci_driver;
 386	int tmp;
 387	int tmp___0;
 388	int tmp___1;
 439	ldv_s_prism54_driver_pci_driver = 0;
 418	LDV_IN_INTERRUPT = 1;
 436	ldv_handler_precall() { /* Function call is skipped due to function is undefined */}
	{
 314	  int tmp;
 314	  printk("\016Loaded %s driver, version %s\n", (char *)"prism54", (char *)"1.2") { /* Function call is skipped due to function is undefined */}
	  {
 106	    return ;
	  }
 319	  tmp = __pci_register_driver(&prism54_driver, &__this_module, "prism54") { /* Function call is skipped due to function is undefined */}
	}
 443	goto ldv_43278;
 443	tmp___1 = nondet_int() { /* Function call is skipped due to function is undefined */}
 446	goto ldv_43277;
 444	ldv_43277:;
 447	tmp___0 = nondet_int() { /* Function call is skipped due to function is undefined */}
 447	switch (tmp___0)
 520	ldv_handler_precall() { /* Function call is skipped due to function is undefined */}
	{
	}
 285	struct net_device *ndev;
 286	void *tmp;
 287	islpci_private *priv;
 288	void *tmp___0;
 289	islpci_private *tmp___1;
 290	int err;
 291	long tmp___2;
	{
1409	  void *tmp;
1409	  tmp = dev_get_drvdata((const struct device *)(&(pdev->dev))) { /* Function call is skipped due to function is undefined */}
	}
 285	ndev = (struct net_device *)tmp;
 286	tmp___1 = (islpci_private *)0;
 286	priv = tmp___1;
 289	tmp___2 = __builtin_expect(((unsigned long)priv) == ((unsigned long)((islpci_private *)0)), 0L) { /* Function call is skipped due to function is undefined */}
 291	printk("\r%s: got resume request\n", (char *)(&(ndev->name))) { /* Function call is skipped due to function is undefined */}
 293	err = pci_enable_device(pdev) { /* Function call is skipped due to function is undefined */}
 300	pci_restore_state(pdev) { /* Function call is skipped due to function is undefined */}
	{
	}
 554	isl38xx_control_block *cb;
 555	unsigned int counter;
 556	int rc;
 557	isl38xx_fragment *frag;
 554	cb = priv->control_block;
	{
 925	  enum ldv_30720 old_state;
 926	  long tmp;
 927	  long tmp___0;
 928	  long tmp___1;
 929	  long tmp___2;
 930	  long tmp___3;
 928	  old_state = priv->state;
 932	  switch ((unsigned int)new_state)
 950	  goto ldv_43293;
 952	  ldv_43293:;
 958	  tmp = __builtin_expect((priv->state_off) < 0, 0L) { /* Function call is skipped due to function is undefined */}
 959	  tmp___0 = __builtin_expect((priv->state_off) != 0, 0L) { /* Function call is skipped due to function is undefined */}
 960	  tmp___2 = __builtin_expect((priv->state_off) == 0, 0L) { /* Function call is skipped due to function is undefined */}
	}
 564	printk("\017%s: resetting device...\n", (char *)(&(priv->ndev->name))) { /* Function call is skipped due to function is undefined */}
	{
	  { /* 63-64: writel, inline assembler ignored */ }
	  { /* 55-57: readl, inline assembler ignored; returns unsigned int ret */ }
  47	  __const_udelay(42950UL) { /* Function call is skipped due to function is undefined */}
	}
 570	priv->index_mgmt_tx = 0U;
 571	priv->index_mgmt_rx = 0U;
 574	counter = 0U;
	/* unrolled by the verifier: the body below appears six times in the
	   original trace (labels ldv_43223/ldv_43224), once per control-block queue */
 575	(cb->driver_curr_frag)[counter] = 0U;
 576	(cb->device_curr_frag)[counter] = 0U;
 574	counter = counter + 1U;
	/* ... five more identical iterations ... */
 580	counter = 0U;
	/* unrolled four times (labels ldv_43227/ldv_43228), once per management rx slot */
 581	frag = ((isl38xx_fragment *)(&(cb->rx_data_mgmt))) + ((unsigned long)counter);
 582	frag->size = 1500U;
 583	frag->flags = 0U;
 584	frag->address = (unsigned int)(((priv->mgmt_rx)[counter]).pci_addr);
 580	counter = counter + 1U;
	/* ... three more identical iterations ... */
 587	counter = 0U;
	/* unrolled eight times (labels ldv_43230/ldv_43231), once per low-priority rx slot */
 588	((cb->rx_data_low)[counter]).address = (unsigned int)((priv->pci_map_rx_address)[counter]);
 587	counter = counter + 1U;
	/* ... seven more identical iterations ... */
 594	(priv->control_block->driver_curr_frag)[0] = 8U;
 596	(priv->control_block->driver_curr_frag)[4] = 4U;
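Collapsed back to source form, the three unrolled runs above correspond to loops of the following shape. This is a hypothetical condensation: the queue sizes 6, 4 and 8 are inferred from the number of unrolled copies in the trace, and the cpu_to_le32() conversions present in the real driver are as invisible here as they are in the trace:

	/* Reset the shared control block: clear all queue indices, then
	 * re-point the management and low-priority rx fragments at their
	 * pre-mapped DMA addresses. */
	static void reset_control_block(islpci_private *priv)
	{
		isl38xx_control_block *cb = priv->control_block;
		isl38xx_fragment *frag;
		unsigned int counter;

		for (counter = 0; counter < 6; counter++) {
			cb->driver_curr_frag[counter] = 0U;
			cb->device_curr_frag[counter] = 0U;
		}
		for (counter = 0; counter < 4; counter++) {
			frag = &cb->rx_data_mgmt[counter];
			frag->size = 1500U;
			frag->flags = 0U;
			frag->address = (u32)priv->mgmt_rx[counter].pci_addr;
		}
		for (counter = 0; counter < 8; counter++)
			cb->rx_data_low[counter].address =
					(u32)priv->pci_map_rx_address[counter];
	}

Note that the fragment addresses written here come from DMA mappings made earlier, which is why this driver falls under the DMA-mapping rule at all.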
 600	priv->free_data_rx = 0U;
 601	priv->free_data_tx = 0U;
 602	priv->data_low_tx_full = 0U;
	{
 463	  enum ldv_30720 old_state;
 464	  unsigned int rc;
 465	  int tmp;
	  {
 925	    /* islpci_set_state() inlined again (lines 925-960), same shape as the
	       copy spelled out above: the default branch stores new_state into
	       priv->state, then the priv->state_off checks follow */
 936	    priv->state = new_state;
	  }
 468	  printk("\017%s: uploading firmware...\n", (char *)(&(priv->ndev->name))) { /* Function call is skipped due to function is undefined */}
	  {
  65	    unsigned int reg;
  66	    unsigned int rc;
  67	    void *device_base;
  68	    unsigned long __ms;
  69	    unsigned long tmp;
  70	    const struct firmware *fw_entry;
  71	    long fw_len;
  72	    const u32 *fw_ptr;
  73	    int tmp___0;
  74	    long _fw_len;
  75	    u32 *dev_fw_ptr;
  76	    long tmp___1;
  77	    long tmp___2;
  66	    device_base = priv->device_base;
	    { /* 55-57: readl, inline assembler ignored */ }
  70	    reg = reg & 4026531839U;
  71	    reg = reg & 3758096383U;
	    { /* 73, 63-64: writel, inline assembler ignored */ }
  74	    __const_udelay(42950UL) { /* Function call is skipped due to function is undefined */}
  77	    reg = reg | 268435456U;
	    { /* 79, 63-64: writel, inline assembler ignored */ }
  80	    __const_udelay(42950UL) { /* Function call is skipped due to function is undefined */}
  83	    reg = reg & 4026531839U;
	    { /* 85, 63-64: writel, inline assembler ignored */ }
  88	    __ms = 50UL;
  88	    goto ldv_43160;
	    /* mdelay(50) expands to a 50-iteration udelay loop, which the verifier
	       unrolled in full; one iteration is shown, the remaining 49 copies
	       (labels ldv_43159/ldv_43160) are identical */
  88	    tmp = __ms;
  88	    __ms = __ms - 1UL;
  90	    goto ldv_43159;
  89	    ldv_43159:;
  88	    __const_udelay(4295000UL) { /* Function call is skipped due to function is undefined */}
  89	    ldv_43160:;
	    /* ... 49 more identical iterations ... */
  91	    fw_entry = (const struct firmware *)0;
  95	    tmp___0 = request_firmware(&fw_entry, (const char *)(&(priv->firmware)), &(priv->pdev->dev)) { /* Function call is skipped due to function is undefined */}
  95	    rc = (u32 )tmp___0;
 103	    reg = 131072U;
 105	    const u32 *__CPAchecker_TMP_0 = (const u32 *)(fw_entry->data);
 105	    fw_ptr = __CPAchecker_TMP_0;
 106	    long __CPAchecker_TMP_1 = (long)(fw_entry->size);
 106	    fw_len = __CPAchecker_TMP_1;
 116	    goto ldv_43172;
 148	    tmp___2 = __builtin_expect(fw_len != 0L, 0L) { /* Function call is skipped due to function is undefined */}
 151	    printk("\017%s: firmware version: %.8s\n", (char *)(&(priv->ndev->name)), (fw_entry->data) + 40UL) { /* Function call is skipped due to function is undefined */}
 154	    release_firmware(fw_entry) { /* Function call is skipped due to function is undefined */}
	    { /* 55-57: readl, inline assembler ignored */ }
 160	    reg = reg & 4286578687U;
 161	    reg = reg & 4026531839U;
 162	    reg = reg | 536870912U;
	    { /* 164, 63-64: writel + readl, inline assembler ignored */ }
 165	    __const_udelay(42950UL) { /* Function call is skipped due to function is undefined */}
 169	    reg = reg | 268435456U;
	    { /* 172, 63-64: writel, inline assembler ignored */ }
 173	    __const_udelay(42950UL) { /* Function call is skipped due to function is undefined */}
 176	    reg = reg & 4026531839U;
	    { /* 179, 63-64: writel, inline assembler ignored */ }
 180	    __const_udelay(42950UL) { /* Function call is skipped due to function is undefined */}
	  }
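Stripped of the register choreography, the firmware path the trace just walked is the standard request_firmware() bracket. A minimal sketch, assuming a hypothetical wrapper name and abbreviating the error paths and the chunked copy that the trace itself slices away:

	#include <linux/firmware.h>

	static int upload_fw(islpci_private *priv)
	{
		const struct firmware *fw_entry = NULL;
		int rc;

		rc = request_firmware(&fw_entry, priv->firmware, &priv->pdev->dev);
		if (rc)
			return rc;
		/* ... copy fw_entry->data (fw_entry->size bytes) into the
		 * device window at priv->device_base ... */
		release_firmware(fw_entry);
		return 0;
	}

The "%.8s" printk in the trace reads the firmware version string at offset 40 of fw_entry->data before the buffer is released.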
 470	  rc = (u32 )tmp;
 480	  printk("\017%s: firmware upload complete\n", (char *)(&(priv->ndev->name))) { /* Function call is skipped due to function is undefined */}
	  {
 925	    /* islpci_set_state() inlined again; default branch stores new_state */
 936	    priv->state = new_state;
	  }
	}
	{
 491	  long remaining;
 492	  int result;
 493	  int count;
 494	  struct __wait_queue wait;
 495	  struct task_struct *tmp;
 492	  result = -62;
	  {
  14	    struct task_struct *pfo_ret__;
  14	    switch (8UL) /* 15: __case__[8UL == 8UL]; inline assembler ignored */
  14	    goto ldv_3044;
  14	    return pfo_ret__;
	  }
 495	  wait.flags = 0U;
 495	  wait.private = (void *)tmp;
 495	  wait.func = &autoremove_wake_function;
 495	  wait.task_list.next = &(wait.task_list);
 495	  wait.task_list.prev = &(wait.task_list);
 496	  prepare_to_wait(&(priv->reset_done), &wait, 2) { /* Function call is skipped due to function is undefined */}
	  {
	    { /* 63-64: writel; 55-57: readl; inline assembler ignored */ }
 196	    __const_udelay(42950UL) { /* Function call is skipped due to function is undefined */}
	    { /* writel/readl pair, inline assembler ignored */ }
 200	    __const_udelay(42950UL) { /* Function call is skipped due to function is undefined */}
	    { /* writel/readl pair, inline assembler ignored */ }
 208	    __const_udelay(42950UL) { /* Function call is skipped due to function is undefined */}
	  }
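The five field assignments into `wait` above are what the DEFINE_WAIT() macro expands to. The sleep-until-reset logic that follows then reads roughly like this sketch; the readiness test and the loop bound are paraphrased assumptions, while the 250-jiffy sleep and the -62 (-ETIME) default come straight from the trace:

	#include <linux/wait.h>
	#include <linux/sched.h>
	#include <linux/errno.h>

	static int wait_for_reset(islpci_private *priv)
	{
		DEFINE_WAIT(wait);
		int result = -ETIME;	/* -62, as in the trace */
		int count;

		prepare_to_wait(&priv->reset_done, &wait, TASK_UNINTERRUPTIBLE);
		for (count = 0; count < 10; count++) {
			if (priv->state == PRV_STATE_READY) {	/* assumed readiness test */
				result = 0;
				break;
			}
			schedule_timeout_uninterruptible(250);
		}
		finish_wait(&priv->reset_done, &wait);
		return result;
	}

On the path the verifier chose below, the wait succeeds on the first cycle (result is set to 0 immediately after the first timeout).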
ret;;                     }   79               return ;;                   }  200             __const_udelay(42950UL) { /* Function call is skipped due to function is undefined */}                   {                     { 63 Ignored inline assembler code    64                 return ;;                     }                    {    57                 unsigned int ret;    55                 Ignored inline assembler code   55                 return ret;;                     }   79               return ;;                   }  208             __const_udelay(42950UL) { /* Function call is skipped due to function is undefined */}                 }                {   925             enum ldv_30720 old_state;   926             long tmp;   927             long tmp___0;   928             long tmp___1;   929             long tmp___2;   930             long tmp___3;   928             old_state = priv->state;   932             switch ((unsigned int)new_state)default   936             priv->state = new_state;   937             goto ldv_43293;   952             ldv_43293:;   958             tmp = __builtin_expect((priv->state_off) < 0, 0L) { /* Function call is skipped due to function is undefined */}   959             tmp___0 = __builtin_expect((priv->state_off) != 0, 0L) { /* Function call is skipped due to function is undefined */}   960             tmp___2 = __builtin_expect((priv->state_off) == 0, 0L) { /* Function call is skipped due to function is undefined */}                 }  502           count = 0;   502           goto ldv_43215;   504           goto ldv_43214;   503           ldv_43214:;   506           remaining = schedule_timeout_uninterruptible(250L) { /* Function call is skipped due to function is undefined */}   509           result = 0;   510           goto ldv_43213;   520           finish_wait(&(priv->reset_done), &wait) { /* Function call is skipped due to function is undefined */}                 {   925             enum ldv_30720 old_state;   926             long tmp;   927             long tmp___0;   928             long tmp___1;   929             long tmp___2;   930             long tmp___3;   928             old_state = priv->state;   932             switch ((unsigned int)new_state)default   936             priv->state = new_state;   937             goto ldv_43293;   952             ldv_43293:;   958             tmp = __builtin_expect((priv->state_off) < 0, 0L) { /* Function call is skipped due to function is undefined */}   959             tmp___0 = __builtin_expect((priv->state_off) != 0, 0L) { /* Function call is skipped due to function is undefined */}   960             tmp___2 = __builtin_expect((priv->state_off) == 0, 0L) { /* Function call is skipped due to function is undefined */}                 }                {   214             unsigned int reg;   216             reg = 26U;                   {                     { 63 Ignored inline assembler code    64                 return ;;                     }                    {    57                 unsigned int ret;    55                 Ignored inline assembler code   55                 return ret;;                     }   79               return ;;                   }  219             __const_udelay(42950UL) { /* Function call is skipped due to function is undefined */}                 }  536           down_write(&(priv->mib_sem)) { /* Function call is skipped due to function is undefined */}                 {               }  698             int rvalue;   699             enum oid_num_t u;   700             enum 
ldv_30720 tmp;   701             int tmp___0;   702             int tmp___1;   703             int tmp___2;                   {   190               return priv->state;;                   }                  {                 }  594               int i;   595               int ret;   596               struct islpci_mgmtframe *response;   597               struct oid_t *t;   598               void *data;   599               int j;   600               unsigned int oid;   601               long tmp;   602               int r;   603               int tmp___0;   594               ret = 0;   597               i = 0;   597               goto ldv_40593;   599               goto ldv_40592;   598               ldv_40592:;   598               t = ((struct oid_t *)(&isl_oid)) + ((unsigned long)(*(l + ((unsigned long)i))));   599               data = *((priv->mib) + ((unsigned long)(*(l + ((unsigned long)i)))));   600               j = 0;   601               oid = t->oid;   602               tmp = __builtin_expect(((unsigned long)data) == ((unsigned long)((void *)0)), 0L) { /* Function call is skipped due to function is undefined */}   603               goto ldv_40590;   603               int __CPAchecker_TMP_3 = (int)(t->range);   605               goto ldv_40589;   604               ldv_40589:;   604               int __CPAchecker_TMP_0 = (int)(t->size);                     {   445                 islpci_private *priv;   446                 void *tmp;   447                 long wait_cycle_jiffies;   448                 unsigned long tmp___0;   449                 long timeout_left;   450                 int err;   451                 struct __wait_queue wait;   452                 struct task_struct *tmp___1;   453                 int tmp___2;   454                 int timeleft;   455                 struct islpci_mgmtframe *frame;   456                 long tmp___3;   457                 struct islpci_mgmtframe *__ret;   447                 priv = (islpci_private *)tmp;   448                 tmp___0 = msecs_to_jiffies(100U) { /* Function call is skipped due to function is undefined */}   448                 wait_cycle_jiffies = (const long)tmp___0;   449                 timeout_left = wait_cycle_jiffies * 10L;                       {    14                   struct task_struct *pfo_ret__;    14                   switch (8UL)15 __case__[8UL == 8UL] 14 Ignored inline assembler code    14                   goto ldv_2810;    14                   return pfo_ret__;;                       }  451                 wait.flags = 0U;   451                 wait.private = (void *)tmp___1;   451                 wait.func = &autoremove_wake_function;   451                 wait.task_list.next = &(wait.task_list);   451                 wait.task_list.prev = &(wait.task_list);   453                 *recvframe = (struct islpci_mgmtframe *)0;   455                 tmp___2 = mutex_lock_interruptible_nested(&(priv->mgmt_lock), 0U) { /* Function call is skipped due to function is undefined */}   458                 prepare_to_wait(&(priv->mgmt_wqueue), &wait, 2) { /* Function call is skipped due to function is undefined */}                       {   166                   islpci_private *priv;   167                   void *tmp;   168                   isl38xx_control_block *cb;   169                   void *p;   170                   int err;   171                   unsigned long flags;   172                   isl38xx_fragment *frag;   173                   struct islpci_membuf buf;   174                   unsigned int 
curr_frag;   175                   int index;   176                   int frag_len;   177                   unsigned long __len;   178                   void *__ret;   179                   raw_spinlock_t *tmp___0;   166                   priv = (islpci_private *)tmp;   167                   cb = priv->control_block;   170                   err = -22;   176                   frag_len = length + 12;   188                   err = -12;                         {   443                     void *tmp___2;   458                     tmp___2 = __kmalloc(size, flags) { /* Function call is skipped due to function is undefined */}                         }  189                   p = buf.mem;   193                   buf.size = frag_len;                         {    72                     unsigned int tmp;    73                     unsigned int tmp___0;    72                     h->version = 1U;    73                     h->operation = (u8 )operation;    74                     h->device_id = 0U;    75                     h->flags = 0U;    76                     h->oid = tmp;    77                     h->length = tmp___0;                         }  197                   p = p + 12UL;   202                   memset(p, 0, (size_t )length) { /* Function call is skipped due to function is undefined */}   217                   err = -12;                         {    33                     unsigned long long tmp;    32                     struct device *__CPAchecker_TMP_0;    32                     __CPAchecker_TMP_0 = (struct device *)0;    32                     -ldv_dma_map_single_attrs_1(__CPAchecker_TMP_0, ptr, size, (enum dma_data_direction )direction, (struct dma_attrs *)0)                           {                         }   14                       unsigned long long tmp;                             {                           }   58                         unsigned long long nonedetermined;    59                         void *tmp;    58                         tmp = ldv_undef_ptr() { /* Function call is skipped due to function is undefined */}    58                         nonedetermined = (dma_addr_t )tmp;    63                         LDV_DMA_MAP_CALLS = LDV_DMA_MAP_CALLS + 1;                             }  227                   flags = _raw_spin_lock_irqsave(tmp___0) { /* Function call is skipped due to function is undefined */}   228                   curr_frag = (cb->driver_curr_frag)[5];   236                   index = ((int)curr_frag) & 3;   237                   (priv->mgmt_tx)[index] = buf;   238                   frag = ((isl38xx_fragment *)(&(cb->tx_data_mgmt))) + ((unsigned long)index);   239                   frag->size = (unsigned short)frag_len;   240                   frag->flags = 0U;   241                   frag->address = (unsigned int)(buf.pci_addr);   246                   Ignored inline assembler code  247                   (cb->driver_curr_frag)[5] = curr_frag + 1U;                         {   358                     _raw_spin_unlock_irqrestore(&(lock->__annonCompField20.rlock), flags) { /* Function call is skipped due to function is undefined */}                         }                        {                       }  206                     enum ldv_30720 tmp;                           {   190                       return priv->state;;                           }                          {   112                       unsigned int reg;                             {                           }                              { 63 Ignored inline assembler code    64            
               return ;;                               }                              {    57                           unsigned int ret;    55                           Ignored inline assembler code   55                           return ret;;                               }   79                         return ;;                             }  208                     return ;;                         }  463                 err = -110;   464                 goto ldv_43008;   493                 printk("\f%s: timeout waiting for mgmt response\n", (char *)(&(ndev->name))) { /* Function call is skipped due to function is undefined */}   494                 out:;   498                 finish_wait(&(priv->mgmt_wqueue), &wait) { /* Function call is skipped due to function is undefined */}   499                 mutex_unlock(&(priv->mgmt_lock)) { /* Function call is skipped due to function is undefined */}                     }  604               r = tmp___0;   612               printk("\v%s: mgt_commit_list: failure. oid=%08x err=%d\n", (char *)(&(priv->ndev->name)), oid, r) { /* Function call is skipped due to function is undefined */}   615               ret = ret | r;   616               j = j + 1;   617               oid = oid + 1U;   618               unsigned long __CPAchecker_TMP_2 = (unsigned long)(t->size);   618               data = data + __CPAchecker_TMP_2;   619               ldv_40590:;   603               int __CPAchecker_TMP_3 = (int)(t->range);   597               i = i + 1;   598               ldv_40593:;   599               goto ldv_40592;   598               ldv_40592:;   598               t = ((struct oid_t *)(&isl_oid)) + ((unsigned long)(*(l + ((unsigned long)i))));   599               data = *((priv->mib) + ((unsigned long)(*(l + ((unsigned long)i)))));   600               j = 0;   601               oid = t->oid;   602               tmp = __builtin_expect(((unsigned long)data) == ((unsigned long)((void *)0)), 0L) { /* Function call is skipped due to function is undefined */}   603               goto ldv_40590;   603               int __CPAchecker_TMP_3 = (int)(t->range);   605               goto ldv_40589;   604               ldv_40589:;   604               int __CPAchecker_TMP_0 = (int)(t->size);                     {                   }  445                 islpci_private *priv;   446                 void *tmp;   447                 long wait_cycle_jiffies;   448                 unsigned long tmp___0;   449                 long timeout_left;   450                 int err;   451                 struct __wait_queue wait;   452                 struct task_struct *tmp___1;   453                 int tmp___2;   454                 int timeleft;   455                 struct islpci_mgmtframe *frame;   456                 long tmp___3;   457                 struct islpci_mgmtframe *__ret;   447                 priv = (islpci_private *)tmp;   448                 tmp___0 = msecs_to_jiffies(100U) { /* Function call is skipped due to function is undefined */}   448                 wait_cycle_jiffies = (const long)tmp___0;   449                 timeout_left = wait_cycle_jiffies * 10L;                       {    14                   struct task_struct *pfo_ret__;    14                   switch (8UL)15 __case__[8UL == 8UL] 14 Ignored inline assembler code    14                   goto ldv_2810;    14                   return pfo_ret__;;                       }  451                 wait.flags = 0U;   451                 wait.private = (void *)tmp___1;   451                 wait.func = 
&autoremove_wake_function;   451                 wait.task_list.next = &(wait.task_list);   451                 wait.task_list.prev = &(wait.task_list);   453                 *recvframe = (struct islpci_mgmtframe *)0;   455                 tmp___2 = mutex_lock_interruptible_nested(&(priv->mgmt_lock), 0U) { /* Function call is skipped due to function is undefined */}   458                 prepare_to_wait(&(priv->mgmt_wqueue), &wait, 2) { /* Function call is skipped due to function is undefined */}                       {                     }  166                   islpci_private *priv;   167                   void *tmp;   168                   isl38xx_control_block *cb;   169                   void *p;   170                   int err;   171                   unsigned long flags;   172                   isl38xx_fragment *frag;   173                   struct islpci_membuf buf;   174                   unsigned int curr_frag;   175                   int index;   176                   int frag_len;   177                   unsigned long __len;   178                   void *__ret;   179                   raw_spinlock_t *tmp___0;   166                   priv = (islpci_private *)tmp;   167                   cb = priv->control_block;   170                   err = -22;   176                   frag_len = length + 12;   188                   err = -12;                         {   443                     void *tmp___2;   458                     tmp___2 = __kmalloc(size, flags) { /* Function call is skipped due to function is undefined */}                         }  189                   p = buf.mem;   193                   buf.size = frag_len;                         {    72                     unsigned int tmp;    73                     unsigned int tmp___0;    72                     h->version = 1U;    73                     h->operation = (u8 )operation;    74                     h->device_id = 0U;    75                     h->flags = 0U;    76                     h->oid = tmp;    77                     h->length = tmp___0;                         }  197                   p = p + 12UL;   200                   __len = (size_t )length;   200                   __ret = __builtin_memcpy(p, (const void *)data, __len) { /* Function call is skipped due to function is undefined */}   217                   err = -12;                         {                       }   33                     unsigned long long tmp;    32                     struct device *__CPAchecker_TMP_0;    32                     __CPAchecker_TMP_0 = (struct device *)0;    32                     -ldv_dma_map_single_attrs_1(__CPAchecker_TMP_0, ptr, size, (enum dma_data_direction )direction, (struct dma_attrs *)0)                           {                         }   14                       unsigned long long tmp;                             {                           }   58                         unsigned long long nonedetermined;    59                         void *tmp;    58                         tmp = ldv_undef_ptr() { /* Function call is skipped due to function is undefined */}    58                         nonedetermined = (dma_addr_t )tmp;                             } |              Source code         
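The trace above ends inside islpci_mgt_transmit(), where each outgoing management
frame is DMA-mapped (the verifier models the mapping as ldv_dma_map_single_attrs()
and counts it in LDV_DMA_MAP_CALLS), the transaction times out, and the commit
loop immediately maps the next frame. The rule being checked is presumably that
every DMA mapping is verified with dma_mapping_error() before use and eventually
unmapped. A minimal sketch of the pattern such a checker expects, for a PCI
driver of this vintage; the example_* names are illustrative, not the driver's:

    #include <linux/pci.h>
    #include <linux/errno.h>

    /* Sketch only: map a frame, verify the mapping, and unmap it once the
     * device is done with the buffer. */
    static int example_map_frame(struct pci_dev *pdev, void *mem, size_t len,
                                 dma_addr_t *out)
    {
            dma_addr_t addr = pci_map_single(pdev, mem, len, PCI_DMA_TODEVICE);

            if (pci_dma_mapping_error(pdev, addr))
                    return -ENOMEM;  /* never hand a bad address to the device */

            *out = addr;
            return 0;
    }

    static void example_unmap_frame(struct pci_dev *pdev, dma_addr_t addr,
                                    size_t len)
    {
            pci_unmap_single(pdev, addr, len, PCI_DMA_TODEVICE);
    }

The two listings below are the driver sources the trace points into: isl_38xx.c
first, then islpci_dev.c.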
     1 /*
    2  *  Copyright (C) 2002 Intersil Americas Inc.
    3  *  Copyright (C) 2003-2004 Luis R. Rodriguez <mcgrof@ruslug.rutgers.edu>
    4  *
    5  *  This program is free software; you can redistribute it and/or modify
    6  *  it under the terms of the GNU General Public License as published by
    7  *  the Free Software Foundation; either version 2 of the License
    8  *
    9  *  This program is distributed in the hope that it will be useful,
   10  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
   11  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   12  *  GNU General Public License for more details.
   13  *
   14  *  You should have received a copy of the GNU General Public License
   15  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
   16  *
   17  */
   18 
   19 #include <linux/module.h>
   20 #include <linux/types.h>
   21 #include <linux/delay.h>
   22 
   23 #include <asm/uaccess.h>
   24 #include <asm/io.h>
   25 
   26 #include "prismcompat.h"
   27 #include "isl_38xx.h"
   28 #include "islpci_dev.h"
   29 #include "islpci_mgt.h"
   30 
   31 /******************************************************************************
   32     Device Interface & Control functions
   33 ******************************************************************************/
   34 
   35 /**
   36  * isl38xx_disable_interrupts - disable all interrupts
   37  * @device: pci memory base address
   38  *
   39  *  Instructs the device to disable all interrupt reporting by asserting
   40  *  the IRQ line. New events may still show up in the interrupt identification
   41  *  register located at offset %ISL38XX_INT_IDENT_REG.
   42  */
   43 void
   44 isl38xx_disable_interrupts(void __iomem *device)
   45 {
   46 	isl38xx_w32_flush(device, 0x00000000, ISL38XX_INT_EN_REG);
   47 	udelay(ISL38XX_WRITEIO_DELAY);
   48 }
   49 
   50 void
   51 isl38xx_handle_sleep_request(isl38xx_control_block *control_block,
   52 			     int *powerstate, void __iomem *device_base)
   53 {
   54 	/* device requests to go into sleep mode
   55 	 * check whether the transmit queues for data and management are empty */
   56 	if (isl38xx_in_queue(control_block, ISL38XX_CB_TX_DATA_LQ))
   57 		/* data tx queue not empty */
   58 		return;
   59 
   60 	if (isl38xx_in_queue(control_block, ISL38XX_CB_TX_MGMTQ))
   61 		/* management tx queue not empty */
   62 		return;
   63 
   64 	/* check also whether received frames are pending */
   65 	if (isl38xx_in_queue(control_block, ISL38XX_CB_RX_DATA_LQ))
   66 		/* data rx queue not empty */
   67 		return;
   68 
   69 	if (isl38xx_in_queue(control_block, ISL38XX_CB_RX_MGMTQ))
   70 		/* management rx queue not empty */
   71 		return;
   72 
   73 #if VERBOSE > SHOW_ERROR_MESSAGES
   74 	DEBUG(SHOW_TRACING, "Device going to sleep mode\n");
   75 #endif
   76 
   77 	/* all queues are empty, allow the device to go into sleep mode */
   78 	*powerstate = ISL38XX_PSM_POWERSAVE_STATE;
   79 
   80 	/* assert the Sleep interrupt in the Device Interrupt Register */
   81 	isl38xx_w32_flush(device_base, ISL38XX_DEV_INT_SLEEP,
   82 			  ISL38XX_DEV_INT_REG);
   83 	udelay(ISL38XX_WRITEIO_DELAY);
   84 }
   85 
   86 void
   87 isl38xx_handle_wakeup(isl38xx_control_block *control_block,
   88 		      int *powerstate, void __iomem *device_base)
   89 {
   90 	/* device is in active state, update the powerstate flag */
   91 	*powerstate = ISL38XX_PSM_ACTIVE_STATE;
   92 
   93 	/* now check whether there are frames pending for the card */
   94 	if (!isl38xx_in_queue(control_block, ISL38XX_CB_TX_DATA_LQ)
   95 	    && !isl38xx_in_queue(control_block, ISL38XX_CB_TX_MGMTQ))
   96 		return;
   97 
   98 #if VERBOSE > SHOW_ERROR_MESSAGES
   99 	DEBUG(SHOW_ANYTHING, "Wake up handler trigger the device\n");
  100 #endif
  101 
  102 	/* either data or management transmit queue has a frame pending
  103 	 * trigger the device by setting the Update bit in the Device Int reg */
  104 	isl38xx_w32_flush(device_base, ISL38XX_DEV_INT_UPDATE,
  105 			  ISL38XX_DEV_INT_REG);
  106 	udelay(ISL38XX_WRITEIO_DELAY);
  107 }
  108 
  109 void
  110 isl38xx_trigger_device(int asleep, void __iomem *device_base)
  111 {
  112 	u32 reg;
  113 
  114 #if VERBOSE > SHOW_ERROR_MESSAGES
  115 	u32 counter = 0;
  116 	struct timeval current_time;
  117 	DEBUG(SHOW_FUNCTION_CALLS, "isl38xx trigger device\n");
  118 #endif
  119 
  120 	/* check whether the device is in power save mode */
  121 	if (asleep) {
  122 		/* device is in powersave, trigger the device for wakeup */
  123 #if VERBOSE > SHOW_ERROR_MESSAGES
   124 		do_gettimeofday(&current_time);
  125 		DEBUG(SHOW_TRACING, "%08li.%08li Device wakeup triggered\n",
  126 		      current_time.tv_sec, (long)current_time.tv_usec);
  127 
  128 		DEBUG(SHOW_TRACING, "%08li.%08li Device register read %08x\n",
  129 		      current_time.tv_sec, (long)current_time.tv_usec,
  130 		      readl(device_base + ISL38XX_CTRL_STAT_REG));
  131 #endif
  132 
  133 		reg = readl(device_base + ISL38XX_INT_IDENT_REG);
  134 		if (reg == 0xabadface) {
  135 #if VERBOSE > SHOW_ERROR_MESSAGES
   136 			do_gettimeofday(&current_time);
  137 			DEBUG(SHOW_TRACING,
  138 			      "%08li.%08li Device register abadface\n",
  139 			      current_time.tv_sec, (long)current_time.tv_usec);
  140 #endif
  141 			/* read the Device Status Register until Sleepmode bit is set */
  142 			while (reg = readl(device_base + ISL38XX_CTRL_STAT_REG),
  143 			       (reg & ISL38XX_CTRL_STAT_SLEEPMODE) == 0) {
  144 				udelay(ISL38XX_WRITEIO_DELAY);
  145 #if VERBOSE > SHOW_ERROR_MESSAGES
  146 				counter++;
  147 #endif
  148 			}
  149 
  150 #if VERBOSE > SHOW_ERROR_MESSAGES
  151 			DEBUG(SHOW_TRACING,
  152 			      "%08li.%08li Device register read %08x\n",
  153 			      current_time.tv_sec, (long)current_time.tv_usec,
  154 			      readl(device_base + ISL38XX_CTRL_STAT_REG));
   155 			do_gettimeofday(&current_time);
  156 			DEBUG(SHOW_TRACING,
  157 			      "%08li.%08li Device asleep counter %i\n",
  158 			      current_time.tv_sec, (long)current_time.tv_usec,
  159 			      counter);
  160 #endif
  161 		}
  162 		/* assert the Wakeup interrupt in the Device Interrupt Register */
  163 		isl38xx_w32_flush(device_base, ISL38XX_DEV_INT_WAKEUP,
  164 				  ISL38XX_DEV_INT_REG);
  165 
  166 #if VERBOSE > SHOW_ERROR_MESSAGES
  167 		udelay(ISL38XX_WRITEIO_DELAY);
  168 
  169 		/* perform another read on the Device Status Register */
  170 		reg = readl(device_base + ISL38XX_CTRL_STAT_REG);
   171 		do_gettimeofday(&current_time);
  172 		DEBUG(SHOW_TRACING, "%08li.%08li Device register read %08x\n",
  173 		      current_time.tv_sec, (long)current_time.tv_usec, reg);
  174 #endif
  175 	} else {
  176 		/* device is (still) awake  */
  177 #if VERBOSE > SHOW_ERROR_MESSAGES
  178 		DEBUG(SHOW_TRACING, "Device is in active state\n");
  179 #endif
  180 		/* trigger the device by setting the Update bit in the Device Int reg */
  181 
  182 		isl38xx_w32_flush(device_base, ISL38XX_DEV_INT_UPDATE,
  183 				  ISL38XX_DEV_INT_REG);
  184 	}
  185 }
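Worth flagging in isl38xx_trigger_device() above: the polling loop at listing
lines 142-148 waits for the Sleepmode bit with no upper bound, so a hung device
leaves the CPU spinning indefinitely. A bounded variant is straightforward; this
is a sketch only, and MAX_SLEEP_POLLS is an invented constant, not a driver value
(the ISL38XX_* macros come from the driver's isl_38xx.h):

    #include <linux/io.h>
    #include <linux/delay.h>
    #include <linux/errno.h>

    #define MAX_SLEEP_POLLS 10000   /* illustrative bound */

    /* Returns 0 once the Sleepmode bit is observed, -ETIMEDOUT otherwise. */
    static int example_wait_for_sleepmode(void __iomem *device_base)
    {
            unsigned int tries;

            for (tries = 0; tries < MAX_SLEEP_POLLS; tries++) {
                    u32 reg = readl(device_base + ISL38XX_CTRL_STAT_REG);

                    if (reg & ISL38XX_CTRL_STAT_SLEEPMODE)
                            return 0;
                    udelay(ISL38XX_WRITEIO_DELAY);
            }
            return -ETIMEDOUT;
    }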
  186 
  187 void
  188 isl38xx_interface_reset(void __iomem *device_base, dma_addr_t host_address)
  189 {
  190 #if VERBOSE > SHOW_ERROR_MESSAGES
  191 	DEBUG(SHOW_FUNCTION_CALLS, "isl38xx_interface_reset\n");
  192 #endif
  193 
  194 	/* load the address of the control block in the device */
  195 	isl38xx_w32_flush(device_base, host_address, ISL38XX_CTRL_BLK_BASE_REG);
  196 	udelay(ISL38XX_WRITEIO_DELAY);
  197 
  198 	/* set the reset bit in the Device Interrupt Register */
  199 	isl38xx_w32_flush(device_base, ISL38XX_DEV_INT_RESET, ISL38XX_DEV_INT_REG);
  200 	udelay(ISL38XX_WRITEIO_DELAY);
  201 
  202 	/* enable the interrupt for detecting initialization */
  203 
  204 	/* Note: Do not enable other interrupts here. We want the
  205 	 * device to have come up first 100% before allowing any other
  206 	 * interrupts. */
  207 	isl38xx_w32_flush(device_base, ISL38XX_INT_IDENT_INIT, ISL38XX_INT_EN_REG);
  208 	udelay(ISL38XX_WRITEIO_DELAY);  /* allow complete full reset */
  209 }
  210 
  211 void
  212 isl38xx_enable_common_interrupts(void __iomem *device_base)
  213 {
  214 	u32 reg;
  215 
  216 	reg = ISL38XX_INT_IDENT_UPDATE | ISL38XX_INT_IDENT_SLEEP |
  217 	      ISL38XX_INT_IDENT_WAKEUP;
  218 	isl38xx_w32_flush(device_base, reg, ISL38XX_INT_EN_REG);
  219 	udelay(ISL38XX_WRITEIO_DELAY);
  220 }
  221 
  222 int
  223 isl38xx_in_queue(isl38xx_control_block *cb, int queue)
  224 {
  225 	const s32 delta = (le32_to_cpu(cb->driver_curr_frag[queue]) -
  226 			   le32_to_cpu(cb->device_curr_frag[queue]));
  227 
  228 	/* determine the amount of fragments in the queue depending on the type
  229 	 * of the queue, either transmit or receive */
  230 
  231 	BUG_ON(delta < 0);	/* driver ptr must be ahead of device ptr */
  232 
  233 	switch (queue) {
  234 		/* send queues */
  235 	case ISL38XX_CB_TX_MGMTQ:
   236 		BUG_ON(delta > ISL38XX_CB_MGMT_QSIZE);	/* fall through to return delta */
  237 
  238 	case ISL38XX_CB_TX_DATA_LQ:
  239 	case ISL38XX_CB_TX_DATA_HQ:
  240 		BUG_ON(delta > ISL38XX_CB_TX_QSIZE);
  241 		return delta;
  242 
  243 		/* receive queues */
  244 	case ISL38XX_CB_RX_MGMTQ:
  245 		BUG_ON(delta > ISL38XX_CB_MGMT_QSIZE);
  246 		return ISL38XX_CB_MGMT_QSIZE - delta;
  247 
  248 	case ISL38XX_CB_RX_DATA_LQ:
  249 	case ISL38XX_CB_RX_DATA_HQ:
  250 		BUG_ON(delta > ISL38XX_CB_RX_QSIZE);
  251 		return ISL38XX_CB_RX_QSIZE - delta;
  252 	}
  253 	BUG();
  254 	return 0;
   255 }
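A note on the index arithmetic in isl38xx_in_queue() above: driver_curr_frag
counts fragments the driver has produced and device_curr_frag those the device
has consumed, so delta is the number in flight. For transmit queues that
occupancy is returned directly; for receive queues, where the driver
pre-publishes empty fragments, QSIZE - delta is the number of filled fragments
waiting for the driver. A worked example with invented index values:

    /* Illustrative only.  Suppose for the low data queues:
     *   driver_curr_frag[q] == 7, device_curr_frag[q] == 5  =>  delta == 2
     *
     * q == ISL38XX_CB_TX_DATA_LQ: returns 2 (frames still in flight)
     * q == ISL38XX_CB_RX_DATA_LQ: returns ISL38XX_CB_RX_QSIZE - 2
     *     (received frames the driver has not drained yet),
     * which is exactly how the interrupt handler below consumes it: */
    while (isl38xx_in_queue(priv->control_block, ISL38XX_CB_RX_DATA_LQ) != 0)
            islpci_eth_receive(priv);   /* drain pending receive fragments */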
    2 /*
    3  *  Copyright (C) 2002 Intersil Americas Inc.
    4  *  Copyright (C) 2003 Herbert Valerio Riedel <hvr@gnu.org>
    5  *  Copyright (C) 2003 Luis R. Rodriguez <mcgrof@ruslug.rutgers.edu>
    6  *
    7  *  This program is free software; you can redistribute it and/or modify
    8  *  it under the terms of the GNU General Public License as published by
    9  *  the Free Software Foundation; either version 2 of the License
   10  *
   11  *  This program is distributed in the hope that it will be useful,
   12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
   13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   14  *  GNU General Public License for more details.
   15  *
   16  *  You should have received a copy of the GNU General Public License
   17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
   18  *
   19  */
   20 
   21 #include <linux/hardirq.h>
   22 #include <linux/module.h>
   23 #include <linux/slab.h>
   24 
   25 #include <linux/netdevice.h>
   26 #include <linux/ethtool.h>
   27 #include <linux/pci.h>
   28 #include <linux/sched.h>
   29 #include <linux/etherdevice.h>
   30 #include <linux/delay.h>
   31 #include <linux/if_arp.h>
   32 
   33 #include <asm/io.h>
   34 
   35 #include "prismcompat.h"
   36 #include "isl_38xx.h"
   37 #include "isl_ioctl.h"
   38 #include "islpci_dev.h"
   39 #include "islpci_mgt.h"
   40 #include "islpci_eth.h"
   41 #include "oid_mgt.h"
   42 
   43 #define ISL3877_IMAGE_FILE	"isl3877"
   44 #define ISL3886_IMAGE_FILE	"isl3886"
   45 #define ISL3890_IMAGE_FILE	"isl3890"
   46 MODULE_FIRMWARE(ISL3877_IMAGE_FILE);
   47 MODULE_FIRMWARE(ISL3886_IMAGE_FILE);
   48 MODULE_FIRMWARE(ISL3890_IMAGE_FILE);
   49 
   50 static int prism54_bring_down(islpci_private *);
   51 static int islpci_alloc_memory(islpci_private *);
   52 
   53 /* Temporary dummy MAC address to use until firmware is loaded.
   54  * The idea there is that some tools (such as nameif) may query
   55  * the MAC address before the netdev is 'open'. By using a valid
   56  * OUI prefix, they can process the netdev properly.
   57  * Of course, this is not the final/real MAC address. It doesn't
    58  *  matter, as you are supposed to be able to change it anytime via
   59  * ndev->set_mac_address. Jean II */
   60 static const unsigned char	dummy_mac[6] = { 0x00, 0x30, 0xB4, 0x00, 0x00, 0x00 };
   61 
   62 static int
   63 isl_upload_firmware(islpci_private *priv)
   64 {
   65 	u32 reg, rc;
   66 	void __iomem *device_base = priv->device_base;
   67 
   68 	/* clear the RAMBoot and the Reset bit */
   69 	reg = readl(device_base + ISL38XX_CTRL_STAT_REG);
   70 	reg &= ~ISL38XX_CTRL_STAT_RESET;
   71 	reg &= ~ISL38XX_CTRL_STAT_RAMBOOT;
   72 	writel(reg, device_base + ISL38XX_CTRL_STAT_REG);
   73 	wmb();
   74 	udelay(ISL38XX_WRITEIO_DELAY);
   75 
   76 	/* set the Reset bit without reading the register ! */
   77 	reg |= ISL38XX_CTRL_STAT_RESET;
   78 	writel(reg, device_base + ISL38XX_CTRL_STAT_REG);
   79 	wmb();
   80 	udelay(ISL38XX_WRITEIO_DELAY);
   81 
   82 	/* clear the Reset bit */
   83 	reg &= ~ISL38XX_CTRL_STAT_RESET;
   84 	writel(reg, device_base + ISL38XX_CTRL_STAT_REG);
   85 	wmb();
   86 
   87 	/* wait a while for the device to reboot */
   88 	mdelay(50);
   89 
   90 	{
   91 		const struct firmware *fw_entry = NULL;
   92 		long fw_len;
   93 		const u32 *fw_ptr;
   94 
   95 		rc = request_firmware(&fw_entry, priv->firmware, PRISM_FW_PDEV);
   96 		if (rc) {
   97 			printk(KERN_ERR
   98 			       "%s: request_firmware() failed for '%s'\n",
   99 			       "prism54", priv->firmware);
  100 			return rc;
  101 		}
  102 		/* prepare the Direct Memory Base register */
  103 		reg = ISL38XX_DEV_FIRMWARE_ADDRES;
  104 
  105 		fw_ptr = (u32 *) fw_entry->data;
  106 		fw_len = fw_entry->size;
  107 
  108 		if (fw_len % 4) {
  109 			printk(KERN_ERR
  110 			       "%s: firmware '%s' size is not multiple of 32bit, aborting!\n",
  111 			       "prism54", priv->firmware);
  112 			release_firmware(fw_entry);
   113 			return -EILSEQ; /* Illegal byte sequence */
  114 		}
  115 
  116 		while (fw_len > 0) {
  117 			long _fw_len =
  118 			    (fw_len >
  119 			     ISL38XX_MEMORY_WINDOW_SIZE) ?
  120 			    ISL38XX_MEMORY_WINDOW_SIZE : fw_len;
  121 			u32 __iomem *dev_fw_ptr = device_base + ISL38XX_DIRECT_MEM_WIN;
  122 
  123 			/* set the card's base address for writing the data */
  124 			isl38xx_w32_flush(device_base, reg,
  125 					  ISL38XX_DIR_MEM_BASE_REG);
  126 			wmb();	/* be paranoid */
  127 
  128 			/* increment the write address for next iteration */
  129 			reg += _fw_len;
  130 			fw_len -= _fw_len;
  131 
  132 			/* write the data to the Direct Memory Window 32bit-wise */
  133 			/* memcpy_toio() doesn't guarantee 32bit writes :-| */
  134 			while (_fw_len > 0) {
  135 				/* use non-swapping writel() */
  136 				__raw_writel(*fw_ptr, dev_fw_ptr);
  137 				fw_ptr++, dev_fw_ptr++;
  138 				_fw_len -= 4;
  139 			}
  140 
  141 			/* flush PCI posting */
  142 			(void) readl(device_base + ISL38XX_PCI_POSTING_FLUSH);
  143 			wmb();	/* be paranoid again */
  144 
  145 			BUG_ON(_fw_len != 0);
  146 		}
  147 
  148 		BUG_ON(fw_len != 0);
  149 
  150 		/* Firmware version is at offset 40 (also for "newmac") */
  151 		printk(KERN_DEBUG "%s: firmware version: %.8s\n",
  152 		       priv->ndev->name, fw_entry->data + 40);
  153 
  154 		release_firmware(fw_entry);
  155 	}
  156 
  157 	/* now reset the device
  158 	 * clear the Reset & ClkRun bit, set the RAMBoot bit */
  159 	reg = readl(device_base + ISL38XX_CTRL_STAT_REG);
  160 	reg &= ~ISL38XX_CTRL_STAT_CLKRUN;
  161 	reg &= ~ISL38XX_CTRL_STAT_RESET;
  162 	reg |= ISL38XX_CTRL_STAT_RAMBOOT;
  163 	isl38xx_w32_flush(device_base, reg, ISL38XX_CTRL_STAT_REG);
  164 	wmb();
  165 	udelay(ISL38XX_WRITEIO_DELAY);
  166 
  167 	/* set the reset bit latches the host override and RAMBoot bits
  168 	 * into the device for operation when the reset bit is reset */
  169 	reg |= ISL38XX_CTRL_STAT_RESET;
  170 	writel(reg, device_base + ISL38XX_CTRL_STAT_REG);
  171 	/* don't do flush PCI posting here! */
  172 	wmb();
  173 	udelay(ISL38XX_WRITEIO_DELAY);
  174 
  175 	/* clear the reset bit should start the whole circus */
  176 	reg &= ~ISL38XX_CTRL_STAT_RESET;
  177 	writel(reg, device_base + ISL38XX_CTRL_STAT_REG);
  178 	/* don't do flush PCI posting here! */
  179 	wmb();
  180 	udelay(ISL38XX_WRITEIO_DELAY);
  181 
  182 	return 0;
  183 }
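A side note on the MMIO idiom used throughout the listings: isl38xx_w32_flush()
(defined in isl_38xx.h, which is not shown on this page) pairs each register
write with a read so the write cannot linger in a PCI posting buffer, which is
also why the firmware loop above does an explicit readl() of
ISL38XX_PCI_POSTING_FLUSH. A sketch of that idiom, assuming the header follows
the usual write-then-read-back pattern (the real helper may differ in detail):

    #include <linux/io.h>

    static inline void example_w32_flush(void __iomem *base, u32 value,
                                         unsigned long offset)
    {
            writel(value, base + offset);
            (void) readl(base + ISL38XX_PCI_POSTING_FLUSH); /* force the post out */
    }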
  184 
  185 /******************************************************************************
  186     Device Interrupt Handler
  187 ******************************************************************************/
  188 
  189 irqreturn_t
  190 islpci_interrupt(int irq, void *config)
  191 {
  192 	u32 reg;
  193 	islpci_private *priv = config;
  194 	struct net_device *ndev = priv->ndev;
  195 	void __iomem *device = priv->device_base;
  196 	int powerstate = ISL38XX_PSM_POWERSAVE_STATE;
  197 
  198 	/* lock the interrupt handler */
  199 	spin_lock(&priv->slock);
  200 
  201 	/* received an interrupt request on a shared IRQ line
  202 	 * first check whether the device is in sleep mode */
  203 	reg = readl(device + ISL38XX_CTRL_STAT_REG);
  204 	if (reg & ISL38XX_CTRL_STAT_SLEEPMODE)
  205 		/* device is in sleep mode, IRQ was generated by someone else */
  206 	{
  207 #if VERBOSE > SHOW_ERROR_MESSAGES
  208 		DEBUG(SHOW_TRACING, "Assuming someone else called the IRQ\n");
  209 #endif
  210 		spin_unlock(&priv->slock);
  211 		return IRQ_NONE;
  212 	}
  213 
  214 
  215 	/* check whether there is any source of interrupt on the device */
  216 	reg = readl(device + ISL38XX_INT_IDENT_REG);
  217 
  218 	/* also check the contents of the Interrupt Enable Register, because this
  219 	 * will filter out interrupt sources from other devices on the same irq ! */
  220 	reg &= readl(device + ISL38XX_INT_EN_REG);
  221 	reg &= ISL38XX_INT_SOURCES;
  222 
  223 	if (reg != 0) {
  224 		if (islpci_get_state(priv) != PRV_STATE_SLEEP)
  225 			powerstate = ISL38XX_PSM_ACTIVE_STATE;
  226 
  227 		/* reset the request bits in the Identification register */
  228 		isl38xx_w32_flush(device, reg, ISL38XX_INT_ACK_REG);
  229 
  230 #if VERBOSE > SHOW_ERROR_MESSAGES
  231 		DEBUG(SHOW_FUNCTION_CALLS,
  232 		      "IRQ: Identification register 0x%p 0x%x\n", device, reg);
  233 #endif
  234 
  235 		/* check for each bit in the register separately */
  236 		if (reg & ISL38XX_INT_IDENT_UPDATE) {
  237 #if VERBOSE > SHOW_ERROR_MESSAGES
  238 			/* Queue has been updated */
  239 			DEBUG(SHOW_TRACING, "IRQ: Update flag\n");
  240 
  241 			DEBUG(SHOW_QUEUE_INDEXES,
  242 			      "CB drv Qs: [%i][%i][%i][%i][%i][%i]\n",
  243 			      le32_to_cpu(priv->control_block->
  244 					  driver_curr_frag[0]),
  245 			      le32_to_cpu(priv->control_block->
  246 					  driver_curr_frag[1]),
  247 			      le32_to_cpu(priv->control_block->
  248 					  driver_curr_frag[2]),
  249 			      le32_to_cpu(priv->control_block->
  250 					  driver_curr_frag[3]),
  251 			      le32_to_cpu(priv->control_block->
  252 					  driver_curr_frag[4]),
  253 			      le32_to_cpu(priv->control_block->
  254 					  driver_curr_frag[5])
  255 			    );
  256 
  257 			DEBUG(SHOW_QUEUE_INDEXES,
  258 			      "CB dev Qs: [%i][%i][%i][%i][%i][%i]\n",
  259 			      le32_to_cpu(priv->control_block->
  260 					  device_curr_frag[0]),
  261 			      le32_to_cpu(priv->control_block->
  262 					  device_curr_frag[1]),
  263 			      le32_to_cpu(priv->control_block->
  264 					  device_curr_frag[2]),
  265 			      le32_to_cpu(priv->control_block->
  266 					  device_curr_frag[3]),
  267 			      le32_to_cpu(priv->control_block->
  268 					  device_curr_frag[4]),
  269 			      le32_to_cpu(priv->control_block->
  270 					  device_curr_frag[5])
  271 			    );
  272 #endif
  273 
  274 			/* cleanup the data low transmit queue */
  275 			islpci_eth_cleanup_transmit(priv, priv->control_block);
  276 
  277 			/* device is in active state, update the
  278 			 * powerstate flag if necessary */
  279 			powerstate = ISL38XX_PSM_ACTIVE_STATE;
  280 
  281 			/* check all three queues in priority order
  282 			 * call the PIMFOR receive function until the
  283 			 * queue is empty */
  284 			if (isl38xx_in_queue(priv->control_block,
  285 						ISL38XX_CB_RX_MGMTQ) != 0) {
  286 #if VERBOSE > SHOW_ERROR_MESSAGES
  287 				DEBUG(SHOW_TRACING,
  288 				      "Received frame in Management Queue\n");
  289 #endif
  290 				islpci_mgt_receive(ndev);
  291 
  292 				islpci_mgt_cleanup_transmit(ndev);
  293 
  294 				/* Refill slots in receive queue */
  295 				islpci_mgmt_rx_fill(ndev);
  296 
  297 				/* no need to trigger the device, next
  298                                    islpci_mgt_transaction does it */
  299 			}
  300 
  301 			while (isl38xx_in_queue(priv->control_block,
  302 						ISL38XX_CB_RX_DATA_LQ) != 0) {
  303 #if VERBOSE > SHOW_ERROR_MESSAGES
  304 				DEBUG(SHOW_TRACING,
  305 				      "Received frame in Data Low Queue\n");
  306 #endif
  307 				islpci_eth_receive(priv);
  308 			}
  309 
  310 			/* check whether the data transmit queues were full */
  311 			if (priv->data_low_tx_full) {
  312 				/* check whether the transmit is not full anymore */
  313 				if (ISL38XX_CB_TX_QSIZE -
  314 				    isl38xx_in_queue(priv->control_block,
  315 						     ISL38XX_CB_TX_DATA_LQ) >=
  316 				    ISL38XX_MIN_QTHRESHOLD) {
  317 					/* nope, the driver is ready for more network frames */
  318 					netif_wake_queue(priv->ndev);
  319 
  320 					/* reset the full flag */
  321 					priv->data_low_tx_full = 0;
  322 				}
  323 			}
  324 		}
  325 
  326 		if (reg & ISL38XX_INT_IDENT_INIT) {
  327 			/* Device has been initialized */
  328 #if VERBOSE > SHOW_ERROR_MESSAGES
  329 			DEBUG(SHOW_TRACING,
  330 			      "IRQ: Init flag, device initialized\n");
  331 #endif
  332 			wake_up(&priv->reset_done);
  333 		}
  334 
  335 		if (reg & ISL38XX_INT_IDENT_SLEEP) {
  336 			/* Device intends to move to powersave state */
  337 #if VERBOSE > SHOW_ERROR_MESSAGES
  338 			DEBUG(SHOW_TRACING, "IRQ: Sleep flag\n");
  339 #endif
  340 			isl38xx_handle_sleep_request(priv->control_block,
  341 						     &powerstate,
  342 						     priv->device_base);
  343 		}
  344 
  345 		if (reg & ISL38XX_INT_IDENT_WAKEUP) {
  346 			/* Device has been woken up to active state */
  347 #if VERBOSE > SHOW_ERROR_MESSAGES
  348 			DEBUG(SHOW_TRACING, "IRQ: Wakeup flag\n");
  349 #endif
  350 
  351 			isl38xx_handle_wakeup(priv->control_block,
  352 					      &powerstate, priv->device_base);
  353 		}
  354 	} else {
  355 #if VERBOSE > SHOW_ERROR_MESSAGES
  356 		DEBUG(SHOW_TRACING, "Assuming someone else called the IRQ\n");
  357 #endif
  358 		spin_unlock(&priv->slock);
  359 		return IRQ_NONE;
  360 	}
  361 
  362 	/* sleep -> ready */
  363 	if (islpci_get_state(priv) == PRV_STATE_SLEEP
  364 	    && powerstate == ISL38XX_PSM_ACTIVE_STATE)
  365 		islpci_set_state(priv, PRV_STATE_READY);
  366 
  367 	/* !sleep -> sleep */
  368 	if (islpci_get_state(priv) != PRV_STATE_SLEEP
  369 	    && powerstate == ISL38XX_PSM_POWERSAVE_STATE)
  370 		islpci_set_state(priv, PRV_STATE_SLEEP);
  371 
  372 	/* unlock the interrupt handler */
  373 	spin_unlock(&priv->slock);
  374 
  375 	return IRQ_HANDLED;
  376 }
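The handler above is a textbook shared-IRQ handler: read the pending sources,
mask them with what this device actually enabled (so a neighbour's interrupt is
not misattributed), and return IRQ_NONE when the line was not ours. Stripped to
its skeleton, with illustrative names (struct example_priv is invented; the
ISL38XX_* register offsets are the driver's):

    #include <linux/interrupt.h>
    #include <linux/io.h>
    #include <linux/spinlock.h>

    struct example_priv {
            void __iomem *device_base;
            spinlock_t slock;
    };

    static irqreturn_t example_isr(int irq, void *dev_id)
    {
            struct example_priv *priv = dev_id;
            u32 pending;

            spin_lock(&priv->slock);
            pending = readl(priv->device_base + ISL38XX_INT_IDENT_REG);
            pending &= readl(priv->device_base + ISL38XX_INT_EN_REG);
            if (!pending) {
                    spin_unlock(&priv->slock);
                    return IRQ_NONE;  /* another device raised the shared line */
            }

            /* ...service each bit in 'pending', ack via ISL38XX_INT_ACK_REG... */

            spin_unlock(&priv->slock);
            return IRQ_HANDLED;
    }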
  377 
  378 /******************************************************************************
  379     Network Interface Control & Statistical functions
  380 ******************************************************************************/
  381 static int
  382 islpci_open(struct net_device *ndev)
  383 {
  384 	u32 rc;
  385 	islpci_private *priv = netdev_priv(ndev);
  386 
  387 	/* reset data structures, upload firmware and reset device */
  388 	rc = islpci_reset(priv,1);
  389 	if (rc) {
  390 		prism54_bring_down(priv);
  391 		return rc; /* Returns informative message */
  392 	}
  393 
  394 	netif_start_queue(ndev);
  395 
  396 	/* Turn off carrier if in STA or Ad-hoc mode. It will be turned on
  397 	 * once the firmware receives a trap of being associated
  398 	 * (GEN_OID_LINKSTATE). In other modes (AP or WDS or monitor) we
   399  * should just leave the carrier on as it's expected the firmware
  400 	 * won't send us a trigger. */
  401 	if (priv->iw_mode == IW_MODE_INFRA || priv->iw_mode == IW_MODE_ADHOC)
  402 		netif_carrier_off(ndev);
  403 	else
  404 		netif_carrier_on(ndev);
  405 
  406 	return 0;
  407 }
  408 
  409 static int
  410 islpci_close(struct net_device *ndev)
  411 {
  412 	islpci_private *priv = netdev_priv(ndev);
  413 
  414 	printk(KERN_DEBUG "%s: islpci_close ()\n", ndev->name);
  415 
  416 	netif_stop_queue(ndev);
  417 
  418 	return prism54_bring_down(priv);
  419 }
  420 
  421 static int
  422 prism54_bring_down(islpci_private *priv)
  423 {
  424 	void __iomem *device_base = priv->device_base;
  425 	u32 reg;
  426 	/* we are going to shutdown the device */
  427 	islpci_set_state(priv, PRV_STATE_PREBOOT);
  428 
  429 	/* disable all device interrupts in case they weren't */
  430 	isl38xx_disable_interrupts(priv->device_base);
  431 
  432 	/* For safety reasons, we may want to ensure that no DMA transfer is
  433 	 * currently in progress by emptying the TX and RX queues. */
  434 
  435 	/* wait until interrupts have finished executing on other CPUs */
  436 	synchronize_irq(priv->pdev->irq);
  437 
  438 	reg = readl(device_base + ISL38XX_CTRL_STAT_REG);
  439 	reg &= ~(ISL38XX_CTRL_STAT_RESET | ISL38XX_CTRL_STAT_RAMBOOT);
  440 	writel(reg, device_base + ISL38XX_CTRL_STAT_REG);
  441 	wmb();
  442 	udelay(ISL38XX_WRITEIO_DELAY);
  443 
  444 	reg |= ISL38XX_CTRL_STAT_RESET;
  445 	writel(reg, device_base + ISL38XX_CTRL_STAT_REG);
  446 	wmb();
  447 	udelay(ISL38XX_WRITEIO_DELAY);
  448 
  449 	/* clear the Reset bit */
  450 	reg &= ~ISL38XX_CTRL_STAT_RESET;
  451 	writel(reg, device_base + ISL38XX_CTRL_STAT_REG);
  452 	wmb();
  453 
  454 	/* wait a while for the device to reset */
  455 	schedule_timeout_uninterruptible(msecs_to_jiffies(50));
  456 
  457 	return 0;
  458 }
  459 
  460 static int
  461 islpci_upload_fw(islpci_private *priv)
  462 {
  463 	islpci_state_t old_state;
  464 	u32 rc;
  465 
  466 	old_state = islpci_set_state(priv, PRV_STATE_BOOT);
  467 
  468 	printk(KERN_DEBUG "%s: uploading firmware...\n", priv->ndev->name);
  469 
  470 	rc = isl_upload_firmware(priv);
  471 	if (rc) {
  472 		/* error uploading the firmware */
  473 		printk(KERN_ERR "%s: could not upload firmware ('%s')\n",
  474 		       priv->ndev->name, priv->firmware);
  475 
  476 		islpci_set_state(priv, old_state);
  477 		return rc;
  478 	}
  479 
  480 	printk(KERN_DEBUG "%s: firmware upload complete\n",
  481 	       priv->ndev->name);
  482 
  483 	islpci_set_state(priv, PRV_STATE_POSTBOOT);
  484 
  485 	return 0;
  486 }
  487 
  488 static int
  489 islpci_reset_if(islpci_private *priv)
  490 {
  491 	long remaining;
  492 	int result = -ETIME;
  493 	int count;
  494 
  495 	DEFINE_WAIT(wait);
  496 	prepare_to_wait(&priv->reset_done, &wait, TASK_UNINTERRUPTIBLE);
  497 
  498 	/* now the last step is to reset the interface */
  499 	isl38xx_interface_reset(priv->device_base, priv->device_host_address);
  500 	islpci_set_state(priv, PRV_STATE_PREINIT);
  501 
   502 	for (count = 0; count < 2 && result; count++) {
  503 		/* The software reset acknowledge needs about 220 msec here.
  504 		 * Be conservative and wait for up to one second. */
  505 
  506 		remaining = schedule_timeout_uninterruptible(HZ);
  507 
  508 		if(remaining > 0) {
  509 			result = 0;
  510 			break;
  511 		}
  512 
  513 		/* If we're here it's because our IRQ hasn't yet gone through.
  514 		 * Retry a bit more...
  515 		 */
  516 		printk(KERN_ERR "%s: no 'reset complete' IRQ seen - retrying\n",
  517 			priv->ndev->name);
  518 	}
  519 
  520 	finish_wait(&priv->reset_done, &wait);
  521 
  522 	if (result) {
  523 		printk(KERN_ERR "%s: interface reset failure\n", priv->ndev->name);
  524 		return result;
  525 	}
  526 
  527 	islpci_set_state(priv, PRV_STATE_INIT);
  528 
  529 	/* Now that the device is 100% up, let's allow
  530 	 * for the other interrupts --
  531 	 * NOTE: this is not *yet* true since we've only allowed the
  532 	 * INIT interrupt on the IRQ line. We can perhaps poll
  533 	 * the IRQ line until we know for sure the reset went through */
  534 	isl38xx_enable_common_interrupts(priv->device_base);
  535 
  536 	down_write(&priv->mib_sem);
  537 	result = mgt_commit(priv);
  538 	if (result) {
  539 		printk(KERN_ERR "%s: interface reset failure\n", priv->ndev->name);
  540 		up_write(&priv->mib_sem);
  541 		return result;
  542 	}
  543 	up_write(&priv->mib_sem);
  544 
  545 	islpci_set_state(priv, PRV_STATE_READY);
  546 
  547 	printk(KERN_DEBUG "%s: interface reset complete\n", priv->ndev->name);
  548 	return 0;
  549 }
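islpci_reset_if() above shows why prepare_to_wait() is called before the
hardware is kicked: the waiter registers on priv->reset_done first, so the INIT
interrupt (which calls wake_up(&priv->reset_done) in the handler above) cannot
slip through between the trigger and the sleep. The generic shape of that
pattern, as a sketch with placeholder names; note that
schedule_timeout_uninterruptible() re-arms the task state itself, so one
prepare_to_wait() suffices for the retry loop:

    #include <linux/wait.h>
    #include <linux/sched.h>
    #include <linux/errno.h>

    static int example_wait_for_completion_irq(wait_queue_head_t *done)
    {
            int result = -ETIME;
            int count;
            DEFINE_WAIT(wait);

            prepare_to_wait(done, &wait, TASK_UNINTERRUPTIBLE);
            /* ...kick the hardware here; the IRQ may fire any time after... */

            for (count = 0; count < 2 && result; count++) {
                    if (schedule_timeout_uninterruptible(HZ) > 0)
                            result = 0;  /* woken by wake_up() before timeout */
            }

            finish_wait(done, &wait);
            return result;
    }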
  550 
  551 int
  552 islpci_reset(islpci_private *priv, int reload_firmware)
  553 {
  554 	isl38xx_control_block *cb =    /* volatile not needed */
  555 		(isl38xx_control_block *) priv->control_block;
  556 	unsigned counter;
  557 	int rc;
  558 
  559 	if (reload_firmware)
  560 		islpci_set_state(priv, PRV_STATE_PREBOOT);
  561 	else
  562 		islpci_set_state(priv, PRV_STATE_POSTBOOT);
  563 
  564 	printk(KERN_DEBUG "%s: resetting device...\n", priv->ndev->name);
  565 
  566 	/* disable all device interrupts in case they weren't */
  567 	isl38xx_disable_interrupts(priv->device_base);
  568 
  569 	/* flush all management queues */
  570 	priv->index_mgmt_tx = 0;
  571 	priv->index_mgmt_rx = 0;
  572 
  573 	/* clear the indexes in the frame pointer */
  574 	for (counter = 0; counter < ISL38XX_CB_QCOUNT; counter++) {
  575 		cb->driver_curr_frag[counter] = cpu_to_le32(0);
  576 		cb->device_curr_frag[counter] = cpu_to_le32(0);
  577 	}
  578 
  579 	/* reset the mgmt receive queue */
  580 	for (counter = 0; counter < ISL38XX_CB_MGMT_QSIZE; counter++) {
  581 		isl38xx_fragment *frag = &cb->rx_data_mgmt[counter];
  582 		frag->size = cpu_to_le16(MGMT_FRAME_SIZE);
  583 		frag->flags = 0;
  584 		frag->address = cpu_to_le32(priv->mgmt_rx[counter].pci_addr);
  585 	}
  586 
  587 	for (counter = 0; counter < ISL38XX_CB_RX_QSIZE; counter++) {
  588 		cb->rx_data_low[counter].address =
  589 		    cpu_to_le32((u32) priv->pci_map_rx_address[counter]);
  590 	}
  591 
  592 	/* since the receive queues are filled with empty fragments, now we can
  593 	 * set the corresponding indexes in the Control Block */
  594 	priv->control_block->driver_curr_frag[ISL38XX_CB_RX_DATA_LQ] =
  595 	    cpu_to_le32(ISL38XX_CB_RX_QSIZE);
  596 	priv->control_block->driver_curr_frag[ISL38XX_CB_RX_MGMTQ] =
  597 	    cpu_to_le32(ISL38XX_CB_MGMT_QSIZE);
  598 
  599 	/* reset the remaining real index registers and full flags */
  600 	priv->free_data_rx = 0;
  601 	priv->free_data_tx = 0;
  602 	priv->data_low_tx_full = 0;
  603 
  604 	if (reload_firmware) { /* Should we load the firmware ? */
  605 	/* now that the data structures are cleaned up, upload
  606 	 * firmware and reset interface */
  607 		rc = islpci_upload_fw(priv);
  608 		if (rc) {
  609 			printk(KERN_ERR "%s: islpci_reset: failure\n",
  610 				priv->ndev->name);
  611 			return rc;
  612 		}
  613 	}
  614 
  615 	/* finally reset interface */
  616 	rc = islpci_reset_if(priv);
  617 	if (rc)
  618 		printk(KERN_ERR "prism54: Your card/socket may be faulty, or IRQ line too busy :(\n");
  619 	return rc;
  620 }
  621 
  622 /******************************************************************************
  623     Network device configuration functions
  624 ******************************************************************************/
  625 static int
  626 islpci_alloc_memory(islpci_private *priv)
  627 {
  628 	int counter;
  629 
  630 #if VERBOSE > SHOW_ERROR_MESSAGES
  631 	printk(KERN_DEBUG "islpci_alloc_memory\n");
  632 #endif
  633 
  634 	/* remap the PCI device base address to accessible */
  635 	if (!(priv->device_base =
  636 	      ioremap(pci_resource_start(priv->pdev, 0),
  637 		      ISL38XX_PCI_MEM_SIZE))) {
  638 		/* error in remapping the PCI device memory address range */
  639 		printk(KERN_ERR "PCI memory remapping failed\n");
  640 		return -1;
  641 	}
  642 
  643 	/* memory layout for consistent DMA region:
  644 	 *
  645 	 * Area 1: Control Block for the device interface
  646 	 * Area 2: Power Save Mode Buffer for temporary frame storage. Be aware that
  647 	 *         the number of supported stations in the AP determines the minimal
  648 	 *         size of the buffer !
  649 	 */
  650 
  651 	/* perform the allocation */
  652 	priv->driver_mem_address = pci_alloc_consistent(priv->pdev,
  653 							HOST_MEM_BLOCK,
  654 							&priv->
  655 							device_host_address);
  656 
  657 	if (!priv->driver_mem_address) {
  658 		/* error allocating the block of PCI memory */
  659 		printk(KERN_ERR "%s: could not allocate DMA memory, aborting!",
  660 		       "prism54");
  661 		return -1;
  662 	}
  663 
  664 	/* assign the Control Block to the first address of the allocated area */
  665 	priv->control_block =
  666 	    (isl38xx_control_block *) priv->driver_mem_address;
  667 
  668 	/* set the Power Save Buffer pointer directly behind the CB */
  669 	priv->device_psm_buffer =
  670 		priv->device_host_address + CONTROL_BLOCK_SIZE;
  671 
  672 	/* make sure all buffer pointers are initialized */
  673 	for (counter = 0; counter < ISL38XX_CB_QCOUNT; counter++) {
  674 		priv->control_block->driver_curr_frag[counter] = cpu_to_le32(0);
  675 		priv->control_block->device_curr_frag[counter] = cpu_to_le32(0);
  676 	}
  677 
  678 	priv->index_mgmt_rx = 0;
  679 	memset(priv->mgmt_rx, 0, sizeof(priv->mgmt_rx));
  680 	memset(priv->mgmt_tx, 0, sizeof(priv->mgmt_tx));
  681 
  682 	/* allocate rx queue for management frames */
  683 	if (islpci_mgmt_rx_fill(priv->ndev) < 0)
  684 		goto out_free;
  685 
  686 	/* now get the data rx skb's */
  687 	memset(priv->data_low_rx, 0, sizeof (priv->data_low_rx));
  688 	memset(priv->pci_map_rx_address, 0, sizeof (priv->pci_map_rx_address));
  689 
  690 	for (counter = 0; counter < ISL38XX_CB_RX_QSIZE; counter++) {
  691 		struct sk_buff *skb;
  692 
   693 		/* allocate an sk_buff for received data frames storage;
   694 		 * each received frame consists of 1 fragment and includes
   695 		 * any required alignment operations */
  696 		if (!(skb = dev_alloc_skb(MAX_FRAGMENT_SIZE_RX + 2))) {
  697 			/* error allocating an sk_buff structure elements */
  698 			printk(KERN_ERR "Error allocating skb.\n");
  699 			skb = NULL;
  700 			goto out_free;
  701 		}
  702 		skb_reserve(skb, (4 - (long) skb->data) & 0x03);
  703 		/* add the new allocated sk_buff to the buffer array */
  704 		priv->data_low_rx[counter] = skb;
  705 
  706 		/* map the allocated skb data area to pci */
  707 		priv->pci_map_rx_address[counter] =
  708 		    pci_map_single(priv->pdev, (void *) skb->data,
  709 				   MAX_FRAGMENT_SIZE_RX + 2,
  710 				   PCI_DMA_FROMDEVICE);
  711 		if (!priv->pci_map_rx_address[counter]) {
  712 			/* error mapping the buffer to device
  713 			   accessible memory address */
  714 			printk(KERN_ERR "failed to map skb DMA'able\n");
  715 			goto out_free;
  716 		}
  717 	}
  718 
  719 	prism54_acl_init(&priv->acl);
  720 	prism54_wpa_bss_ie_init(priv);
  721 	if (mgt_init(priv))
  722 		goto out_free;
  723 
  724 	return 0;
  725  out_free:
  726 	islpci_free_memory(priv);
  727 	return -1;
  728 }
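One detail in the receive-buffer loop above connects back to the error trace:
the mapping at listing lines 707-711 is checked with
if (!priv->pci_map_rx_address[counter]), i.e. only a zero address is treated as
failure. The portable test, and presumably the one the verifier's DMA rule
looks for, is pci_dma_mapping_error(). A sketch of that loop body with the
stronger check (the surrounding identifiers are the driver's own):

    priv->pci_map_rx_address[counter] =
            pci_map_single(priv->pdev, (void *) skb->data,
                           MAX_FRAGMENT_SIZE_RX + 2, PCI_DMA_FROMDEVICE);
    if (pci_dma_mapping_error(priv->pdev,
                              priv->pci_map_rx_address[counter])) {
            priv->pci_map_rx_address[counter] = 0;
            printk(KERN_ERR "failed to map skb for DMA\n");
            goto out_free;  /* islpci_free_memory() unwinds what was allocated */
    }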
  729 
  730 int
  731 islpci_free_memory(islpci_private *priv)
  732 {
  733 	int counter;
  734 
  735 	if (priv->device_base)
  736 		iounmap(priv->device_base);
  737 	priv->device_base = NULL;
  738 
  739 	/* free consistent DMA area... */
  740 	if (priv->driver_mem_address)
  741 		pci_free_consistent(priv->pdev, HOST_MEM_BLOCK,
  742 				    priv->driver_mem_address,
  743 				    priv->device_host_address);
  744 
  745 	/* clear some dangling pointers */
  746 	priv->driver_mem_address = NULL;
  747 	priv->device_host_address = 0;
  748 	priv->device_psm_buffer = 0;
  749 	priv->control_block = NULL;
  750 
  751         /* clean up mgmt rx buffers */
  752         for (counter = 0; counter < ISL38XX_CB_MGMT_QSIZE; counter++) {
  753 		struct islpci_membuf *buf = &priv->mgmt_rx[counter];
  754 		if (buf->pci_addr)
  755 			pci_unmap_single(priv->pdev, buf->pci_addr,
  756 					 buf->size, PCI_DMA_FROMDEVICE);
  757 		buf->pci_addr = 0;
  758 		kfree(buf->mem);
  759 		buf->size = 0;
  760 		buf->mem = NULL;
  761         }
  762 
  763 	/* clean up data rx buffers */
  764 	for (counter = 0; counter < ISL38XX_CB_RX_QSIZE; counter++) {
  765 		if (priv->pci_map_rx_address[counter])
  766 			pci_unmap_single(priv->pdev,
  767 					 priv->pci_map_rx_address[counter],
  768 					 MAX_FRAGMENT_SIZE_RX + 2,
  769 					 PCI_DMA_FROMDEVICE);
  770 		priv->pci_map_rx_address[counter] = 0;
  771 
  772 		if (priv->data_low_rx[counter])
  773 			dev_kfree_skb(priv->data_low_rx[counter]);
  774 		priv->data_low_rx[counter] = NULL;
  775 	}
  776 
  777 	/* Free the access control list and the WPA list */
  778 	prism54_acl_clean(&priv->acl);
  779 	prism54_wpa_bss_ie_clean(priv);
  780 	mgt_clean(priv);
  781 
  782 	return 0;
  783 }
  784 
  785 #if 0
  786 static void
  787 islpci_set_multicast_list(struct net_device *dev)
  788 {
  789 	/* put device into promisc mode and let network layer handle it */
  790 }
  791 #endif
  792 
  793 static void islpci_ethtool_get_drvinfo(struct net_device *dev,
  794                                        struct ethtool_drvinfo *info)
  795 {
  796 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
  797 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
  798 }
  799 
  800 static const struct ethtool_ops islpci_ethtool_ops = {
  801 	.get_drvinfo = islpci_ethtool_get_drvinfo,
  802 };
  803 
  804 static const struct net_device_ops islpci_netdev_ops = {
  805 	.ndo_open 		= islpci_open,
  806 	.ndo_stop		= islpci_close,
  807 	.ndo_start_xmit		= islpci_eth_transmit,
  808 	.ndo_tx_timeout		= islpci_eth_tx_timeout,
  809 	.ndo_set_mac_address 	= prism54_set_mac_address,
  810 	.ndo_change_mtu		= eth_change_mtu,
  811 	.ndo_validate_addr	= eth_validate_addr,
  812 };
  813 
  814 static struct device_type wlan_type = {
  815 	.name	= "wlan",
  816 };
  817 
  818 struct net_device *
  819 islpci_setup(struct pci_dev *pdev)
  820 {
  821 	islpci_private *priv;
  822 	struct net_device *ndev = alloc_etherdev(sizeof (islpci_private));
  823 
  824 	if (!ndev)
  825 		return ndev;
  826 
  827 	pci_set_drvdata(pdev, ndev);
  828 	SET_NETDEV_DEV(ndev, &pdev->dev);
  829 	SET_NETDEV_DEVTYPE(ndev, &wlan_type);
  830 
  831 	/* setup the structure members */
  832 	ndev->base_addr = pci_resource_start(pdev, 0);
  833 	ndev->irq = pdev->irq;
  834 
  835 	/* initialize the function pointers */
  836 	ndev->netdev_ops = &islpci_netdev_ops;
  837 	ndev->wireless_handlers = &prism54_handler_def;
  838 	ndev->ethtool_ops = &islpci_ethtool_ops;
  839 
  840 	/* ndev->set_multicast_list = &islpci_set_multicast_list; */
  841 	ndev->addr_len = ETH_ALEN;
  842 	/* Get a non-zero dummy MAC address for nameif. Jean II */
  843 	memcpy(ndev->dev_addr, dummy_mac, ETH_ALEN);
  844 
  845 	ndev->watchdog_timeo = ISLPCI_TX_TIMEOUT;
  846 
  847 	/* allocate a private device structure to the network device  */
  848 	priv = netdev_priv(ndev);
  849 	priv->ndev = ndev;
  850 	priv->pdev = pdev;
  851 	priv->monitor_type = ARPHRD_IEEE80211;
  852 	priv->ndev->type = (priv->iw_mode == IW_MODE_MONITOR) ?
  853 		priv->monitor_type : ARPHRD_ETHER;
  854 
  855 	/* Add pointers to enable iwspy support. */
  856 	priv->wireless_data.spy_data = &priv->spy_data;
  857 	ndev->wireless_data = &priv->wireless_data;
  858 
  859 	/* save the start and end address of the PCI memory area */
  860 	ndev->mem_start = (unsigned long) priv->device_base;
  861 	ndev->mem_end = ndev->mem_start + ISL38XX_PCI_MEM_SIZE;
  862 
  863 #if VERBOSE > SHOW_ERROR_MESSAGES
  864 	DEBUG(SHOW_TRACING, "PCI Memory remapped to 0x%p\n", priv->device_base);
  865 #endif
  866 
  867 	init_waitqueue_head(&priv->reset_done);
  868 
  869 	/* init the queue read locks, process wait counter */
  870 	mutex_init(&priv->mgmt_lock);
  871 	priv->mgmt_received = NULL;
  872 	init_waitqueue_head(&priv->mgmt_wqueue);
  873 	mutex_init(&priv->stats_lock);
  874 	spin_lock_init(&priv->slock);
  875 
  876 	/* init state machine with off#1 state */
  877 	priv->state = PRV_STATE_OFF;
  878 	priv->state_off = 1;
  879 
   880 	/* initialize workqueues */
  881 	INIT_WORK(&priv->stats_work, prism54_update_stats);
  882 	priv->stats_timestamp = 0;
  883 
  884 	INIT_WORK(&priv->reset_task, islpci_do_reset_and_wake);
  885 	priv->reset_task_pending = 0;
  886 
  887 	/* allocate various memory areas */
  888 	if (islpci_alloc_memory(priv))
  889 		goto do_free_netdev;
  890 
  891 	/* select the firmware file depending on the device id */
  892 	switch (pdev->device) {
  893 	case 0x3877:
  894 		strcpy(priv->firmware, ISL3877_IMAGE_FILE);
  895 		break;
  896 
  897 	case 0x3886:
  898 		strcpy(priv->firmware, ISL3886_IMAGE_FILE);
  899 		break;
  900 
  901 	default:
  902 		strcpy(priv->firmware, ISL3890_IMAGE_FILE);
  903 		break;
  904 	}
  905 
  906 	if (register_netdev(ndev)) {
  907 		DEBUG(SHOW_ERROR_MESSAGES,
  908 		      "ERROR: register_netdev() failed\n");
  909 		goto do_islpci_free_memory;
  910 	}
  911 
  912 	return ndev;
  913 
  914       do_islpci_free_memory:
  915 	islpci_free_memory(priv);
  916       do_free_netdev:
  917 	free_netdev(ndev);
  918 	priv = NULL;
  919 	return NULL;
  920 }
  921 
  922 islpci_state_t
  923 islpci_set_state(islpci_private *priv, islpci_state_t new_state)
  924 {
  925 	islpci_state_t old_state;
  926 
  927 	/* lock */
  928 	old_state = priv->state;
  929 
  930 	/* this means either a race condition or some serious error in
  931 	 * the driver code */
  932 	switch (new_state) {
  933 	case PRV_STATE_OFF:
   934 		priv->state_off++;	/* fall through to record the new state */
  935 	default:
  936 		priv->state = new_state;
  937 		break;
  938 
  939 	case PRV_STATE_PREBOOT:
  940 		/* there are actually many off-states, enumerated by
  941 		 * state_off */
  942 		if (old_state == PRV_STATE_OFF)
  943 			priv->state_off--;
  944 
   945 		/* only if state_off is now zero does it mean we either
   946 		 * were in the off#1 state or came here from
   947 		 * somewhere else */
  948 		if (!priv->state_off)
  949 			priv->state = new_state;
  950 		break;
  951 	}
  952 #if 0
  953 	printk(KERN_DEBUG "%s: state transition %d -> %d (off#%d)\n",
  954 	       priv->ndev->name, old_state, new_state, priv->state_off);
  955 #endif
  956 
  957 	/* invariants */
  958 	BUG_ON(priv->state_off < 0);
  959 	BUG_ON(priv->state_off && (priv->state != PRV_STATE_OFF));
  960 	BUG_ON(!priv->state_off && (priv->state == PRV_STATE_OFF));
  961 
  962 	/* unlock */
  963 	return old_state;
  964 }
  965 
  966 
  967 
  968 
  969 
  970 /* LDV_COMMENT_BEGIN_MAIN */
  971 #ifdef LDV_MAIN4_sequence_infinite_withcheck_stateful
  972 
  973 /*###########################################################################*/
  974 
  975 /*############## Driver Environment Generator 0.2 output ####################*/
  976 
  977 /*###########################################################################*/
  978 
  979 
  980 
   981 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test if all kernel resources are correctly released by the driver before it is unloaded. */
  982 void ldv_check_final_state(void);
  983 
  984 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result. */
  985 void ldv_check_return_value(int res);
  986 
  987 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result of probe() function. */
  988 void ldv_check_return_value_probe(int res);
  989 
  990 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Initializes the model. */
  991 void ldv_initialize(void);
  992 
  993 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Reinitializes the model between distinct model function calls. */
  994 void ldv_handler_precall(void);
  995 
   996 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Returns an arbitrary integer value. */
  997 int nondet_int(void);
  998 
  999 /* LDV_COMMENT_VAR_DECLARE_LDV Special variable for LDV verifier. */
 1000 int LDV_IN_INTERRUPT;
 1001 
 1002 /* LDV_COMMENT_FUNCTION_MAIN Main function for LDV verifier. */
 1003 void ldv_main4_sequence_infinite_withcheck_stateful(void) {
 1004 
 1005 
 1006 
 1007 	/* LDV_COMMENT_BEGIN_VARIABLE_DECLARATION_PART */
 1008 	/*============================= VARIABLE DECLARATION PART   =============================*/
 1009 	/** STRUCT: struct type: ethtool_ops, struct name: islpci_ethtool_ops **/
 1010 	/* content: static void islpci_ethtool_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)*/
 1011 	/* LDV_COMMENT_BEGIN_PREP */
 1012 	#define ISL3877_IMAGE_FILE	"isl3877"
 1013 	#define ISL3886_IMAGE_FILE	"isl3886"
 1014 	#define ISL3890_IMAGE_FILE	"isl3890"
 1015 	#if VERBOSE > SHOW_ERROR_MESSAGES
 1016 	#endif
 1017 	#if VERBOSE > SHOW_ERROR_MESSAGES
 1018 	#endif
 1019 	#if VERBOSE > SHOW_ERROR_MESSAGES
 1020 	#endif
 1021 	#if VERBOSE > SHOW_ERROR_MESSAGES
 1022 	#endif
 1023 	#if VERBOSE > SHOW_ERROR_MESSAGES
 1024 	#endif
 1025 	#if VERBOSE > SHOW_ERROR_MESSAGES
 1026 	#endif
 1027 	#if VERBOSE > SHOW_ERROR_MESSAGES
 1028 	#endif
 1029 	#if VERBOSE > SHOW_ERROR_MESSAGES
 1030 	#endif
 1031 	#if VERBOSE > SHOW_ERROR_MESSAGES
 1032 	#endif
 1033 	#if VERBOSE > SHOW_ERROR_MESSAGES
 1034 	#endif
 1035 	#if 0
 1036 	#endif
 1037 	/* LDV_COMMENT_END_PREP */
 1038 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "islpci_ethtool_get_drvinfo" */
 1039 	struct net_device * var_group1;
 1040 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "islpci_ethtool_get_drvinfo" */
 1041 	struct ethtool_drvinfo * var_group2;
 1042 	/* LDV_COMMENT_BEGIN_PREP */
 1043 	#if VERBOSE > SHOW_ERROR_MESSAGES
 1044 	#endif
 1045 	#if 0
 1046 	#endif
 1047 	/* LDV_COMMENT_END_PREP */
 1048 
 1049 	/** STRUCT: struct type: net_device_ops, struct name: islpci_netdev_ops **/
 1050 	/* content: static int islpci_open(struct net_device *ndev)*/
 1051 	/* LDV_COMMENT_BEGIN_PREP */
 1052 	#define ISL3877_IMAGE_FILE	"isl3877"
 1053 	#define ISL3886_IMAGE_FILE	"isl3886"
 1054 	#define ISL3890_IMAGE_FILE	"isl3890"
 1055 	#if VERBOSE > SHOW_ERROR_MESSAGES
 1056 	#endif
 1057 	#if VERBOSE > SHOW_ERROR_MESSAGES
 1058 	#endif
 1059 	#if VERBOSE > SHOW_ERROR_MESSAGES
 1060 	#endif
 1061 	#if VERBOSE > SHOW_ERROR_MESSAGES
 1062 	#endif
 1063 	#if VERBOSE > SHOW_ERROR_MESSAGES
 1064 	#endif
 1065 	#if VERBOSE > SHOW_ERROR_MESSAGES
 1066 	#endif
 1067 	#if VERBOSE > SHOW_ERROR_MESSAGES
 1068 	#endif
 1069 	#if VERBOSE > SHOW_ERROR_MESSAGES
 1070 	#endif
 1071 	#if VERBOSE > SHOW_ERROR_MESSAGES
 1072 	#endif
 1073 	/* LDV_COMMENT_END_PREP */
 1074 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "islpci_open" */
 1075 	static int res_islpci_open_2;
 1076 	/* LDV_COMMENT_BEGIN_PREP */
 1077 	#if VERBOSE > SHOW_ERROR_MESSAGES
 1078 	#endif
 1079 	#if 0
 1080 	#endif
 1081 	#if VERBOSE > SHOW_ERROR_MESSAGES
 1082 	#endif
 1083 	#if 0
 1084 	#endif
 1085 	/* LDV_COMMENT_END_PREP */
 1086 	/* content: static int islpci_close(struct net_device *ndev)*/
 1087 	/* LDV_COMMENT_BEGIN_PREP */
 1088 	#define ISL3877_IMAGE_FILE	"isl3877"
 1089 	#define ISL3886_IMAGE_FILE	"isl3886"
 1090 	#define ISL3890_IMAGE_FILE	"isl3890"
 1091 	#if VERBOSE > SHOW_ERROR_MESSAGES
 1092 	#endif
 1093 	#if VERBOSE > SHOW_ERROR_MESSAGES
 1094 	#endif
 1095 	#if VERBOSE > SHOW_ERROR_MESSAGES
 1096 	#endif
 1097 	#if VERBOSE > SHOW_ERROR_MESSAGES
 1098 	#endif
 1099 	#if VERBOSE > SHOW_ERROR_MESSAGES
 1100 	#endif
 1101 	#if VERBOSE > SHOW_ERROR_MESSAGES
 1102 	#endif
 1103 	#if VERBOSE > SHOW_ERROR_MESSAGES
 1104 	#endif
 1105 	#if VERBOSE > SHOW_ERROR_MESSAGES
 1106 	#endif
 1107 	#if VERBOSE > SHOW_ERROR_MESSAGES
 1108 	#endif
 1109 	/* LDV_COMMENT_END_PREP */
 1110 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "islpci_close" */
 1111 	static int res_islpci_close_3;
 1112 	/* LDV_COMMENT_BEGIN_PREP */
 1113 	#if VERBOSE > SHOW_ERROR_MESSAGES
 1114 	#endif
 1115 	#if 0
 1116 	#endif
 1117 	#if VERBOSE > SHOW_ERROR_MESSAGES
 1118 	#endif
 1119 	#if 0
 1120 	#endif
 1121 	/* LDV_COMMENT_END_PREP */
 1122 
 1123 	/** CALLBACK SECTION request_irq **/
 1124 	/* content: irqreturn_t islpci_interrupt(int irq, void *config)*/
 1125 	/* LDV_COMMENT_BEGIN_PREP */
 1126 	#define ISL3877_IMAGE_FILE	"isl3877"
 1127 	#define ISL3886_IMAGE_FILE	"isl3886"
 1128 	#define ISL3890_IMAGE_FILE	"isl3890"
 1129 	/* LDV_COMMENT_END_PREP */
 1130 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "islpci_interrupt" */
 1131 	int  var_islpci_interrupt_1_p0;
 1132 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "islpci_interrupt" */
 1133 	void * var_islpci_interrupt_1_p1;
 1134 	/* LDV_COMMENT_BEGIN_PREP */
 1135 	#if VERBOSE > SHOW_ERROR_MESSAGES
 1136 	#endif
 1137 	#if 0
 1138 	#endif
 1139 	#if VERBOSE > SHOW_ERROR_MESSAGES
 1140 	#endif
 1141 	#if 0
 1142 	#endif
 1143 	/* LDV_COMMENT_END_PREP */
 1144 
 1145 
 1146 
 1147 
 1148 	/* LDV_COMMENT_END_VARIABLE_DECLARATION_PART */
 1149 	/* LDV_COMMENT_BEGIN_VARIABLE_INITIALIZING_PART */
 1150 	/*============================= VARIABLE INITIALIZING PART  =============================*/
 1151 	LDV_IN_INTERRUPT=1;
 1152 
 1153 
 1154 
 1155 
 1156 	/* LDV_COMMENT_END_VARIABLE_INITIALIZING_PART */
 1157 	/* LDV_COMMENT_BEGIN_FUNCTION_CALL_SECTION */
 1158 	/*============================= FUNCTION CALL SECTION       =============================*/
 1159 	/* LDV_COMMENT_FUNCTION_CALL Initialize LDV model. */
 1160 	ldv_initialize();
 1161 	
 1162 
 1163 	int ldv_s_islpci_netdev_ops_net_device_ops = 0;
 1164 
 1165 	
 1166 
 1167 
 1168 	while(  nondet_int()
 1169 		|| !(ldv_s_islpci_netdev_ops_net_device_ops == 0)
 1170 	) {
 1171 
 1172 		switch(nondet_int()) {
 1173 
 1174 			case 0: {
 1175 
 1176 				/** STRUCT: struct type: ethtool_ops, struct name: islpci_ethtool_ops **/
 1177 				
 1178 
 1179 				/* content: static void islpci_ethtool_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)*/
 1180 				/* LDV_COMMENT_BEGIN_PREP */
 1181 				#define ISL3877_IMAGE_FILE	"isl3877"
 1182 				#define ISL3886_IMAGE_FILE	"isl3886"
 1183 				#define ISL3890_IMAGE_FILE	"isl3890"
 1184 				#if VERBOSE > SHOW_ERROR_MESSAGES
 1185 				#endif
 1186 				#if VERBOSE > SHOW_ERROR_MESSAGES
 1187 				#endif
 1188 				#if VERBOSE > SHOW_ERROR_MESSAGES
 1189 				#endif
 1190 				#if VERBOSE > SHOW_ERROR_MESSAGES
 1191 				#endif
 1192 				#if VERBOSE > SHOW_ERROR_MESSAGES
 1193 				#endif
 1194 				#if VERBOSE > SHOW_ERROR_MESSAGES
 1195 				#endif
 1196 				#if VERBOSE > SHOW_ERROR_MESSAGES
 1197 				#endif
 1198 				#if VERBOSE > SHOW_ERROR_MESSAGES
 1199 				#endif
 1200 				#if VERBOSE > SHOW_ERROR_MESSAGES
 1201 				#endif
 1202 				#if VERBOSE > SHOW_ERROR_MESSAGES
 1203 				#endif
 1204 				#if 0
 1205 				#endif
 1206 				/* LDV_COMMENT_END_PREP */
 1207 				/* LDV_COMMENT_FUNCTION_CALL Function from field "get_drvinfo" from driver structure with callbacks "islpci_ethtool_ops" */
 1208 				ldv_handler_precall();
 1209 				islpci_ethtool_get_drvinfo( var_group1, var_group2);
 1210 				/* LDV_COMMENT_BEGIN_PREP */
 1211 				#if VERBOSE > SHOW_ERROR_MESSAGES
 1212 				#endif
 1213 				#if 0
 1214 				#endif
 1215 				/* LDV_COMMENT_END_PREP */
 1216 				
 1217 
 1218 				
 1219 
 1220 			}
 1221 
 1222 			break;
 1223 			case 1: {
 1224 
 1225 				/** STRUCT: struct type: net_device_ops, struct name: islpci_netdev_ops **/
 1226 				if(ldv_s_islpci_netdev_ops_net_device_ops==0) {
 1227 
 1228 				/* content: static int islpci_open(struct net_device *ndev)*/
 1229 				/* LDV_COMMENT_BEGIN_PREP */
 1230 				#define ISL3877_IMAGE_FILE	"isl3877"
 1231 				#define ISL3886_IMAGE_FILE	"isl3886"
 1232 				#define ISL3890_IMAGE_FILE	"isl3890"
 1233 				#if VERBOSE > SHOW_ERROR_MESSAGES
 1234 				#endif
 1235 				#if VERBOSE > SHOW_ERROR_MESSAGES
 1236 				#endif
 1237 				#if VERBOSE > SHOW_ERROR_MESSAGES
 1238 				#endif
 1239 				#if VERBOSE > SHOW_ERROR_MESSAGES
 1240 				#endif
 1241 				#if VERBOSE > SHOW_ERROR_MESSAGES
 1242 				#endif
 1243 				#if VERBOSE > SHOW_ERROR_MESSAGES
 1244 				#endif
 1245 				#if VERBOSE > SHOW_ERROR_MESSAGES
 1246 				#endif
 1247 				#if VERBOSE > SHOW_ERROR_MESSAGES
 1248 				#endif
 1249 				#if VERBOSE > SHOW_ERROR_MESSAGES
 1250 				#endif
 1251 				/* LDV_COMMENT_END_PREP */
  1252 				/* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_open" from driver structure with callbacks "islpci_netdev_ops". Standard function test for correct return result. */
 1253 				ldv_handler_precall();
 1254 				res_islpci_open_2 = islpci_open( var_group1);
 1255 				 ldv_check_return_value(res_islpci_open_2);
 1256 				 if(res_islpci_open_2 < 0) 
 1257 					goto ldv_module_exit;
 1258 				/* LDV_COMMENT_BEGIN_PREP */
 1259 				#if VERBOSE > SHOW_ERROR_MESSAGES
 1260 				#endif
 1261 				#if 0
 1262 				#endif
 1263 				#if VERBOSE > SHOW_ERROR_MESSAGES
 1264 				#endif
 1265 				#if 0
 1266 				#endif
 1267 				/* LDV_COMMENT_END_PREP */
 1268 				ldv_s_islpci_netdev_ops_net_device_ops++;
 1269 
 1270 				}
 1271 
 1272 			}
 1273 
 1274 			break;
 1275 			case 2: {
 1276 
 1277 				/** STRUCT: struct type: net_device_ops, struct name: islpci_netdev_ops **/
 1278 				if(ldv_s_islpci_netdev_ops_net_device_ops==1) {
 1279 
 1280 				/* content: static int islpci_close(struct net_device *ndev)*/
 1281 				/* LDV_COMMENT_BEGIN_PREP */
 1282 				#define ISL3877_IMAGE_FILE	"isl3877"
 1283 				#define ISL3886_IMAGE_FILE	"isl3886"
 1284 				#define ISL3890_IMAGE_FILE	"isl3890"
 1285 				#if VERBOSE > SHOW_ERROR_MESSAGES
 1286 				#endif
 1287 				#if VERBOSE > SHOW_ERROR_MESSAGES
 1288 				#endif
 1289 				#if VERBOSE > SHOW_ERROR_MESSAGES
 1290 				#endif
 1291 				#if VERBOSE > SHOW_ERROR_MESSAGES
 1292 				#endif
 1293 				#if VERBOSE > SHOW_ERROR_MESSAGES
 1294 				#endif
 1295 				#if VERBOSE > SHOW_ERROR_MESSAGES
 1296 				#endif
 1297 				#if VERBOSE > SHOW_ERROR_MESSAGES
 1298 				#endif
 1299 				#if VERBOSE > SHOW_ERROR_MESSAGES
 1300 				#endif
 1301 				#if VERBOSE > SHOW_ERROR_MESSAGES
 1302 				#endif
 1303 				/* LDV_COMMENT_END_PREP */
  1304 				/* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_stop" from driver structure with callbacks "islpci_netdev_ops". Standard function test for correct return result. */
 1305 				ldv_handler_precall();
 1306 				res_islpci_close_3 = islpci_close( var_group1);
 1307 				 ldv_check_return_value(res_islpci_close_3);
 1308 				 if(res_islpci_close_3) 
 1309 					goto ldv_module_exit;
 1310 				/* LDV_COMMENT_BEGIN_PREP */
 1311 				#if VERBOSE > SHOW_ERROR_MESSAGES
 1312 				#endif
 1313 				#if 0
 1314 				#endif
 1315 				#if VERBOSE > SHOW_ERROR_MESSAGES
 1316 				#endif
 1317 				#if 0
 1318 				#endif
 1319 				/* LDV_COMMENT_END_PREP */
 1320 				ldv_s_islpci_netdev_ops_net_device_ops=0;
 1321 
 1322 				}
 1323 
 1324 			}
 1325 
 1326 			break;
 1327 			case 3: {
 1328 
 1329 				/** CALLBACK SECTION request_irq **/
 1330 				LDV_IN_INTERRUPT=2;
 1331 
 1332 				/* content: irqreturn_t islpci_interrupt(int irq, void *config)*/
 1333 				/* LDV_COMMENT_BEGIN_PREP */
 1334 				#define ISL3877_IMAGE_FILE	"isl3877"
 1335 				#define ISL3886_IMAGE_FILE	"isl3886"
 1336 				#define ISL3890_IMAGE_FILE	"isl3890"
 1337 				/* LDV_COMMENT_END_PREP */
 1338 				/* LDV_COMMENT_FUNCTION_CALL */
 1339 				ldv_handler_precall();
 1340 				islpci_interrupt( var_islpci_interrupt_1_p0, var_islpci_interrupt_1_p1);
 1341 				/* LDV_COMMENT_BEGIN_PREP */
 1342 				#if VERBOSE > SHOW_ERROR_MESSAGES
 1343 				#endif
 1344 				#if 0
 1345 				#endif
 1346 				#if VERBOSE > SHOW_ERROR_MESSAGES
 1347 				#endif
 1348 				#if 0
 1349 				#endif
 1350 				/* LDV_COMMENT_END_PREP */
 1351 				LDV_IN_INTERRUPT=1;
 1352 
 1353 				
 1354 
 1355 			}
 1356 
 1357 			break;
 1358 			default: break;
 1359 
 1360 		}
 1361 
 1362 	}
 1363 
 1364 	ldv_module_exit: 
 1365 
 1366 	/* LDV_COMMENT_FUNCTION_CALL Checks that all resources and locks are correctly released before the driver will be unloaded. */
 1367 	ldv_final: ldv_check_final_state();
 1368 
 1369 	/* LDV_COMMENT_END_FUNCTION_CALL_SECTION */
 1370 	return;
 1371 
 1372 }
 1373 #endif
 1374 
  1375 /* LDV_COMMENT_END_MAIN */
     1 
    2  #include <linux/types.h>
    3  #include <linux/dma-direction.h>
    4 
    5  extern dma_addr_t ldv_dma_map_page(struct device *dev, struct page *page, size_t offset, size_t size, enum dma_data_direction dir);
    6  extern dma_addr_t ldv_dma_map_single(struct device *dev, void *cpu_addr, size_t size, enum dma_data_direction dir);
    7  extern dma_addr_t ldv_dma_map_single_attrs(struct device *dev, void *ptr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
    8  extern int ldv_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
    9 #line 1 "/home/druidos/temp/331_1a/work/current--X--drivers--X--defaultlinux-3.14.1.tar.xz--X--331_1a--X--cpachecker/linux-3.14.1.tar.xz/csd_deg_dscv/6673/dscv_tempdir/dscv/ri/331_1a/drivers/net/wireless/prism54/islpci_eth.c"
   10 /*
   11  *  Copyright (C) 2002 Intersil Americas Inc.
   12  *  Copyright (C) 2004 Aurelien Alleaume <slts@free.fr>
   13  *  This program is free software; you can redistribute it and/or modify
   14  *  it under the terms of the GNU General Public License as published by
   15  *  the Free Software Foundation; either version 2 of the License
   16  *
   17  *  This program is distributed in the hope that it will be useful,
   18  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
   19  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   20  *  GNU General Public License for more details.
   21  *
   22  *  You should have received a copy of the GNU General Public License
   23  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
   24  *
   25  */
   26 
   27 #include <linux/module.h>
   28 #include <linux/gfp.h>
   29 
   30 #include <linux/pci.h>
   31 #include <linux/delay.h>
   32 #include <linux/netdevice.h>
   33 #include <linux/etherdevice.h>
   34 #include <linux/if_arp.h>
   35 #include <asm/byteorder.h>
   36 
   37 #include "prismcompat.h"
   38 #include "isl_38xx.h"
   39 #include "islpci_eth.h"
   40 #include "islpci_mgt.h"
   41 #include "oid_mgt.h"
   42 
   43 /******************************************************************************
   44     Network Interface functions
   45 ******************************************************************************/
   46 void
   47 islpci_eth_cleanup_transmit(islpci_private *priv,
   48 			    isl38xx_control_block *control_block)
   49 {
   50 	struct sk_buff *skb;
   51 	u32 index;
   52 
   53 	/* compare the control block read pointer with the free pointer */
   54 	while (priv->free_data_tx !=
   55 	       le32_to_cpu(control_block->
   56 			   device_curr_frag[ISL38XX_CB_TX_DATA_LQ])) {
   57 		/* read the index of the first fragment to be freed */
   58 		index = priv->free_data_tx % ISL38XX_CB_TX_QSIZE;
   59 
   60 		/* check for holes in the arrays caused by multi fragment frames
   61 		 * searching for the last fragment of a frame */
   62 		if (priv->pci_map_tx_address[index]) {
   63 			/* entry is the last fragment of a frame
   64 			 * free the skb structure and unmap pci memory */
   65 			skb = priv->data_low_tx[index];
   66 
   67 #if VERBOSE > SHOW_ERROR_MESSAGES
   68 			DEBUG(SHOW_TRACING,
   69 			      "cleanup skb %p skb->data %p skb->len %u truesize %u\n ",
   70 			      skb, skb->data, skb->len, skb->truesize);
   71 #endif
   72 
   73 			pci_unmap_single(priv->pdev,
   74 					 priv->pci_map_tx_address[index],
   75 					 skb->len, PCI_DMA_TODEVICE);
   76 			dev_kfree_skb_irq(skb);
   77 			skb = NULL;
   78 		}
   79 		/* increment the free data low queue pointer */
   80 		priv->free_data_tx++;
   81 	}
   82 }
   83 
   84 netdev_tx_t
   85 islpci_eth_transmit(struct sk_buff *skb, struct net_device *ndev)
   86 {
   87 	islpci_private *priv = netdev_priv(ndev);
   88 	isl38xx_control_block *cb = priv->control_block;
   89 	u32 index;
   90 	dma_addr_t pci_map_address;
   91 	int frame_size;
   92 	isl38xx_fragment *fragment;
   93 	int offset;
   94 	struct sk_buff *newskb;
   95 	int newskb_offset;
   96 	unsigned long flags;
   97 	unsigned char wds_mac[6];
   98 	u32 curr_frag;
   99 
  100 #if VERBOSE > SHOW_ERROR_MESSAGES
  101 	DEBUG(SHOW_FUNCTION_CALLS, "islpci_eth_transmit\n");
  102 #endif
  103 
  104 	/* lock the driver code */
  105 	spin_lock_irqsave(&priv->slock, flags);
  106 
  107 	/* check whether the destination queue has enough fragments for the frame */
  108 	curr_frag = le32_to_cpu(cb->driver_curr_frag[ISL38XX_CB_TX_DATA_LQ]);
  109 	if (unlikely(curr_frag - priv->free_data_tx >= ISL38XX_CB_TX_QSIZE)) {
  110 		printk(KERN_ERR "%s: transmit device queue full when awake\n",
  111 		       ndev->name);
  112 		netif_stop_queue(ndev);
  113 
  114 		/* trigger the device */
  115 		isl38xx_w32_flush(priv->device_base, ISL38XX_DEV_INT_UPDATE,
  116 				  ISL38XX_DEV_INT_REG);
  117 		udelay(ISL38XX_WRITEIO_DELAY);
  118 		goto drop_free;
  119 	}
   120 	/* Check alignment and WDS frame formatting. The start of the packet should
   121 	 * be aligned on a 4-byte boundary. If WDS is enabled, reserve another
   122 	 * 6 bytes and prepend the WDS address information */
  123 	if (likely(((long) skb->data & 0x03) | init_wds)) {
  124 		/* get the number of bytes to add and re-align */
  125 		offset = (4 - (long) skb->data) & 0x03;
  126 		offset += init_wds ? 6 : 0;
  127 
  128 		/* check whether the current skb can be used  */
  129 		if (!skb_cloned(skb) && (skb_tailroom(skb) >= offset)) {
  130 			unsigned char *src = skb->data;
  131 
  132 #if VERBOSE > SHOW_ERROR_MESSAGES
  133 			DEBUG(SHOW_TRACING, "skb offset %i wds %i\n", offset,
  134 			      init_wds);
  135 #endif
  136 
  137 			/* align the buffer on 4-byte boundary */
  138 			skb_reserve(skb, (4 - (long) skb->data) & 0x03);
  139 			if (init_wds) {
  140 				/* wds requires an additional address field of 6 bytes */
  141 				skb_put(skb, 6);
  142 #ifdef ISLPCI_ETH_DEBUG
  143 				printk("islpci_eth_transmit:wds_mac\n");
  144 #endif
  145 				memmove(skb->data + 6, src, skb->len);
  146 				skb_copy_to_linear_data(skb, wds_mac, 6);
  147 			} else {
  148 				memmove(skb->data, src, skb->len);
  149 			}
  150 
  151 #if VERBOSE > SHOW_ERROR_MESSAGES
  152 			DEBUG(SHOW_TRACING, "memmove %p %p %i\n", skb->data,
  153 			      src, skb->len);
  154 #endif
  155 		} else {
  156 			newskb =
  157 			    dev_alloc_skb(init_wds ? skb->len + 6 : skb->len);
  158 			if (unlikely(newskb == NULL)) {
  159 				printk(KERN_ERR "%s: Cannot allocate skb\n",
  160 				       ndev->name);
  161 				goto drop_free;
  162 			}
  163 			newskb_offset = (4 - (long) newskb->data) & 0x03;
  164 
   165 			/* Align newskb->data on a 4-byte boundary if needed */
  166 			if (newskb_offset)
  167 				skb_reserve(newskb, newskb_offset);
  168 
  169 			skb_put(newskb, init_wds ? skb->len + 6 : skb->len);
  170 			if (init_wds) {
  171 				skb_copy_from_linear_data(skb,
  172 							  newskb->data + 6,
  173 							  skb->len);
  174 				skb_copy_to_linear_data(newskb, wds_mac, 6);
  175 #ifdef ISLPCI_ETH_DEBUG
  176 				printk("islpci_eth_transmit:wds_mac\n");
  177 #endif
  178 			} else
  179 				skb_copy_from_linear_data(skb, newskb->data,
  180 							  skb->len);
  181 
  182 #if VERBOSE > SHOW_ERROR_MESSAGES
  183 			DEBUG(SHOW_TRACING, "memcpy %p %p %i wds %i\n",
  184 			      newskb->data, skb->data, skb->len, init_wds);
  185 #endif
  186 
  187 			newskb->dev = skb->dev;
  188 			dev_kfree_skb_irq(skb);
  189 			skb = newskb;
  190 		}
  191 	}
  192 	/* display the buffer contents for debugging */
  193 #if VERBOSE > SHOW_ERROR_MESSAGES
  194 	DEBUG(SHOW_BUFFER_CONTENTS, "\ntx %p ", skb->data);
  195 	display_buffer((char *) skb->data, skb->len);
  196 #endif
  197 
  198 	/* map the skb buffer to pci memory for DMA operation */
  199 	pci_map_address = pci_map_single(priv->pdev,
  200 					 (void *) skb->data, skb->len,
  201 					 PCI_DMA_TODEVICE);
  202 	if (unlikely(pci_map_address == 0)) {
  203 		printk(KERN_WARNING "%s: cannot map buffer to PCI\n",
  204 		       ndev->name);
  205 		goto drop_free;
  206 	}
  207 	/* Place the fragment in the control block structure. */
  208 	index = curr_frag % ISL38XX_CB_TX_QSIZE;
  209 	fragment = &cb->tx_data_low[index];
  210 
  211 	priv->pci_map_tx_address[index] = pci_map_address;
  212 	/* store the skb address for future freeing  */
  213 	priv->data_low_tx[index] = skb;
  214 	/* set the proper fragment start address and size information */
  215 	frame_size = skb->len;
  216 	fragment->size = cpu_to_le16(frame_size);
  217 	fragment->flags = cpu_to_le16(0);	/* set to 1 if more fragments */
  218 	fragment->address = cpu_to_le32(pci_map_address);
  219 	curr_frag++;
  220 
  221 	/* The fragment address in the control block must have been
   222 	 * written before announcing the frame buffer to the device. */
  223 	wmb();
  224 	cb->driver_curr_frag[ISL38XX_CB_TX_DATA_LQ] = cpu_to_le32(curr_frag);
  225 
  226 	if (curr_frag - priv->free_data_tx + ISL38XX_MIN_QTHRESHOLD
  227 	    > ISL38XX_CB_TX_QSIZE) {
  228 		/* stop sends from upper layers */
  229 		netif_stop_queue(ndev);
  230 
  231 		/* set the full flag for the transmission queue */
  232 		priv->data_low_tx_full = 1;
  233 	}
  234 
  235 	ndev->stats.tx_packets++;
  236 	ndev->stats.tx_bytes += skb->len;
  237 
  238 	/* trigger the device */
  239 	islpci_trigger(priv);
  240 
  241 	/* unlock the driver code */
  242 	spin_unlock_irqrestore(&priv->slock, flags);
  243 
  244 	return NETDEV_TX_OK;
  245 
  246       drop_free:
  247 	ndev->stats.tx_dropped++;
  248 	spin_unlock_irqrestore(&priv->slock, flags);
  249 	dev_kfree_skb(skb);
  250 	return NETDEV_TX_OK;
  251 }
  252 
  253 static inline int
  254 islpci_monitor_rx(islpci_private *priv, struct sk_buff **skb)
  255 {
   256 	/* The card reports full 802.11 packets but with a 20-byte
   257 	 * header and without the FCS. But there is a bit that
   258 	 * indicates whether the packet is corrupted :-) */
  259 	struct rfmon_header *hdr = (struct rfmon_header *) (*skb)->data;
  260 
  261 	if (hdr->flags & 0x01)
  262 		/* This one is bad. Drop it ! */
  263 		return -1;
  264 	if (priv->ndev->type == ARPHRD_IEEE80211_PRISM) {
  265 		struct avs_80211_1_header *avs;
  266 		/* extract the relevant data from the header */
  267 		u32 clock = le32_to_cpu(hdr->clock);
  268 		u8 rate = hdr->rate;
  269 		u16 freq = le16_to_cpu(hdr->freq);
  270 		u8 rssi = hdr->rssi;
  271 
  272 		skb_pull(*skb, sizeof (struct rfmon_header));
  273 
  274 		if (skb_headroom(*skb) < sizeof (struct avs_80211_1_header)) {
  275 			struct sk_buff *newskb = skb_copy_expand(*skb,
  276 								 sizeof (struct
  277 									 avs_80211_1_header),
  278 								 0, GFP_ATOMIC);
  279 			if (newskb) {
  280 				dev_kfree_skb_irq(*skb);
  281 				*skb = newskb;
  282 			} else
  283 				return -1;
   284 			/* This behavior is not very subtle... */
  285 		}
  286 
  287 		/* make room for the new header and fill it. */
  288 		avs =
  289 		    (struct avs_80211_1_header *) skb_push(*skb,
  290 							   sizeof (struct
  291 								   avs_80211_1_header));
  292 
  293 		avs->version = cpu_to_be32(P80211CAPTURE_VERSION);
  294 		avs->length = cpu_to_be32(sizeof (struct avs_80211_1_header));
  295 		avs->mactime = cpu_to_be64(clock);
  296 		avs->hosttime = cpu_to_be64(jiffies);
  297 		avs->phytype = cpu_to_be32(6);	/*OFDM: 6 for (g), 8 for (a) */
  298 		avs->channel = cpu_to_be32(channel_of_freq(freq));
  299 		avs->datarate = cpu_to_be32(rate * 5);
  300 		avs->antenna = cpu_to_be32(0);	/*unknown */
  301 		avs->priority = cpu_to_be32(0);	/*unknown */
  302 		avs->ssi_type = cpu_to_be32(3);	/*2: dBm, 3: raw RSSI */
  303 		avs->ssi_signal = cpu_to_be32(rssi & 0x7f);
  304 		avs->ssi_noise = cpu_to_be32(priv->local_iwstatistics.qual.noise);	/*better than 'undefined', I assume */
  305 		avs->preamble = cpu_to_be32(0);	/*unknown */
  306 		avs->encoding = cpu_to_be32(0);	/*unknown */
  307 	} else
  308 		skb_pull(*skb, sizeof (struct rfmon_header));
  309 
  310 	(*skb)->protocol = htons(ETH_P_802_2);
  311 	skb_reset_mac_header(*skb);
  312 	(*skb)->pkt_type = PACKET_OTHERHOST;
  313 
  314 	return 0;
  315 }
  316 
  317 int
  318 islpci_eth_receive(islpci_private *priv)
  319 {
  320 	struct net_device *ndev = priv->ndev;
  321 	isl38xx_control_block *control_block = priv->control_block;
  322 	struct sk_buff *skb;
  323 	u16 size;
  324 	u32 index, offset;
  325 	unsigned char *src;
  326 	int discard = 0;
  327 
  328 #if VERBOSE > SHOW_ERROR_MESSAGES
  329 	DEBUG(SHOW_FUNCTION_CALLS, "islpci_eth_receive\n");
  330 #endif
  331 
   332 	/* the device has written an Ethernet frame into the data area
   333 	 * of the sk_buff without updating the structure; do that now */
  334 	index = priv->free_data_rx % ISL38XX_CB_RX_QSIZE;
  335 	size = le16_to_cpu(control_block->rx_data_low[index].size);
  336 	skb = priv->data_low_rx[index];
  337 	offset = ((unsigned long)
  338 		  le32_to_cpu(control_block->rx_data_low[index].address) -
  339 		  (unsigned long) skb->data) & 3;
  340 
  341 #if VERBOSE > SHOW_ERROR_MESSAGES
  342 	DEBUG(SHOW_TRACING,
  343 	      "frq->addr %x skb->data %p skb->len %u offset %u truesize %u\n ",
  344 	      control_block->rx_data_low[priv->free_data_rx].address, skb->data,
  345 	      skb->len, offset, skb->truesize);
  346 #endif
  347 
  348 	/* delete the streaming DMA mapping before processing the skb */
  349 	pci_unmap_single(priv->pdev,
  350 			 priv->pci_map_rx_address[index],
  351 			 MAX_FRAGMENT_SIZE_RX + 2, PCI_DMA_FROMDEVICE);
  352 
  353 	/* update the skb structure and align the buffer */
  354 	skb_put(skb, size);
  355 	if (offset) {
   356 		/* the device wrote the frame 2 bytes into the buffer; shift to match */
  357 		skb_pull(skb, 2);
  358 		skb_put(skb, 2);
  359 	}
  360 #if VERBOSE > SHOW_ERROR_MESSAGES
  361 	/* display the buffer contents for debugging */
  362 	DEBUG(SHOW_BUFFER_CONTENTS, "\nrx %p ", skb->data);
  363 	display_buffer((char *) skb->data, skb->len);
  364 #endif
  365 
  366 	/* check whether WDS is enabled and whether the data frame is a WDS frame */
  367 
  368 	if (init_wds) {
   369 		/* WDS enabled: strip the 6-byte WDS address from the start of the buffer */
  370 		src = skb->data + 6;
  371 		memmove(skb->data, src, skb->len - 6);
  372 		skb_trim(skb, skb->len - 6);
  373 	}
  374 #if VERBOSE > SHOW_ERROR_MESSAGES
  375 	DEBUG(SHOW_TRACING, "Fragment size %i in skb at %p\n", size, skb);
  376 	DEBUG(SHOW_TRACING, "Skb data at %p, length %i\n", skb->data, skb->len);
  377 
  378 	/* display the buffer contents for debugging */
  379 	DEBUG(SHOW_BUFFER_CONTENTS, "\nrx %p ", skb->data);
  380 	display_buffer((char *) skb->data, skb->len);
  381 #endif
  382 	/* take care of monitor mode and spy monitoring. */
  383 	if (unlikely(priv->iw_mode == IW_MODE_MONITOR)) {
  384 		skb->dev = ndev;
  385 		discard = islpci_monitor_rx(priv, &skb);
  386 	} else {
  387 		if (unlikely(skb->data[2 * ETH_ALEN] == 0)) {
  388 			/* The packet has a rx_annex. Read it for spy monitoring, Then
  389 			 * remove it, while keeping the 2 leading MAC addr.
  390 			 */
  391 			struct iw_quality wstats;
  392 			struct rx_annex_header *annex =
  393 			    (struct rx_annex_header *) skb->data;
  394 			wstats.level = annex->rfmon.rssi;
  395 			/* The noise value can be a bit outdated if nobody's
  396 			 * reading wireless stats... */
  397 			wstats.noise = priv->local_iwstatistics.qual.noise;
  398 			wstats.qual = wstats.level - wstats.noise;
  399 			wstats.updated = 0x07;
  400 			/* Update spy records */
  401 			wireless_spy_update(ndev, annex->addr2, &wstats);
  402 
  403 			skb_copy_from_linear_data(skb,
  404 						  (skb->data +
  405 						   sizeof(struct rfmon_header)),
  406 						  2 * ETH_ALEN);
  407 			skb_pull(skb, sizeof (struct rfmon_header));
  408 		}
  409 		skb->protocol = eth_type_trans(skb, ndev);
  410 	}
  411 	skb->ip_summed = CHECKSUM_NONE;
  412 	ndev->stats.rx_packets++;
  413 	ndev->stats.rx_bytes += size;
  414 
  415 	/* deliver the skb to the network layer */
  416 #ifdef ISLPCI_ETH_DEBUG
  417 	printk
  418 	    ("islpci_eth_receive:netif_rx %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n",
  419 	     skb->data[0], skb->data[1], skb->data[2], skb->data[3],
  420 	     skb->data[4], skb->data[5]);
  421 #endif
  422 	if (unlikely(discard)) {
  423 		dev_kfree_skb_irq(skb);
  424 		skb = NULL;
  425 	} else
  426 		netif_rx(skb);
  427 
  428 	/* increment the read index for the rx data low queue */
  429 	priv->free_data_rx++;
  430 
  431 	/* add one or more sk_buff structures */
  432 	while (index =
  433 	       le32_to_cpu(control_block->
  434 			   driver_curr_frag[ISL38XX_CB_RX_DATA_LQ]),
  435 	       index - priv->free_data_rx < ISL38XX_CB_RX_QSIZE) {
   436 		/* allocate an sk_buff for received data frame storage,
   437 		 * including any required alignment operations */
  438 		skb = dev_alloc_skb(MAX_FRAGMENT_SIZE_RX + 2);
  439 		if (unlikely(skb == NULL)) {
   440 			/* error allocating the sk_buff structure */
  441 			DEBUG(SHOW_ERROR_MESSAGES, "Error allocating skb\n");
  442 			break;
  443 		}
  444 		skb_reserve(skb, (4 - (long) skb->data) & 0x03);
  445 		/* store the new skb structure pointer */
  446 		index = index % ISL38XX_CB_RX_QSIZE;
  447 		priv->data_low_rx[index] = skb;
  448 
  449 #if VERBOSE > SHOW_ERROR_MESSAGES
  450 		DEBUG(SHOW_TRACING,
  451 		      "new alloc skb %p skb->data %p skb->len %u index %u truesize %u\n ",
  452 		      skb, skb->data, skb->len, index, skb->truesize);
  453 #endif
  454 
  455 		/* set the streaming DMA mapping for proper PCI bus operation */
  456 		priv->pci_map_rx_address[index] =
  457 		    pci_map_single(priv->pdev, (void *) skb->data,
  458 				   MAX_FRAGMENT_SIZE_RX + 2,
  459 				   PCI_DMA_FROMDEVICE);
  460 		if (unlikely(!priv->pci_map_rx_address[index])) {
  461 			/* error mapping the buffer to device accessible memory address */
  462 			DEBUG(SHOW_ERROR_MESSAGES,
  463 			      "Error mapping DMA address\n");
  464 
  465 			/* free the skbuf structure before aborting */
  466 			dev_kfree_skb_irq(skb);
  467 			skb = NULL;
  468 			break;
  469 		}
  470 		/* update the fragment address */
  471 		control_block->rx_data_low[index].address =
  472 			cpu_to_le32((u32)priv->pci_map_rx_address[index]);
  473 		wmb();
  474 
  475 		/* increment the driver read pointer */
  476 		le32_add_cpu(&control_block->
  477 			     driver_curr_frag[ISL38XX_CB_RX_DATA_LQ], 1);
  478 	}
  479 
  480 	/* trigger the device */
  481 	islpci_trigger(priv);
  482 
  483 	return 0;
  484 }
  485 
  486 void
  487 islpci_do_reset_and_wake(struct work_struct *work)
  488 {
  489 	islpci_private *priv = container_of(work, islpci_private, reset_task);
  490 
  491 	islpci_reset(priv, 1);
  492 	priv->reset_task_pending = 0;
  493 	smp_wmb();
  494 	netif_wake_queue(priv->ndev);
  495 }
  496 
  497 void
  498 islpci_eth_tx_timeout(struct net_device *ndev)
  499 {
  500 	islpci_private *priv = netdev_priv(ndev);
  501 
  502 	/* increment the transmit error counter */
  503 	ndev->stats.tx_errors++;
  504 
  505 	if (!priv->reset_task_pending) {
  506 		printk(KERN_WARNING
   507 			"%s: tx_timeout, scheduling reset\n", ndev->name);
  508 		netif_stop_queue(ndev);
  509 		priv->reset_task_pending = 1;
  510 		schedule_work(&priv->reset_task);
  511 	} else {
  512 		printk(KERN_WARNING
   513 			"%s: tx_timeout, waiting for reset\n", ndev->name);
  514 	}
  515 }
  516 
   517 #line 9 "/home/druidos/temp/331_1a/work/current--X--drivers--X--defaultlinux-3.14.1.tar.xz--X--331_1a--X--cpachecker/linux-3.14.1.tar.xz/csd_deg_dscv/6673/dscv_tempdir/dscv/ri/331_1a/drivers/net/wireless/prism54/islpci_eth.o.c.prepared"
     1 
    2 /*
    3  *  Copyright (C) 2002 Intersil Americas Inc.
    4  *  Copyright (C) 2003 Herbert Valerio Riedel <hvr@gnu.org>
    5  *
    6  *  This program is free software; you can redistribute it and/or modify
    7  *  it under the terms of the GNU General Public License as published by
    8  *  the Free Software Foundation; either version 2 of the License
    9  *
   10  *  This program is distributed in the hope that it will be useful,
   11  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
   12  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   13  *  GNU General Public License for more details.
   14  *
   15  *  You should have received a copy of the GNU General Public License
   16  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
   17  *
   18  */
   19 
   20 #include <linux/interrupt.h>
   21 #include <linux/module.h>
   22 #include <linux/pci.h>
   23 #include <linux/delay.h>
   24 #include <linux/init.h> /* For __init, __exit */
   25 #include <linux/dma-mapping.h>
   26 
   27 #include "prismcompat.h"
   28 #include "islpci_dev.h"
   29 #include "islpci_mgt.h"		/* for pc_debug */
   30 #include "isl_oid.h"
   31 
   32 MODULE_AUTHOR("[Intersil] R.Bastings and W.Termorshuizen, The prism54.org Development Team <prism54-devel@prism54.org>");
   33 MODULE_DESCRIPTION("The Prism54 802.11 Wireless LAN adapter");
   34 MODULE_LICENSE("GPL");
   35 
   36 static int	init_pcitm = 0;
   37 module_param(init_pcitm, int, 0);
   38 
   39 /* In this order: vendor, device, subvendor, subdevice, class, class_mask,
   40  * driver_data
   41  * If you have an update for this please contact prism54-devel@prism54.org
   42  * The latest list can be found at http://wireless.kernel.org/en/users/Drivers/p54 */
   43 static DEFINE_PCI_DEVICE_TABLE(prism54_id_tbl) = {
   44 	/* Intersil PRISM Duette/Prism GT Wireless LAN adapter */
   45 	{
   46 	 0x1260, 0x3890,
   47 	 PCI_ANY_ID, PCI_ANY_ID,
   48 	 0, 0, 0
   49 	},
   50 
   51 	/* 3COM 3CRWE154G72 Wireless LAN adapter */
   52 	{
   53 	 PCI_VDEVICE(3COM, 0x6001), 0
   54 	},
   55 
   56 	/* Intersil PRISM Indigo Wireless LAN adapter */
   57 	{
   58 	 0x1260, 0x3877,
   59 	 PCI_ANY_ID, PCI_ANY_ID,
   60 	 0, 0, 0
   61 	},
   62 
   63 	/* Intersil PRISM Javelin/Xbow Wireless LAN adapter */
   64 	{
   65 	 0x1260, 0x3886,
   66 	 PCI_ANY_ID, PCI_ANY_ID,
   67 	 0, 0, 0
   68 	},
   69 
   70 	/* End of list */
   71 	{0,0,0,0,0,0,0}
   72 };
   73 
   74 /* register the device with the Hotplug facilities of the kernel */
   75 MODULE_DEVICE_TABLE(pci, prism54_id_tbl);
   76 
   77 static int prism54_probe(struct pci_dev *, const struct pci_device_id *);
   78 static void prism54_remove(struct pci_dev *);
   79 static int prism54_suspend(struct pci_dev *, pm_message_t state);
   80 static int prism54_resume(struct pci_dev *);
   81 
   82 static struct pci_driver prism54_driver = {
   83 	.name = DRV_NAME,
   84 	.id_table = prism54_id_tbl,
   85 	.probe = prism54_probe,
   86 	.remove = prism54_remove,
   87 	.suspend = prism54_suspend,
   88 	.resume = prism54_resume,
   89 };
   90 
   91 /******************************************************************************
   92     Module initialization functions
   93 ******************************************************************************/
   94 
   95 static int
   96 prism54_probe(struct pci_dev *pdev, const struct pci_device_id *id)
   97 {
   98 	struct net_device *ndev;
   99 	u8 latency_tmr;
  100 	u32 mem_addr;
  101 	islpci_private *priv;
  102 	int rvalue;
  103 
  104 	/* Enable the pci device */
  105 	if (pci_enable_device(pdev)) {
  106 		printk(KERN_ERR "%s: pci_enable_device() failed.\n", DRV_NAME);
  107 		return -ENODEV;
  108 	}
  109 
  110 	/* check whether the latency timer is set correctly */
  111 	pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &latency_tmr);
  112 #if VERBOSE > SHOW_ERROR_MESSAGES
  113 	DEBUG(SHOW_TRACING, "latency timer: %x\n", latency_tmr);
  114 #endif
  115 	if (latency_tmr < PCIDEVICE_LATENCY_TIMER_MIN) {
  116 		/* set the latency timer */
  117 		pci_write_config_byte(pdev, PCI_LATENCY_TIMER,
  118 				      PCIDEVICE_LATENCY_TIMER_VAL);
  119 	}
  120 
  121 	/* enable PCI DMA */
  122 	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
   123 		printk(KERN_ERR "%s: 32-bit PCI DMA not supported\n", DRV_NAME);
  124 		goto do_pci_disable_device;
   125 	}
  126 
  127 	/* 0x40 is the programmable timer to configure the response timeout (TRDY_TIMEOUT)
  128 	 * 0x41 is the programmable timer to configure the retry timeout (RETRY_TIMEOUT)
  129 	 *	The RETRY_TIMEOUT is used to set the number of retries that the core, as a
  130 	 *	Master, will perform before abandoning a cycle. The default value for
  131 	 *	RETRY_TIMEOUT is 0x80, which far exceeds the PCI 2.1 requirement for new
  132 	 *	devices. A write of zero to the RETRY_TIMEOUT register disables this
  133 	 *	function to allow use with any non-compliant legacy devices that may
  134 	 *	execute more retries.
  135 	 *
  136 	 *	Writing zero to both these two registers will disable both timeouts and
  137 	 *	*can* solve problems caused by devices that are slow to respond.
  138 	 *	Make this configurable - MSW
  139 	 */
  140 	if ( init_pcitm >= 0 ) {
  141 		pci_write_config_byte(pdev, 0x40, (u8)init_pcitm);
  142 		pci_write_config_byte(pdev, 0x41, (u8)init_pcitm);
  143 	} else {
  144 		printk(KERN_INFO "PCI TRDY/RETRY unchanged\n");
  145 	}
  146 
  147 	/* request the pci device I/O regions */
  148 	rvalue = pci_request_regions(pdev, DRV_NAME);
  149 	if (rvalue) {
  150 		printk(KERN_ERR "%s: pci_request_regions failure (rc=%d)\n",
  151 		       DRV_NAME, rvalue);
  152 		goto do_pci_disable_device;
  153 	}
  154 
  155 	/* check if the memory window is indeed set */
  156 	rvalue = pci_read_config_dword(pdev, PCI_BASE_ADDRESS_0, &mem_addr);
  157 	if (rvalue || !mem_addr) {
  158 		printk(KERN_ERR "%s: PCI device memory region not configured; fix your BIOS or CardBus bridge/drivers\n",
  159 		       DRV_NAME);
  160 		goto do_pci_release_regions;
  161 	}
  162 
  163 	/* enable PCI bus-mastering */
  164 	DEBUG(SHOW_TRACING, "%s: pci_set_master(pdev)\n", DRV_NAME);
  165 	pci_set_master(pdev);
  166 
  167 	/* enable MWI */
  168 	pci_try_set_mwi(pdev);
  169 
  170 	/* setup the network device interface and its structure */
  171 	if (!(ndev = islpci_setup(pdev))) {
  172 		/* error configuring the driver as a network device */
  173 		printk(KERN_ERR "%s: could not configure network device\n",
  174 		       DRV_NAME);
  175 		goto do_pci_clear_mwi;
  176 	}
  177 
  178 	priv = netdev_priv(ndev);
  179 	islpci_set_state(priv, PRV_STATE_PREBOOT); /* we are attempting to boot */
  180 
  181 	/* card is in unknown state yet, might have some interrupts pending */
  182 	isl38xx_disable_interrupts(priv->device_base);
  183 
  184 	/* request for the interrupt before uploading the firmware */
  185 	rvalue = request_irq(pdev->irq, islpci_interrupt,
  186 			     IRQF_SHARED, ndev->name, priv);
  187 
  188 	if (rvalue) {
  189 		/* error, could not hook the handler to the irq */
  190 		printk(KERN_ERR "%s: could not install IRQ handler\n",
  191 		       ndev->name);
  192 		goto do_unregister_netdev;
  193 	}
  194 
  195 	/* firmware upload is triggered in islpci_open */
  196 
  197 	return 0;
  198 
  199       do_unregister_netdev:
  200 	unregister_netdev(ndev);
  201 	islpci_free_memory(priv);
  202 	free_netdev(ndev);
  203 	priv = NULL;
  204       do_pci_clear_mwi:
  205 	pci_clear_mwi(pdev);
  206       do_pci_release_regions:
  207 	pci_release_regions(pdev);
  208       do_pci_disable_device:
  209 	pci_disable_device(pdev);
  210 	return -EIO;
  211 }
  212 
  213 /* set by cleanup_module */
  214 static volatile int __in_cleanup_module = 0;
  215 
  216 /* this one removes one(!!) instance only */
  217 static void
  218 prism54_remove(struct pci_dev *pdev)
  219 {
  220 	struct net_device *ndev = pci_get_drvdata(pdev);
  221 	islpci_private *priv = ndev ? netdev_priv(ndev) : NULL;
  222 	BUG_ON(!priv);
  223 
  224 	if (!__in_cleanup_module) {
  225 		printk(KERN_DEBUG "%s: hot unplug detected\n", ndev->name);
  226 		islpci_set_state(priv, PRV_STATE_OFF);
  227 	}
  228 
  229 	printk(KERN_DEBUG "%s: removing device\n", ndev->name);
  230 
  231 	unregister_netdev(ndev);
  232 
  233 	/* free the interrupt request */
  234 
  235 	if (islpci_get_state(priv) != PRV_STATE_OFF) {
  236 		isl38xx_disable_interrupts(priv->device_base);
  237 		islpci_set_state(priv, PRV_STATE_OFF);
   238 		/* The line below causes a lockup at rmmod time. It might be
   239 		 * because some interrupts still linger after rmmod,
  240 		 * see bug #17 */
  241 		/* pci_set_power_state(pdev, 3);*/	/* try to power-off */
  242 	}
  243 
  244 	free_irq(pdev->irq, priv);
  245 
  246 	/* free the PCI memory and unmap the remapped page */
  247 	islpci_free_memory(priv);
  248 
  249 	free_netdev(ndev);
  250 	priv = NULL;
  251 
  252 	pci_clear_mwi(pdev);
  253 
  254 	pci_release_regions(pdev);
  255 
  256 	pci_disable_device(pdev);
  257 }
  258 
  259 static int
  260 prism54_suspend(struct pci_dev *pdev, pm_message_t state)
  261 {
  262 	struct net_device *ndev = pci_get_drvdata(pdev);
  263 	islpci_private *priv = ndev ? netdev_priv(ndev) : NULL;
  264 	BUG_ON(!priv);
  265 
  266 
  267 	pci_save_state(pdev);
  268 
  269 	/* tell the device not to trigger interrupts for now... */
  270 	isl38xx_disable_interrupts(priv->device_base);
  271 
  272 	/* from now on assume the hardware was already powered down
  273 	   and don't touch it anymore */
  274 	islpci_set_state(priv, PRV_STATE_OFF);
  275 
  276 	netif_stop_queue(ndev);
  277 	netif_device_detach(ndev);
  278 
  279 	return 0;
  280 }
  281 
  282 static int
  283 prism54_resume(struct pci_dev *pdev)
  284 {
  285 	struct net_device *ndev = pci_get_drvdata(pdev);
  286 	islpci_private *priv = ndev ? netdev_priv(ndev) : NULL;
  287 	int err;
  288 
  289 	BUG_ON(!priv);
  290 
  291 	printk(KERN_NOTICE "%s: got resume request\n", ndev->name);
  292 
  293 	err = pci_enable_device(pdev);
  294 	if (err) {
  295 		printk(KERN_ERR "%s: pci_enable_device failed on resume\n",
  296 		       ndev->name);
  297 		return err;
  298 	}
  299 
  300 	pci_restore_state(pdev);
  301 
  302 	/* alright let's go into the PREBOOT state */
  303 	islpci_reset(priv, 1);
  304 
  305 	netif_device_attach(ndev);
  306 	netif_start_queue(ndev);
  307 
  308 	return 0;
  309 }
  310 
  311 static int __init
  312 prism54_module_init(void)
  313 {
  314 	printk(KERN_INFO "Loaded %s driver, version %s\n",
  315 	       DRV_NAME, DRV_VERSION);
  316 
  317 	__bug_on_wrong_struct_sizes ();
  318 
  319 	return pci_register_driver(&prism54_driver);
  320 }
  321 
  322 /* by the time prism54_module_exit() terminates, as a postcondition
  323  * all instances will have been destroyed by calls to
  324  * prism54_remove() */
  325 static void __exit
  326 prism54_module_exit(void)
  327 {
  328 	__in_cleanup_module = 1;
  329 
  330 	pci_unregister_driver(&prism54_driver);
  331 
  332 	printk(KERN_INFO "Unloaded %s driver\n", DRV_NAME);
  333 
  334 	__in_cleanup_module = 0;
  335 }
  336 
  337 /* register entry points */
  338 module_init(prism54_module_init);
  339 module_exit(prism54_module_exit);
  340 /* EOF */
  341 
  342 
  343 
  344 
  345 
  346 /* LDV_COMMENT_BEGIN_MAIN */
  347 #ifdef LDV_MAIN5_sequence_infinite_withcheck_stateful
  348 
  349 /*###########################################################################*/
  350 
  351 /*############## Driver Environment Generator 0.2 output ####################*/
  352 
  353 /*###########################################################################*/
  354 
  355 
  356 
   357 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test if all kernel resources are correctly released by the driver before it is unloaded. */
  358 void ldv_check_final_state(void);
  359 
  360 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result. */
  361 void ldv_check_return_value(int res);
  362 
  363 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result of probe() function. */
  364 void ldv_check_return_value_probe(int res);
  365 
  366 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Initializes the model. */
  367 void ldv_initialize(void);
  368 
  369 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Reinitializes the model between distinct model function calls. */
  370 void ldv_handler_precall(void);
  371 
   372 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Returns an arbitrary integer value. */
  373 int nondet_int(void);
  374 
  375 /* LDV_COMMENT_VAR_DECLARE_LDV Special variable for LDV verifier. */
  376 int LDV_IN_INTERRUPT;
  377 
  378 /* LDV_COMMENT_FUNCTION_MAIN Main function for LDV verifier. */
  379 void ldv_main5_sequence_infinite_withcheck_stateful(void) {
  380 
  381 
  382 
  383 	/* LDV_COMMENT_BEGIN_VARIABLE_DECLARATION_PART */
  384 	/*============================= VARIABLE DECLARATION PART   =============================*/
  385 	/** STRUCT: struct type: pci_driver, struct name: prism54_driver **/
  386 	/* content: static int prism54_probe(struct pci_dev *pdev, const struct pci_device_id *id)*/
  387 	/* LDV_COMMENT_END_PREP */
  388 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "prism54_probe" */
  389 	struct pci_dev * var_group1;
  390 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "prism54_probe" */
  391 	const struct pci_device_id * var_prism54_probe_0_p1;
  392 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "prism54_probe" */
  393 	static int res_prism54_probe_0;
  394 	/* content: static void prism54_remove(struct pci_dev *pdev)*/
  395 	/* LDV_COMMENT_BEGIN_PREP */
  396 	#if VERBOSE > SHOW_ERROR_MESSAGES
  397 	#endif
  398 	/* LDV_COMMENT_END_PREP */
  399 	/* content: static int prism54_suspend(struct pci_dev *pdev, pm_message_t state)*/
  400 	/* LDV_COMMENT_BEGIN_PREP */
  401 	#if VERBOSE > SHOW_ERROR_MESSAGES
  402 	#endif
  403 	/* LDV_COMMENT_END_PREP */
  404 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "prism54_suspend" */
  405 	pm_message_t  var_prism54_suspend_2_p1;
  406 	/* content: static int prism54_resume(struct pci_dev *pdev)*/
  407 	/* LDV_COMMENT_BEGIN_PREP */
  408 	#if VERBOSE > SHOW_ERROR_MESSAGES
  409 	#endif
  410 	/* LDV_COMMENT_END_PREP */
  411 
  412 
  413 
  414 
  415 	/* LDV_COMMENT_END_VARIABLE_DECLARATION_PART */
  416 	/* LDV_COMMENT_BEGIN_VARIABLE_INITIALIZING_PART */
  417 	/*============================= VARIABLE INITIALIZING PART  =============================*/
  418 	LDV_IN_INTERRUPT=1;
  419 
  420 
  421 
  422 
  423 	/* LDV_COMMENT_END_VARIABLE_INITIALIZING_PART */
  424 	/* LDV_COMMENT_BEGIN_FUNCTION_CALL_SECTION */
  425 	/*============================= FUNCTION CALL SECTION       =============================*/
  426 	/* LDV_COMMENT_FUNCTION_CALL Initialize LDV model. */
  427 	ldv_initialize();
  428 
  429 	/** INIT: init_type: ST_MODULE_INIT **/
  430 	/* content: static int __init prism54_module_init(void)*/
  431 	/* LDV_COMMENT_BEGIN_PREP */
  432 	#if VERBOSE > SHOW_ERROR_MESSAGES
  433 	#endif
  434 	/* LDV_COMMENT_END_PREP */
  435 	/* LDV_COMMENT_FUNCTION_CALL Kernel calls driver init function after driver loading to kernel. This function declared as "MODULE_INIT(function name)". */
  436 	ldv_handler_precall();
  437 	 if(prism54_module_init()) 
  438 		goto ldv_final;
  439 	int ldv_s_prism54_driver_pci_driver = 0;
  440 	
  441 
  442 
  443 	while(  nondet_int()
  444 		|| !(ldv_s_prism54_driver_pci_driver == 0)
  445 	) {
  446 
  447 		switch(nondet_int()) {
  448 
  449 			case 0: {
  450 
  451 				/** STRUCT: struct type: pci_driver, struct name: prism54_driver **/
  452 				if(ldv_s_prism54_driver_pci_driver==0) {
  453 
  454 				/* content: static int prism54_probe(struct pci_dev *pdev, const struct pci_device_id *id)*/
  455 				/* LDV_COMMENT_END_PREP */
   456 				/* LDV_COMMENT_FUNCTION_CALL Function from field "probe" from driver structure with callbacks "prism54_driver". Standard function test for correct return result. */
  457 				res_prism54_probe_0 = prism54_probe( var_group1, var_prism54_probe_0_p1);
  458 				 ldv_check_return_value(res_prism54_probe_0);
  459 				 ldv_check_return_value_probe(res_prism54_probe_0);
  460 				 if(res_prism54_probe_0) 
  461 					goto ldv_module_exit;
  462 				ldv_s_prism54_driver_pci_driver++;
  463 
  464 				}
  465 
  466 			}
  467 
  468 			break;
  469 			case 1: {
  470 
  471 				/** STRUCT: struct type: pci_driver, struct name: prism54_driver **/
  472 				if(ldv_s_prism54_driver_pci_driver==1) {
  473 
  474 				/* content: static void prism54_remove(struct pci_dev *pdev)*/
  475 				/* LDV_COMMENT_BEGIN_PREP */
  476 				#if VERBOSE > SHOW_ERROR_MESSAGES
  477 				#endif
  478 				/* LDV_COMMENT_END_PREP */
  479 				/* LDV_COMMENT_FUNCTION_CALL Function from field "remove" from driver structure with callbacks "prism54_driver" */
  480 				ldv_handler_precall();
  481 				prism54_remove( var_group1);
  482 				ldv_s_prism54_driver_pci_driver=0;
  483 
  484 				}
  485 
  486 			}
  487 
  488 			break;
  489 			case 2: {
  490 
  491 				/** STRUCT: struct type: pci_driver, struct name: prism54_driver **/
  492 				
  493 
  494 				/* content: static int prism54_suspend(struct pci_dev *pdev, pm_message_t state)*/
  495 				/* LDV_COMMENT_BEGIN_PREP */
  496 				#if VERBOSE > SHOW_ERROR_MESSAGES
  497 				#endif
  498 				/* LDV_COMMENT_END_PREP */
  499 				/* LDV_COMMENT_FUNCTION_CALL Function from field "suspend" from driver structure with callbacks "prism54_driver" */
  500 				ldv_handler_precall();
  501 				prism54_suspend( var_group1, var_prism54_suspend_2_p1);
  502 				
  503 
  504 				
  505 
  506 			}
  507 
  508 			break;
  509 			case 3: {
  510 
  511 				/** STRUCT: struct type: pci_driver, struct name: prism54_driver **/
  512 				
  513 
  514 				/* content: static int prism54_resume(struct pci_dev *pdev)*/
  515 				/* LDV_COMMENT_BEGIN_PREP */
  516 				#if VERBOSE > SHOW_ERROR_MESSAGES
  517 				#endif
  518 				/* LDV_COMMENT_END_PREP */
  519 				/* LDV_COMMENT_FUNCTION_CALL Function from field "resume" from driver structure with callbacks "prism54_driver" */
  520 				ldv_handler_precall();
  521 				prism54_resume( var_group1);
  522 				
  523 
  524 				
  525 
  526 			}
  527 
  528 			break;
  529 			default: break;
  530 
  531 		}
  532 
  533 	}
  534 
  535 	ldv_module_exit: 
  536 
  537 	/** INIT: init_type: ST_MODULE_EXIT **/
  538 	/* content: static void __exit prism54_module_exit(void)*/
  539 	/* LDV_COMMENT_BEGIN_PREP */
  540 	#if VERBOSE > SHOW_ERROR_MESSAGES
  541 	#endif
  542 	/* LDV_COMMENT_END_PREP */
   543 	/* LDV_COMMENT_FUNCTION_CALL Kernel calls driver release function before driver will be unloaded from kernel. This function declared as "MODULE_EXIT(function name)". */
  544 	ldv_handler_precall();
  545 	prism54_module_exit();
  546 
  547 	/* LDV_COMMENT_FUNCTION_CALL Checks that all resources and locks are correctly released before the driver will be unloaded. */
  548 	ldv_final: ldv_check_final_state();
  549 
  550 	/* LDV_COMMENT_END_FUNCTION_CALL_SECTION */
  551 	return;
  552 
  553 }
  554 #endif
  555 
   556 /* LDV_COMMENT_END_MAIN */
     1 /*
    2  *  Copyright (C) 2002 Intersil Americas Inc.
    3  *  Copyright 2004 Jens Maurer <Jens.Maurer@gmx.net>
    4  *
    5  *  This program is free software; you can redistribute it and/or modify
    6  *  it under the terms of the GNU General Public License as published by
    7  *  the Free Software Foundation; either version 2 of the License
    8  *
    9  *  This program is distributed in the hope that it will be useful,
   10  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
   11  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   12  *  GNU General Public License for more details.
   13  *
   14  *  You should have received a copy of the GNU General Public License
   15  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
   16  *
   17  */
   18 
   19 #include <linux/netdevice.h>
   20 #include <linux/module.h>
   21 #include <linux/pci.h>
   22 #include <linux/sched.h>
   23 #include <linux/slab.h>
   24 
   25 #include <asm/io.h>
   26 #include <linux/if_arp.h>
   27 
   28 #include "prismcompat.h"
   29 #include "isl_38xx.h"
   30 #include "islpci_mgt.h"
   31 #include "isl_oid.h"		/* additional types and defs for isl38xx fw */
   32 #include "isl_ioctl.h"
   33 
   34 #include <net/iw_handler.h>
   35 
   36 /******************************************************************************
   37         Global variable definition section
   38 ******************************************************************************/
   39 int pc_debug = VERBOSE;
   40 module_param(pc_debug, int, 0);
   41 
   42 /******************************************************************************
   43     Driver general functions
   44 ******************************************************************************/
   45 #if VERBOSE > SHOW_ERROR_MESSAGES
   46 void
   47 display_buffer(char *buffer, int length)
   48 {
   49 	if ((pc_debug & SHOW_BUFFER_CONTENTS) == 0)
   50 		return;
   51 
   52 	while (length > 0) {
   53 		printk("[%02x]", *buffer & 255);
   54 		length--;
   55 		buffer++;
   56 	}
   57 
   58 	printk("\n");
   59 }
   60 #endif
   61 
   62 /*****************************************************************************
   63     Queue handling for management frames
   64 ******************************************************************************/
   65 
   66 /*
   67  * Helper function to create a PIMFOR management frame header.
   68  */
   69 static void
   70 pimfor_encode_header(int operation, u32 oid, u32 length, pimfor_header_t *h)
   71 {
   72 	h->version = PIMFOR_VERSION;
   73 	h->operation = operation;
   74 	h->device_id = PIMFOR_DEV_ID_MHLI_MIB;
   75 	h->flags = 0;
   76 	h->oid = cpu_to_be32(oid);
   77 	h->length = cpu_to_be32(length);
   78 }
   79 
   80 /*
   81  * Helper function to analyze a PIMFOR management frame header.
   82  */
   83 static pimfor_header_t *
   84 pimfor_decode_header(void *data, int len)
   85 {
   86 	pimfor_header_t *h = data;
   87 
   88 	while ((void *) h < data + len) {
   89 		if (h->flags & PIMFOR_FLAG_LITTLE_ENDIAN) {
   90 			le32_to_cpus(&h->oid);
   91 			le32_to_cpus(&h->length);
   92 		} else {
   93 			be32_to_cpus(&h->oid);
   94 			be32_to_cpus(&h->length);
   95 		}
   96 		if (h->oid != OID_INL_TUNNEL)
   97 			return h;
   98 		h++;
   99 	}
  100 	return NULL;
  101 }
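
/*
 * Illustrative round trip through the two helpers above (an editorial
 * sketch, not part of the original driver).  pimfor_encode_header()
 * stores oid and length in big-endian order; pimfor_decode_header()
 * converts whatever byte order the frame declares back to CPU order in
 * place, skipping any leading OID_INL_TUNNEL headers.
 */
#if 0	/* example only */
static void pimfor_header_example(void)
{
	char buf[PIMFOR_HEADER_SIZE];
	pimfor_header_t *h;

	/* PIMFOR_OP_GET is assumed to be defined in islpci_mgt.h */
	pimfor_encode_header(PIMFOR_OP_GET, 0x17000007 /* DOT11_OID_CHANNEL */,
			     0, (pimfor_header_t *) buf);
	h = pimfor_decode_header(buf, sizeof (buf));
	/* h->oid == 0x17000007 and h->length == 0, both in CPU order */
}
#endif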
  102 
  103 /*
  104  * Fill the receive queue for management frames with fresh buffers.
  105  */
  106 int
  107 islpci_mgmt_rx_fill(struct net_device *ndev)
  108 {
  109 	islpci_private *priv = netdev_priv(ndev);
  110 	isl38xx_control_block *cb =	/* volatile not needed */
  111 	    (isl38xx_control_block *) priv->control_block;
  112 	u32 curr = le32_to_cpu(cb->driver_curr_frag[ISL38XX_CB_RX_MGMTQ]);
  113 
  114 #if VERBOSE > SHOW_ERROR_MESSAGES
  115 	DEBUG(SHOW_FUNCTION_CALLS, "islpci_mgmt_rx_fill\n");
  116 #endif
  117 
  118 	while (curr - priv->index_mgmt_rx < ISL38XX_CB_MGMT_QSIZE) {
  119 		u32 index = curr % ISL38XX_CB_MGMT_QSIZE;
  120 		struct islpci_membuf *buf = &priv->mgmt_rx[index];
  121 		isl38xx_fragment *frag = &cb->rx_data_mgmt[index];
  122 
  123 		if (buf->mem == NULL) {
  124 			buf->mem = kmalloc(MGMT_FRAME_SIZE, GFP_ATOMIC);
  125 			if (!buf->mem)
  126 				return -ENOMEM;
  127 			buf->size = MGMT_FRAME_SIZE;
  128 		}
  129 		if (buf->pci_addr == 0) {
  130 			buf->pci_addr = pci_map_single(priv->pdev, buf->mem,
  131 						       MGMT_FRAME_SIZE,
  132 						       PCI_DMA_FROMDEVICE);
  133 			if (!buf->pci_addr) {
  134 				printk(KERN_WARNING
  135 				       "Failed to make memory DMA'able.\n");
  136 				return -ENOMEM;
  137 			}
  138 		}
  139 
  140 		/* be safe: always reset control block information */
  141 		frag->size = cpu_to_le16(MGMT_FRAME_SIZE);
  142 		frag->flags = 0;
  143 		frag->address = cpu_to_le32(buf->pci_addr);
  144 		curr++;
  145 
  146 		/* The fragment address in the control block must have
  147 		 * been written before announcing the frame buffer to
  148 		 * the device. */
  149 		wmb();
  150 		cb->driver_curr_frag[ISL38XX_CB_RX_MGMTQ] = cpu_to_le32(curr);
  151 	}
  152 	return 0;
  153 }
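
/*
 * Worked example for the fill loop above (editorial note, with a
 * hypothetical queue size of 4): if curr == 6 and
 * priv->index_mgmt_rx == 3, then 6 - 3 == 3 < 4, so one more buffer
 * may be posted at slot 6 % 4 == 2; after that curr == 7, the
 * difference reaches 4, and the queue is full.  Because both counters
 * are free-running u32 values, the subtraction stays correct even
 * after either counter wraps around.
 */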
  154 
  155 /*
  156  * Create and transmit a management frame using "operation" and "oid",
  157  * with arguments data/length.
  158  * We either return an error and free the frame, or we return 0 and
  159  * islpci_mgt_cleanup_transmit() frees the frame in the tx-done
  160  * interrupt.
  161  */
  162 static int
  163 islpci_mgt_transmit(struct net_device *ndev, int operation, unsigned long oid,
  164 		    void *data, int length)
  165 {
  166 	islpci_private *priv = netdev_priv(ndev);
  167 	isl38xx_control_block *cb =
  168 	    (isl38xx_control_block *) priv->control_block;
  169 	void *p;
  170 	int err = -EINVAL;
  171 	unsigned long flags;
  172 	isl38xx_fragment *frag;
  173 	struct islpci_membuf buf;
  174 	u32 curr_frag;
  175 	int index;
  176 	int frag_len = length + PIMFOR_HEADER_SIZE;
  177 
  178 #if VERBOSE > SHOW_ERROR_MESSAGES
  179 	DEBUG(SHOW_FUNCTION_CALLS, "islpci_mgt_transmit\n");
  180 #endif
  181 
  182 	if (frag_len > MGMT_FRAME_SIZE) {
  183 		printk(KERN_DEBUG "%s: mgmt frame too large %d\n",
  184 		       ndev->name, frag_len);
  185 		goto error;
  186 	}
  187 
  188 	err = -ENOMEM;
  189 	p = buf.mem = kmalloc(frag_len, GFP_KERNEL);
  190 	if (!buf.mem)
  191 		goto error;
  192 
  193 	buf.size = frag_len;
  194 
  195 	/* create the header directly in the fragment data area */
  196 	pimfor_encode_header(operation, oid, length, (pimfor_header_t *) p);
  197 	p += PIMFOR_HEADER_SIZE;
  198 
  199 	if (data)
  200 		memcpy(p, data, length);
  201 	else
  202 		memset(p, 0, length);
  203 
  204 #if VERBOSE > SHOW_ERROR_MESSAGES
  205 	{
  206 		pimfor_header_t *h = buf.mem;
  207 		DEBUG(SHOW_PIMFOR_FRAMES,
  208 		      "PIMFOR: op %i, oid 0x%08lx, device %i, flags 0x%x length 0x%x\n",
  209 		      h->operation, oid, h->device_id, h->flags, length);
  210 
  211 		/* display the buffer contents for debugging */
  212 		display_buffer((char *) h, sizeof (pimfor_header_t));
  213 		display_buffer(p, length);
  214 	}
  215 #endif
  216 
  217 	err = -ENOMEM;
  218 	buf.pci_addr = pci_map_single(priv->pdev, buf.mem, frag_len,
  219 				      PCI_DMA_TODEVICE);
  220 	if (!buf.pci_addr) {
  221 		printk(KERN_WARNING "%s: cannot map PCI memory for mgmt\n",
  222 		       ndev->name);
  223 		goto error_free;
  224 	}
  225 
  226 	/* Protect the control block modifications against interrupts. */
  227 	spin_lock_irqsave(&priv->slock, flags);
  228 	curr_frag = le32_to_cpu(cb->driver_curr_frag[ISL38XX_CB_TX_MGMTQ]);
  229 	if (curr_frag - priv->index_mgmt_tx >= ISL38XX_CB_MGMT_QSIZE) {
  230 		printk(KERN_WARNING "%s: mgmt tx queue is still full\n",
  231 		       ndev->name);
  232 		goto error_unlock;
  233 	}
  234 
  235 	/* commit the frame to the tx device queue */
  236 	index = curr_frag % ISL38XX_CB_MGMT_QSIZE;
  237 	priv->mgmt_tx[index] = buf;
  238 	frag = &cb->tx_data_mgmt[index];
  239 	frag->size = cpu_to_le16(frag_len);
  240 	frag->flags = 0;	/* for any other than the last fragment, set to 1 */
  241 	frag->address = cpu_to_le32(buf.pci_addr);
  242 
  243 	/* The fragment address in the control block must have
  244 	 * been written before announcing the frame buffer to
  245 	 * the device. */
  246 	wmb();
  247 	cb->driver_curr_frag[ISL38XX_CB_TX_MGMTQ] = cpu_to_le32(curr_frag + 1);
  248 	spin_unlock_irqrestore(&priv->slock, flags);
  249 
  250 	/* trigger the device */
  251 	islpci_trigger(priv);
  252 	return 0;
  253 
  254       error_unlock:
  255 	spin_unlock_irqrestore(&priv->slock, flags);
  256       error_free:
  257 	kfree(buf.mem);
  258       error:
  259 	return err;
  260 }
  261 
  262 /*
  263  * Receive a management frame from the device.
  264  * This can be an arbitrary number of traps, and at most one response
  265  * frame for a previous request sent via islpci_mgt_transmit().
  266  */
  267 int
  268 islpci_mgt_receive(struct net_device *ndev)
  269 {
  270 	islpci_private *priv = netdev_priv(ndev);
  271 	isl38xx_control_block *cb =
  272 	    (isl38xx_control_block *) priv->control_block;
  273 	u32 curr_frag;
  274 
  275 #if VERBOSE > SHOW_ERROR_MESSAGES
  276 	DEBUG(SHOW_FUNCTION_CALLS, "islpci_mgt_receive\n");
  277 #endif
  278 
  279 	/* Only once per interrupt, determine fragment range to
  280 	 * process.  This avoids an endless loop (i.e. lockup) if
  281 	 * frames come in faster than we can process them. */
  282 	curr_frag = le32_to_cpu(cb->device_curr_frag[ISL38XX_CB_RX_MGMTQ]);
  283 	barrier();
  284 
  285 	for (; priv->index_mgmt_rx < curr_frag; priv->index_mgmt_rx++) {
  286 		pimfor_header_t *header;
  287 		u32 index = priv->index_mgmt_rx % ISL38XX_CB_MGMT_QSIZE;
  288 		struct islpci_membuf *buf = &priv->mgmt_rx[index];
  289 		u16 frag_len;
  290 		int size;
  291 		struct islpci_mgmtframe *frame;
  292 
  293 		/* I have no idea (and no documentation) if flags != 0
  294 		 * is possible.  Drop the frame, reuse the buffer. */
  295 		if (le16_to_cpu(cb->rx_data_mgmt[index].flags) != 0) {
  296 			printk(KERN_WARNING "%s: unknown flags 0x%04x\n",
  297 			       ndev->name,
  298 			       le16_to_cpu(cb->rx_data_mgmt[index].flags));
  299 			continue;
  300 		}
  301 
  302 		/* The device only returns the size of the header(s) here. */
  303 		frag_len = le16_to_cpu(cb->rx_data_mgmt[index].size);
  304 
  305 		/*
  306 		 * We appear to have no way to tell the device the
  307 		 * size of a receive buffer.  Thus, if this check
  308 		 * triggers, we likely have kernel heap corruption. */
  309 		if (frag_len > MGMT_FRAME_SIZE) {
  310 			printk(KERN_WARNING
  311 				"%s: Bogus packet size of %d (%#x).\n",
  312 				ndev->name, frag_len, frag_len);
  313 			frag_len = MGMT_FRAME_SIZE;
  314 		}
  315 
  316 		/* Ensure the results of device DMA are visible to the CPU. */
  317 		pci_dma_sync_single_for_cpu(priv->pdev, buf->pci_addr,
  318 					    buf->size, PCI_DMA_FROMDEVICE);
  319 
  320 		/* Perform endianness conversion for PIMFOR header in place. */
  321 		header = pimfor_decode_header(buf->mem, frag_len);
  322 		if (!header) {
  323 			printk(KERN_WARNING "%s: no PIMFOR header found\n",
  324 			       ndev->name);
  325 			continue;
  326 		}
  327 
  328 		/* The device ID from the PIMFOR packet received from
  329 		 * the MVC is always 0.  We forward a sensible device_id.
  330 		 * Not that anyone upstream would care... */
  331 		header->device_id = priv->ndev->ifindex;
  332 
  333 #if VERBOSE > SHOW_ERROR_MESSAGES
  334 		DEBUG(SHOW_PIMFOR_FRAMES,
  335 		      "PIMFOR: op %i, oid 0x%08x, device %i, flags 0x%x length 0x%x\n",
  336 		      header->operation, header->oid, header->device_id,
  337 		      header->flags, header->length);
  338 
  339 		/* display the buffer contents for debugging */
  340 		display_buffer((char *) header, PIMFOR_HEADER_SIZE);
  341 		display_buffer((char *) header + PIMFOR_HEADER_SIZE,
  342 			       header->length);
  343 #endif
  344 
  345 		/* nobody sends these */
  346 		if (header->flags & PIMFOR_FLAG_APPLIC_ORIGIN) {
  347 			printk(KERN_DEBUG
  348 			       "%s: errant PIMFOR application frame\n",
  349 			       ndev->name);
  350 			continue;
  351 		}
  352 
  353 		/* Determine frame size, skipping OID_INL_TUNNEL headers. */
  354 		size = PIMFOR_HEADER_SIZE + header->length;
  355 		frame = kmalloc(sizeof(struct islpci_mgmtframe) + size,
  356 				GFP_ATOMIC);
  357 		if (!frame)
  358 			continue;
  359 
  360 		frame->ndev = ndev;
  361 		memcpy(&frame->buf, header, size);
  362 		frame->header = (pimfor_header_t *) frame->buf;
  363 		frame->data = frame->buf + PIMFOR_HEADER_SIZE;
  364 
  365 #if VERBOSE > SHOW_ERROR_MESSAGES
  366 		DEBUG(SHOW_PIMFOR_FRAMES,
  367 		      "frame: header: %p, data: %p, size: %d\n",
  368 		      frame->header, frame->data, size);
  369 #endif
  370 
  371 		if (header->operation == PIMFOR_OP_TRAP) {
  372 #if VERBOSE > SHOW_ERROR_MESSAGES
  373 			printk(KERN_DEBUG
  374 			       "TRAP: oid 0x%x, device %i, flags 0x%x length %i\n",
  375 			       header->oid, header->device_id, header->flags,
  376 			       header->length);
  377 #endif
  378 
  379 			/* Create work to handle trap out of interrupt
  380 			 * context. */
  381 			INIT_WORK(&frame->ws, prism54_process_trap);
  382 			schedule_work(&frame->ws);
  383 
  384 		} else {
  385 			/* Signal the one waiting process that a response
  386 			 * has been received. */
  387 			if ((frame = xchg(&priv->mgmt_received, frame)) != NULL) {
  388 				printk(KERN_WARNING
  389 				       "%s: mgmt response not collected\n",
  390 				       ndev->name);
  391 				kfree(frame);
  392 			}
  393 #if VERBOSE > SHOW_ERROR_MESSAGES
  394 			DEBUG(SHOW_TRACING, "Wake up Mgmt Queue\n");
  395 #endif
  396 			wake_up(&priv->mgmt_wqueue);
  397 		}
  398 
  399 	}
  400 
  401 	return 0;
  402 }
  403 
  404 /*
  405  * Cleanup the transmit queue by freeing all frames handled by the device.
  406  */
  407 void
  408 islpci_mgt_cleanup_transmit(struct net_device *ndev)
  409 {
  410 	islpci_private *priv = netdev_priv(ndev);
  411 	isl38xx_control_block *cb =	/* volatile not needed */
  412 	    (isl38xx_control_block *) priv->control_block;
  413 	u32 curr_frag;
  414 
  415 #if VERBOSE > SHOW_ERROR_MESSAGES
  416 	DEBUG(SHOW_FUNCTION_CALLS, "islpci_mgt_cleanup_transmit\n");
  417 #endif
  418 
  419 	/* Only once per cleanup, determine fragment range to
  420 	 * process.  This avoids an endless loop (i.e. lockup) if
  421 	 * the device became confused, incrementing device_curr_frag
  422 	 * rapidly. */
  423 	curr_frag = le32_to_cpu(cb->device_curr_frag[ISL38XX_CB_TX_MGMTQ]);
  424 	barrier();
  425 
  426 	for (; priv->index_mgmt_tx < curr_frag; priv->index_mgmt_tx++) {
  427 		int index = priv->index_mgmt_tx % ISL38XX_CB_MGMT_QSIZE;
  428 		struct islpci_membuf *buf = &priv->mgmt_tx[index];
  429 		pci_unmap_single(priv->pdev, buf->pci_addr, buf->size,
  430 				 PCI_DMA_TODEVICE);
  431 		buf->pci_addr = 0;
  432 		kfree(buf->mem);
  433 		buf->mem = NULL;
  434 		buf->size = 0;
  435 	}
  436 }
  437 
  438 /*
  439  * Perform one request-response transaction to the device.
  440  */
  441 int
  442 islpci_mgt_transaction(struct net_device *ndev,
  443 		       int operation, unsigned long oid,
  444 		       void *senddata, int sendlen,
  445 		       struct islpci_mgmtframe **recvframe)
  446 {
  447 	islpci_private *priv = netdev_priv(ndev);
  448 	const long wait_cycle_jiffies = msecs_to_jiffies(ISL38XX_WAIT_CYCLE * 10);
  449 	long timeout_left = ISL38XX_MAX_WAIT_CYCLES * wait_cycle_jiffies;
  450 	int err;
  451 	DEFINE_WAIT(wait);
  452 
  453 	*recvframe = NULL;
  454 
  455 	if (mutex_lock_interruptible(&priv->mgmt_lock))
  456 		return -ERESTARTSYS;
  457 
  458 	prepare_to_wait(&priv->mgmt_wqueue, &wait, TASK_UNINTERRUPTIBLE);
  459 	err = islpci_mgt_transmit(ndev, operation, oid, senddata, sendlen);
  460 	if (err)
  461 		goto out;
  462 
  463 	err = -ETIMEDOUT;
  464 	while (timeout_left > 0) {
  465 		int timeleft;
  466 		struct islpci_mgmtframe *frame;
  467 
  468 		timeleft = schedule_timeout_uninterruptible(wait_cycle_jiffies);
  469 		frame = xchg(&priv->mgmt_received, NULL);
  470 		if (frame) {
  471 			if (frame->header->oid == oid) {
  472 				*recvframe = frame;
  473 				err = 0;
  474 				goto out;
  475 			} else {
  476 				printk(KERN_DEBUG
  477 				       "%s: expecting oid 0x%x, received 0x%x.\n",
  478 				       ndev->name, (unsigned int) oid,
  479 				       frame->header->oid);
  480 				kfree(frame);
  481 				frame = NULL;
  482 			}
  483 		}
  484 		if (timeleft == 0) {
  485 			printk(KERN_DEBUG
  486 				"%s: timeout waiting for mgmt response %lu, "
  487 				"triggering device\n",
  488 				ndev->name, timeout_left);
  489 			islpci_trigger(priv);
  490 		}
  491 		timeout_left += timeleft - wait_cycle_jiffies;
  492 	}
  493 	printk(KERN_WARNING "%s: timeout waiting for mgmt response\n",
  494 	       ndev->name);
  495 
  496 	/* TODO: we should reset the device here */
  497  out:
  498 	finish_wait(&priv->mgmt_wqueue, &wait);
  499 	mutex_unlock(&priv->mgmt_lock);
  500 	return err;
  501 }
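
/*
 * Illustrative caller of islpci_mgt_transaction() (an editorial sketch,
 * not part of the original driver).  On success the caller owns the
 * response frame and must hand it back via islpci_mgt_release().
 */
#if 0	/* example only */
static int example_get_oid(struct net_device *ndev, unsigned long oid)
{
	struct islpci_mgmtframe *resp;
	int err;

	err = islpci_mgt_transaction(ndev, PIMFOR_OP_GET, oid, NULL, 0, &resp);
	if (err)
		return err;	/* transmit failed or the device timed out */
	/* ... inspect resp->header and resp->data here ... */
	islpci_mgt_release(resp);
	return 0;
}
#endif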
    1 /*
    2  *  Copyright (C) 2003,2004 Aurelien Alleaume <slts@free.fr>
    3  *
    4  *  This program is free software; you can redistribute it and/or modify
    5  *  it under the terms of the GNU General Public License as published by
    6  *  the Free Software Foundation; either version 2 of the License
    7  *
    8  *  This program is distributed in the hope that it will be useful,
    9  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
   10  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   11  *  GNU General Public License for more details.
   12  *
   13  *  You should have received a copy of the GNU General Public License
   14  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
   15  *
   16  */
   17 
   18 #include <linux/kernel.h>
   19 #include <linux/slab.h>
   20 
   21 #include "prismcompat.h"
   22 #include "islpci_dev.h"
   23 #include "islpci_mgt.h"
   24 #include "isl_oid.h"
   25 #include "oid_mgt.h"
   26 #include "isl_ioctl.h"
   27 
   28 /* to convert between channel and freq */
   29 static const int frequency_list_bg[] = { 2412, 2417, 2422, 2427, 2432,
   30 	2437, 2442, 2447, 2452, 2457, 2462, 2467, 2472, 2484
   31 };
   32 
   33 int
   34 channel_of_freq(int f)
   35 {
   36 	int c = 0;
   37 
   38 	if ((f >= 2412) && (f <= 2484)) {
   39 		while ((c < 14) && (f != frequency_list_bg[c]))
   40 			c++;
   41 		return (c >= 14) ? 0 : ++c;
   42 	} else if ((f >= (int) 5000) && (f <= (int) 6000)) {
   43 		return ( (f - 5000) / 5 );
   44 	} else
   45 		return 0;
   46 }
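
/*
 * Worked examples (editorial note): channel_of_freq(2412) == 1,
 * channel_of_freq(2437) == 6 and channel_of_freq(2484) == 14 via the
 * b/g table above; channel_of_freq(5180) == (5180 - 5000) / 5 == 36
 * via the 5 GHz formula; any frequency outside both ranges yields 0.
 */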
   47 
   48 #define OID_STRUCT(name,oid,s,t) [name] = {oid, 0, sizeof(s), t}
   49 #define OID_STRUCT_C(name,oid,s,t) OID_STRUCT(name,oid,s,t | OID_FLAG_CACHED)
   50 #define OID_U32(name,oid) OID_STRUCT(name,oid,u32,OID_TYPE_U32)
   51 #define OID_U32_C(name,oid) OID_STRUCT_C(name,oid,u32,OID_TYPE_U32)
   52 #define OID_STRUCT_MLME(name,oid) OID_STRUCT(name,oid,struct obj_mlme,OID_TYPE_MLME)
   53 #define OID_STRUCT_MLMEEX(name,oid) OID_STRUCT(name,oid,struct obj_mlmeex,OID_TYPE_MLMEEX)
   54 
   55 #define OID_UNKNOWN(name,oid) OID_STRUCT(name,oid,0,0)
   56 
   57 struct oid_t isl_oid[] = {
   58 	OID_STRUCT(GEN_OID_MACADDRESS, 0x00000000, u8[6], OID_TYPE_ADDR),
   59 	OID_U32(GEN_OID_LINKSTATE, 0x00000001),
   60 	OID_UNKNOWN(GEN_OID_WATCHDOG, 0x00000002),
   61 	OID_UNKNOWN(GEN_OID_MIBOP, 0x00000003),
   62 	OID_UNKNOWN(GEN_OID_OPTIONS, 0x00000004),
   63 	OID_UNKNOWN(GEN_OID_LEDCONFIG, 0x00000005),
   64 
   65 	/* 802.11 */
   66 	OID_U32_C(DOT11_OID_BSSTYPE, 0x10000000),
   67 	OID_STRUCT_C(DOT11_OID_BSSID, 0x10000001, u8[6], OID_TYPE_RAW),
   68 	OID_STRUCT_C(DOT11_OID_SSID, 0x10000002, struct obj_ssid,
   69 		     OID_TYPE_SSID),
   70 	OID_U32(DOT11_OID_STATE, 0x10000003),
   71 	OID_U32(DOT11_OID_AID, 0x10000004),
   72 	OID_STRUCT(DOT11_OID_COUNTRYSTRING, 0x10000005, u8[4], OID_TYPE_RAW),
   73 	OID_STRUCT_C(DOT11_OID_SSIDOVERRIDE, 0x10000006, struct obj_ssid,
   74 		     OID_TYPE_SSID),
   75 
   76 	OID_U32(DOT11_OID_MEDIUMLIMIT, 0x11000000),
   77 	OID_U32_C(DOT11_OID_BEACONPERIOD, 0x11000001),
   78 	OID_U32(DOT11_OID_DTIMPERIOD, 0x11000002),
   79 	OID_U32(DOT11_OID_ATIMWINDOW, 0x11000003),
   80 	OID_U32(DOT11_OID_LISTENINTERVAL, 0x11000004),
   81 	OID_U32(DOT11_OID_CFPPERIOD, 0x11000005),
   82 	OID_U32(DOT11_OID_CFPDURATION, 0x11000006),
   83 
   84 	OID_U32_C(DOT11_OID_AUTHENABLE, 0x12000000),
   85 	OID_U32_C(DOT11_OID_PRIVACYINVOKED, 0x12000001),
   86 	OID_U32_C(DOT11_OID_EXUNENCRYPTED, 0x12000002),
   87 	OID_U32_C(DOT11_OID_DEFKEYID, 0x12000003),
   88 	[DOT11_OID_DEFKEYX] = {0x12000004, 3, sizeof (struct obj_key),
   89 			       OID_FLAG_CACHED | OID_TYPE_KEY},	/* DOT11_OID_DEFKEY1,...DOT11_OID_DEFKEY4 */
   90 	OID_UNKNOWN(DOT11_OID_STAKEY, 0x12000008),
   91 	OID_U32(DOT11_OID_REKEYTHRESHOLD, 0x12000009),
   92 	OID_UNKNOWN(DOT11_OID_STASC, 0x1200000a),
   93 
   94 	OID_U32(DOT11_OID_PRIVTXREJECTED, 0x1a000000),
   95 	OID_U32(DOT11_OID_PRIVRXPLAIN, 0x1a000001),
   96 	OID_U32(DOT11_OID_PRIVRXFAILED, 0x1a000002),
   97 	OID_U32(DOT11_OID_PRIVRXNOKEY, 0x1a000003),
   98 
   99 	OID_U32_C(DOT11_OID_RTSTHRESH, 0x13000000),
  100 	OID_U32_C(DOT11_OID_FRAGTHRESH, 0x13000001),
  101 	OID_U32_C(DOT11_OID_SHORTRETRIES, 0x13000002),
  102 	OID_U32_C(DOT11_OID_LONGRETRIES, 0x13000003),
  103 	OID_U32_C(DOT11_OID_MAXTXLIFETIME, 0x13000004),
  104 	OID_U32(DOT11_OID_MAXRXLIFETIME, 0x13000005),
  105 	OID_U32(DOT11_OID_AUTHRESPTIMEOUT, 0x13000006),
  106 	OID_U32(DOT11_OID_ASSOCRESPTIMEOUT, 0x13000007),
  107 
  108 	OID_UNKNOWN(DOT11_OID_ALOFT_TABLE, 0x1d000000),
  109 	OID_UNKNOWN(DOT11_OID_ALOFT_CTRL_TABLE, 0x1d000001),
  110 	OID_UNKNOWN(DOT11_OID_ALOFT_RETREAT, 0x1d000002),
  111 	OID_UNKNOWN(DOT11_OID_ALOFT_PROGRESS, 0x1d000003),
  112 	OID_U32(DOT11_OID_ALOFT_FIXEDRATE, 0x1d000004),
  113 	OID_UNKNOWN(DOT11_OID_ALOFT_RSSIGRAPH, 0x1d000005),
  114 	OID_UNKNOWN(DOT11_OID_ALOFT_CONFIG, 0x1d000006),
  115 
  116 	[DOT11_OID_VDCFX] = {0x1b000000, 7, 0, 0},
  117 	OID_U32(DOT11_OID_MAXFRAMEBURST, 0x1b000008),
  118 
  119 	OID_U32(DOT11_OID_PSM, 0x14000000),
  120 	OID_U32(DOT11_OID_CAMTIMEOUT, 0x14000001),
  121 	OID_U32(DOT11_OID_RECEIVEDTIMS, 0x14000002),
  122 	OID_U32(DOT11_OID_ROAMPREFERENCE, 0x14000003),
  123 
  124 	OID_U32(DOT11_OID_BRIDGELOCAL, 0x15000000),
  125 	OID_U32(DOT11_OID_CLIENTS, 0x15000001),
  126 	OID_U32(DOT11_OID_CLIENTSASSOCIATED, 0x15000002),
  127 	[DOT11_OID_CLIENTX] = {0x15000003, 2006, 0, 0},	/* DOT11_OID_CLIENTX,...DOT11_OID_CLIENT2007 */
  128 
  129 	OID_STRUCT(DOT11_OID_CLIENTFIND, 0x150007DB, u8[6], OID_TYPE_ADDR),
  130 	OID_STRUCT(DOT11_OID_WDSLINKADD, 0x150007DC, u8[6], OID_TYPE_ADDR),
  131 	OID_STRUCT(DOT11_OID_WDSLINKREMOVE, 0x150007DD, u8[6], OID_TYPE_ADDR),
  132 	OID_STRUCT(DOT11_OID_EAPAUTHSTA, 0x150007DE, u8[6], OID_TYPE_ADDR),
  133 	OID_STRUCT(DOT11_OID_EAPUNAUTHSTA, 0x150007DF, u8[6], OID_TYPE_ADDR),
  134 	OID_U32_C(DOT11_OID_DOT1XENABLE, 0x150007E0),
  135 	OID_UNKNOWN(DOT11_OID_MICFAILURE, 0x150007E1),
  136 	OID_UNKNOWN(DOT11_OID_REKEYINDICATE, 0x150007E2),
  137 
  138 	OID_U32(DOT11_OID_MPDUTXSUCCESSFUL, 0x16000000),
  139 	OID_U32(DOT11_OID_MPDUTXONERETRY, 0x16000001),
  140 	OID_U32(DOT11_OID_MPDUTXMULTIPLERETRIES, 0x16000002),
  141 	OID_U32(DOT11_OID_MPDUTXFAILED, 0x16000003),
  142 	OID_U32(DOT11_OID_MPDURXSUCCESSFUL, 0x16000004),
  143 	OID_U32(DOT11_OID_MPDURXDUPS, 0x16000005),
  144 	OID_U32(DOT11_OID_RTSSUCCESSFUL, 0x16000006),
  145 	OID_U32(DOT11_OID_RTSFAILED, 0x16000007),
  146 	OID_U32(DOT11_OID_ACKFAILED, 0x16000008),
  147 	OID_U32(DOT11_OID_FRAMERECEIVES, 0x16000009),
  148 	OID_U32(DOT11_OID_FRAMEERRORS, 0x1600000A),
  149 	OID_U32(DOT11_OID_FRAMEABORTS, 0x1600000B),
  150 	OID_U32(DOT11_OID_FRAMEABORTSPHY, 0x1600000C),
  151 
  152 	OID_U32(DOT11_OID_SLOTTIME, 0x17000000),
  153 	OID_U32(DOT11_OID_CWMIN, 0x17000001),
  154 	OID_U32(DOT11_OID_CWMAX, 0x17000002),
  155 	OID_U32(DOT11_OID_ACKWINDOW, 0x17000003),
  156 	OID_U32(DOT11_OID_ANTENNARX, 0x17000004),
  157 	OID_U32(DOT11_OID_ANTENNATX, 0x17000005),
  158 	OID_U32(DOT11_OID_ANTENNADIVERSITY, 0x17000006),
  159 	OID_U32_C(DOT11_OID_CHANNEL, 0x17000007),
  160 	OID_U32_C(DOT11_OID_EDTHRESHOLD, 0x17000008),
  161 	OID_U32(DOT11_OID_PREAMBLESETTINGS, 0x17000009),
  162 	OID_STRUCT(DOT11_OID_RATES, 0x1700000A, u8[IWMAX_BITRATES + 1],
  163 		   OID_TYPE_RAW),
  164 	OID_U32(DOT11_OID_CCAMODESUPPORTED, 0x1700000B),
  165 	OID_U32(DOT11_OID_CCAMODE, 0x1700000C),
  166 	OID_UNKNOWN(DOT11_OID_RSSIVECTOR, 0x1700000D),
  167 	OID_UNKNOWN(DOT11_OID_OUTPUTPOWERTABLE, 0x1700000E),
  168 	OID_U32(DOT11_OID_OUTPUTPOWER, 0x1700000F),
  169 	OID_STRUCT(DOT11_OID_SUPPORTEDRATES, 0x17000010,
  170 		   u8[IWMAX_BITRATES + 1], OID_TYPE_RAW),
  171 	OID_U32_C(DOT11_OID_FREQUENCY, 0x17000011),
  172 	[DOT11_OID_SUPPORTEDFREQUENCIES] =
  173 	    {0x17000012, 0, sizeof (struct obj_frequencies)
  174 	     + sizeof (u16) * IWMAX_FREQ, OID_TYPE_FREQUENCIES},
  175 
  176 	OID_U32(DOT11_OID_NOISEFLOOR, 0x17000013),
  177 	OID_STRUCT(DOT11_OID_FREQUENCYACTIVITY, 0x17000014, u8[IWMAX_FREQ + 1],
  178 		   OID_TYPE_RAW),
  179 	OID_UNKNOWN(DOT11_OID_IQCALIBRATIONTABLE, 0x17000015),
  180 	OID_U32(DOT11_OID_NONERPPROTECTION, 0x17000016),
  181 	OID_U32(DOT11_OID_SLOTSETTINGS, 0x17000017),
  182 	OID_U32(DOT11_OID_NONERPTIMEOUT, 0x17000018),
  183 	OID_U32(DOT11_OID_PROFILES, 0x17000019),
  184 	OID_STRUCT(DOT11_OID_EXTENDEDRATES, 0x17000020,
  185 		   u8[IWMAX_BITRATES + 1], OID_TYPE_RAW),
  186 
  187 	OID_STRUCT_MLME(DOT11_OID_DEAUTHENTICATE, 0x18000000),
  188 	OID_STRUCT_MLME(DOT11_OID_AUTHENTICATE, 0x18000001),
  189 	OID_STRUCT_MLME(DOT11_OID_DISASSOCIATE, 0x18000002),
  190 	OID_STRUCT_MLME(DOT11_OID_ASSOCIATE, 0x18000003),
  191 	OID_UNKNOWN(DOT11_OID_SCAN, 0x18000004),
  192 	OID_STRUCT_MLMEEX(DOT11_OID_BEACON, 0x18000005),
  193 	OID_STRUCT_MLMEEX(DOT11_OID_PROBE, 0x18000006),
  194 	OID_STRUCT_MLMEEX(DOT11_OID_DEAUTHENTICATEEX, 0x18000007),
  195 	OID_STRUCT_MLMEEX(DOT11_OID_AUTHENTICATEEX, 0x18000008),
  196 	OID_STRUCT_MLMEEX(DOT11_OID_DISASSOCIATEEX, 0x18000009),
  197 	OID_STRUCT_MLMEEX(DOT11_OID_ASSOCIATEEX, 0x1800000A),
  198 	OID_STRUCT_MLMEEX(DOT11_OID_REASSOCIATE, 0x1800000B),
  199 	OID_STRUCT_MLMEEX(DOT11_OID_REASSOCIATEEX, 0x1800000C),
  200 
  201 	OID_U32(DOT11_OID_NONERPSTATUS, 0x1E000000),
  202 
  203 	OID_U32(DOT11_OID_STATIMEOUT, 0x19000000),
  204 	OID_U32_C(DOT11_OID_MLMEAUTOLEVEL, 0x19000001),
  205 	OID_U32(DOT11_OID_BSSTIMEOUT, 0x19000002),
  206 	[DOT11_OID_ATTACHMENT] = {0x19000003, 0,
  207 		sizeof(struct obj_attachment), OID_TYPE_ATTACH},
  208 	OID_STRUCT_C(DOT11_OID_PSMBUFFER, 0x19000004, struct obj_buffer,
  209 		     OID_TYPE_BUFFER),
  210 
  211 	OID_U32(DOT11_OID_BSSS, 0x1C000000),
  212 	[DOT11_OID_BSSX] = {0x1C000001, 63, sizeof (struct obj_bss),
  213 			    OID_TYPE_BSS},	/*DOT11_OID_BSS1,...,DOT11_OID_BSS64 */
  214 	OID_STRUCT(DOT11_OID_BSSFIND, 0x1C000042, struct obj_bss, OID_TYPE_BSS),
  215 	[DOT11_OID_BSSLIST] = {0x1C000043, 0, sizeof (struct
  216 						      obj_bsslist) +
  217 			       sizeof (struct obj_bss[IWMAX_BSS]),
  218 			       OID_TYPE_BSSLIST},
  219 
  220 	OID_UNKNOWN(OID_INL_TUNNEL, 0xFF020000),
  221 	OID_UNKNOWN(OID_INL_MEMADDR, 0xFF020001),
  222 	OID_UNKNOWN(OID_INL_MEMORY, 0xFF020002),
  223 	OID_U32_C(OID_INL_MODE, 0xFF020003),
  224 	OID_UNKNOWN(OID_INL_COMPONENT_NR, 0xFF020004),
  225 	OID_STRUCT(OID_INL_VERSION, 0xFF020005, u8[8], OID_TYPE_RAW),
  226 	OID_UNKNOWN(OID_INL_INTERFACE_ID, 0xFF020006),
  227 	OID_UNKNOWN(OID_INL_COMPONENT_ID, 0xFF020007),
  228 	OID_U32_C(OID_INL_CONFIG, 0xFF020008),
  229 	OID_U32_C(OID_INL_DOT11D_CONFORMANCE, 0xFF02000C),
  230 	OID_U32(OID_INL_PHYCAPABILITIES, 0xFF02000D),
  231 	OID_U32_C(OID_INL_OUTPUTPOWER, 0xFF02000F),
  232 
  233 };
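
/*
 * Illustrative expansion of one table entry above (editorial note):
 * OID_U32_C(DOT11_OID_CHANNEL, 0x17000007) becomes
 * [DOT11_OID_CHANNEL] = {0x17000007, 0, sizeof(u32),
 * OID_TYPE_U32 | OID_FLAG_CACHED}, i.e. a cached, single-element
 * u32 OID.
 */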
  234 
  235 int
  236 mgt_init(islpci_private *priv)
  237 {
  238 	int i;
  239 
  240 	priv->mib = kcalloc(OID_NUM_LAST, sizeof (void *), GFP_KERNEL);
  241 	if (!priv->mib)
  242 		return -ENOMEM;
  243 
  244 	/* Alloc the cache */
  245 	for (i = 0; i < OID_NUM_LAST; i++) {
  246 		if (isl_oid[i].flags & OID_FLAG_CACHED) {
  247 			priv->mib[i] = kzalloc(isl_oid[i].size *
  248 					       (isl_oid[i].range + 1),
  249 					       GFP_KERNEL);
  250 			if (!priv->mib[i])
  251 				return -ENOMEM;
  252 		} else
  253 			priv->mib[i] = NULL;
  254 	}
  255 
  256 	init_rwsem(&priv->mib_sem);
  257 	prism54_mib_init(priv);
  258 
  259 	return 0;
  260 }
  261 
  262 void
  263 mgt_clean(islpci_private *priv)
  264 {
  265 	int i;
  266 
  267 	if (!priv->mib)
  268 		return;
  269 	for (i = 0; i < OID_NUM_LAST; i++) {
  270 		kfree(priv->mib[i]);
  271 		priv->mib[i] = NULL;
  272 	}
  273 	kfree(priv->mib);
  274 	priv->mib = NULL;
  275 }
  276 
  277 void
  278 mgt_le_to_cpu(int type, void *data)
  279 {
  280 	switch (type) {
  281 	case OID_TYPE_U32:
  282 		*(u32 *) data = le32_to_cpu(*(u32 *) data);
  283 		break;
  284 	case OID_TYPE_BUFFER:{
  285 			struct obj_buffer *buff = data;
  286 			buff->size = le32_to_cpu(buff->size);
  287 			buff->addr = le32_to_cpu(buff->addr);
  288 			break;
  289 		}
  290 	case OID_TYPE_BSS:{
  291 			struct obj_bss *bss = data;
  292 			bss->age = le16_to_cpu(bss->age);
  293 			bss->channel = le16_to_cpu(bss->channel);
  294 			bss->capinfo = le16_to_cpu(bss->capinfo);
  295 			bss->rates = le16_to_cpu(bss->rates);
  296 			bss->basic_rates = le16_to_cpu(bss->basic_rates);
  297 			break;
  298 		}
  299 	case OID_TYPE_BSSLIST:{
  300 			struct obj_bsslist *list = data;
  301 			int i;
  302 			list->nr = le32_to_cpu(list->nr);
  303 			for (i = 0; i < list->nr; i++)
  304 				mgt_le_to_cpu(OID_TYPE_BSS, &list->bsslist[i]);
  305 			break;
  306 		}
  307 	case OID_TYPE_FREQUENCIES:{
  308 			struct obj_frequencies *freq = data;
  309 			int i;
  310 			freq->nr = le16_to_cpu(freq->nr);
  311 			for (i = 0; i < freq->nr; i++)
  312 				freq->mhz[i] = le16_to_cpu(freq->mhz[i]);
  313 			break;
  314 		}
  315 	case OID_TYPE_MLME:{
  316 			struct obj_mlme *mlme = data;
  317 			mlme->id = le16_to_cpu(mlme->id);
  318 			mlme->state = le16_to_cpu(mlme->state);
  319 			mlme->code = le16_to_cpu(mlme->code);
  320 			break;
  321 		}
  322 	case OID_TYPE_MLMEEX:{
  323 			struct obj_mlmeex *mlme = data;
  324 			mlme->id = le16_to_cpu(mlme->id);
  325 			mlme->state = le16_to_cpu(mlme->state);
  326 			mlme->code = le16_to_cpu(mlme->code);
  327 			mlme->size = le16_to_cpu(mlme->size);
  328 			break;
  329 		}
  330 	case OID_TYPE_ATTACH:{
  331 			struct obj_attachment *attach = data;
  332 			attach->id = le16_to_cpu(attach->id);
  333 			attach->size = le16_to_cpu(attach->size);
  334 			break;
  335 	}
  336 	case OID_TYPE_SSID:
  337 	case OID_TYPE_KEY:
  338 	case OID_TYPE_ADDR:
  339 	case OID_TYPE_RAW:
  340 		break;
  341 	default:
  342 		BUG();
  343 	}
  344 }
  345 
  346 static void
  347 mgt_cpu_to_le(int type, void *data)
  348 {
  349 	switch (type) {
  350 	case OID_TYPE_U32:
  351 		*(u32 *) data = cpu_to_le32(*(u32 *) data);
  352 		break;
  353 	case OID_TYPE_BUFFER:{
  354 			struct obj_buffer *buff = data;
  355 			buff->size = cpu_to_le32(buff->size);
  356 			buff->addr = cpu_to_le32(buff->addr);
  357 			break;
  358 		}
  359 	case OID_TYPE_BSS:{
  360 			struct obj_bss *bss = data;
  361 			bss->age = cpu_to_le16(bss->age);
  362 			bss->channel = cpu_to_le16(bss->channel);
  363 			bss->capinfo = cpu_to_le16(bss->capinfo);
  364 			bss->rates = cpu_to_le16(bss->rates);
  365 			bss->basic_rates = cpu_to_le16(bss->basic_rates);
  366 			break;
  367 		}
  368 	case OID_TYPE_BSSLIST:{
  369 			struct obj_bsslist *list = data;
  370 			int i;
  371 			list->nr = cpu_to_le32(list->nr);
  372 			for (i = 0; i < list->nr; i++)
  373 				mgt_cpu_to_le(OID_TYPE_BSS, &list->bsslist[i]);
  374 			break;
  375 		}
  376 	case OID_TYPE_FREQUENCIES:{
  377 			struct obj_frequencies *freq = data;
  378 			int i;
  379 			freq->nr = cpu_to_le16(freq->nr);
  380 			for (i = 0; i < freq->nr; i++)
  381 				freq->mhz[i] = cpu_to_le16(freq->mhz[i]);
  382 			break;
  383 		}
  384 	case OID_TYPE_MLME:{
  385 			struct obj_mlme *mlme = data;
  386 			mlme->id = cpu_to_le16(mlme->id);
  387 			mlme->state = cpu_to_le16(mlme->state);
  388 			mlme->code = cpu_to_le16(mlme->code);
  389 			break;
  390 		}
  391 	case OID_TYPE_MLMEEX:{
  392 			struct obj_mlmeex *mlme = data;
  393 			mlme->id = cpu_to_le16(mlme->id);
  394 			mlme->state = cpu_to_le16(mlme->state);
  395 			mlme->code = cpu_to_le16(mlme->code);
  396 			mlme->size = cpu_to_le16(mlme->size);
  397 			break;
  398 		}
  399 	case OID_TYPE_ATTACH:{
  400 			struct obj_attachment *attach = data;
  401 			attach->id = cpu_to_le16(attach->id);
  402 			attach->size = cpu_to_le16(attach->size);
  403 			break;
  404 	}
  405 	case OID_TYPE_SSID:
  406 	case OID_TYPE_KEY:
  407 	case OID_TYPE_ADDR:
  408 	case OID_TYPE_RAW:
  409 		break;
  410 	default:
  411 		BUG();
  412 	}
  413 }
  414 
  415 /* Note: data is modified during this function */
  416 
  417 int
  418 mgt_set_request(islpci_private *priv, enum oid_num_t n, int extra, void *data)
  419 {
  420 	int ret = 0;
  421 	struct islpci_mgmtframe *response = NULL;
  422 	int response_op = PIMFOR_OP_ERROR;
  423 	int dlen;
  424 	void *cache, *_data = data;
  425 	u32 oid;
  426 
  427 	BUG_ON(OID_NUM_LAST <= n);
  428 	BUG_ON(extra > isl_oid[n].range);
  429 
  430 	if (!priv->mib)
  431 		/* memory has been freed */
  432 		return -1;
  433 
  434 	dlen = isl_oid[n].size;
  435 	cache = priv->mib[n];
  436 	cache += (cache ? extra * dlen : 0);
  437 	oid = isl_oid[n].oid + extra;
  438 
  439 	if (_data == NULL)
  440 		/* we are requested to re-set a cached value */
  441 		_data = cache;
  442 	else
  443 		mgt_cpu_to_le(isl_oid[n].flags & OID_FLAG_TYPE, _data);
  444 	/* If we are going to write to the cache, we don't want anyone to read
  445 	 * it -> acquire write lock.
  446 	 * Else we could acquire a read lock to be sure we don't bother the
  447 	 * commit process (which takes a write lock). But I'm not sure if it's
  448 	 * needed.
  449 	 */
  450 	if (cache)
  451 		down_write(&priv->mib_sem);
  452 
  453 	if (islpci_get_state(priv) >= PRV_STATE_READY) {
  454 		ret = islpci_mgt_transaction(priv->ndev, PIMFOR_OP_SET, oid,
  455 					     _data, dlen, &response);
  456 		if (!ret) {
  457 			response_op = response->header->operation;
  458 			islpci_mgt_release(response);
  459 		}
  460 		if (ret || response_op == PIMFOR_OP_ERROR)
  461 			ret = -EIO;
  462 	} else if (!cache)
  463 		ret = -EIO;
  464 
  465 	if (cache) {
  466 		if (!ret && data)
  467 			memcpy(cache, _data, dlen);
  468 		up_write(&priv->mib_sem);
  469 	}
  470 
  471 	/* re-set given data to what it was */
  472 	if (data)
  473 		mgt_le_to_cpu(isl_oid[n].flags & OID_FLAG_TYPE, data);
  474 
  475 	return ret;
  476 }
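
/*
 * Illustrative caller of mgt_set_request() (an editorial sketch, not
 * part of the original driver): setting a cached u32 OID.  The helper
 * byte-swaps for the device, updates the cache under the write lock,
 * and restores the caller's buffer to CPU order before returning.
 */
#if 0	/* example only */
static int example_set_channel(islpci_private *priv, u32 channel)
{
	return mgt_set_request(priv, DOT11_OID_CHANNEL, 0, &channel);
}
#endif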
  477 
  478 /* None of these are cached */
  479 int
  480 mgt_set_varlen(islpci_private *priv, enum oid_num_t n, void *data, int extra_len)
  481 {
  482 	int ret = 0;
  483 	struct islpci_mgmtframe *response;
  484 	int response_op = PIMFOR_OP_ERROR;
  485 	int dlen;
  486 	u32 oid;
  487 
  488 	BUG_ON(OID_NUM_LAST <= n);
  489 
  490 	dlen = isl_oid[n].size;
  491 	oid = isl_oid[n].oid;
  492 
  493 	mgt_cpu_to_le(isl_oid[n].flags & OID_FLAG_TYPE, data);
  494 
  495 	if (islpci_get_state(priv) >= PRV_STATE_READY) {
  496 		ret = islpci_mgt_transaction(priv->ndev, PIMFOR_OP_SET, oid,
  497 					     data, dlen + extra_len, &response);
  498 		if (!ret) {
  499 			response_op = response->header->operation;
  500 			islpci_mgt_release(response);
  501 		}
  502 		if (ret || response_op == PIMFOR_OP_ERROR)
  503 			ret = -EIO;
  504 	} else
  505 		ret = -EIO;
  506 
  507 	/* re-set given data to what it was */
  508 	if (data)
  509 		mgt_le_to_cpu(isl_oid[n].flags & OID_FLAG_TYPE, data);
  510 
  511 	return ret;
  512 }
  513 
  514 int
  515 mgt_get_request(islpci_private *priv, enum oid_num_t n, int extra, void *data,
  516 		union oid_res_t *res)
  517 {
  518 
  519 	int ret = -EIO;
  520 	int reslen = 0;
  521 	struct islpci_mgmtframe *response = NULL;
  522 
  523 	int dlen;
  524 	void *cache, *_res = NULL;
  525 	u32 oid;
  526 
  527 	BUG_ON(OID_NUM_LAST <= n);
  528 	BUG_ON(extra > isl_oid[n].range);
  529 
  530 	res->ptr = NULL;
  531 
  532 	if (!priv->mib)
  533 		/* memory has been freed */
  534 		return -1;
  535 
  536 	dlen = isl_oid[n].size;
  537 	cache = priv->mib[n];
  538 	cache += cache ? extra * dlen : 0;
  539 	oid = isl_oid[n].oid + extra;
  540 	reslen = dlen;
  541 
  542 	if (cache)
  543 		down_read(&priv->mib_sem);
  544 
  545 	if (islpci_get_state(priv) >= PRV_STATE_READY) {
  546 		ret = islpci_mgt_transaction(priv->ndev, PIMFOR_OP_GET,
  547 					     oid, data, dlen, &response);
  548 		if (ret || !response ||
  549 		    response->header->operation == PIMFOR_OP_ERROR) {
  550 			if (response)
  551 				islpci_mgt_release(response);
  552 			ret = -EIO;
  553 		}
  554 		if (!ret) {
  555 			_res = response->data;
  556 			reslen = response->header->length;
  557 		}
  558 	} else if (cache) {
  559 		_res = cache;
  560 		ret = 0;
  561 	}
  562 	if ((isl_oid[n].flags & OID_FLAG_TYPE) == OID_TYPE_U32)
  563 		res->u = ret ? 0 : le32_to_cpu(*(u32 *) _res);
  564 	else {
  565 		res->ptr = kmalloc(reslen, GFP_KERNEL);
  566 		BUG_ON(res->ptr == NULL);
  567 		if (ret)
  568 			memset(res->ptr, 0, reslen);
  569 		else {
  570 			memcpy(res->ptr, _res, reslen);
  571 			mgt_le_to_cpu(isl_oid[n].flags & OID_FLAG_TYPE,
  572 				      res->ptr);
  573 		}
  574 	}
  575 	if (cache)
  576 		up_read(&priv->mib_sem);
  577 
  578 	if (response && !ret)
  579 		islpci_mgt_release(response);
  580 
  581 	if (reslen > isl_oid[n].size)
  582 		printk(KERN_DEBUG
  583 		       "mgt_get_request(0x%x): received data length was bigger "
  584 		       "than expected (%d > %d). Memory is probably corrupted...\n",
  585 		       oid, reslen, isl_oid[n].size);
  586 
  587 	return ret;
  588 }
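
/*
 * Illustrative caller of mgt_get_request() (an editorial sketch, not
 * part of the original driver).  For OID_TYPE_U32 the result is
 * delivered in res.u; for every other type res.ptr holds a kmalloc'ed
 * copy that the caller must kfree().
 */
#if 0	/* example only */
static int example_get_linkstate(islpci_private *priv, u32 *state)
{
	union oid_res_t res;
	int err;

	err = mgt_get_request(priv, GEN_OID_LINKSTATE, 0, NULL, &res);
	if (!err)
		*state = res.u;
	return err;
}
#endif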
  589 
  590 /* lock outside */
  591 int
  592 mgt_commit_list(islpci_private *priv, enum oid_num_t *l, int n)
  593 {
  594 	int i, ret = 0;
  595 	struct islpci_mgmtframe *response;
  596 
  597 	for (i = 0; i < n; i++) {
  598 		struct oid_t *t = &(isl_oid[l[i]]);
  599 		void *data = priv->mib[l[i]];
  600 		int j = 0;
  601 		u32 oid = t->oid;
  602 		BUG_ON(data == NULL);
  603 		while (j <= t->range) {
  604 			int r = islpci_mgt_transaction(priv->ndev, PIMFOR_OP_SET,
  605 						      oid, data, t->size,
  606 						      &response);
  607 			if (response) {
  608 				r |= (response->header->operation == PIMFOR_OP_ERROR);
  609 				islpci_mgt_release(response);
  610 			}
  611 			if (r)
  612 				printk(KERN_ERR "%s: mgt_commit_list: failure. "
  613 					"oid=%08x err=%d\n",
  614 					priv->ndev->name, oid, r);
  615 			ret |= r;
  616 			j++;
  617 			oid++;
  618 			data += t->size;
  619 		}
  620 	}
  621 	return ret;
  622 }
  623 
  624 /* Lock outside */
  625 
  626 void
  627 mgt_set(islpci_private *priv, enum oid_num_t n, void *data)
  628 {
  629 	BUG_ON(OID_NUM_LAST <= n);
  630 	BUG_ON(priv->mib[n] == NULL);
  631 
  632 	memcpy(priv->mib[n], data, isl_oid[n].size);
  633 	mgt_cpu_to_le(isl_oid[n].flags & OID_FLAG_TYPE, priv->mib[n]);
  634 }
  635 
  636 void
  637 mgt_get(islpci_private *priv, enum oid_num_t n, void *res)
  638 {
  639 	BUG_ON(OID_NUM_LAST <= n);
  640 	BUG_ON(priv->mib[n] == NULL);
  641 	BUG_ON(res == NULL);
  642 
  643 	memcpy(res, priv->mib[n], isl_oid[n].size);
  644 	mgt_le_to_cpu(isl_oid[n].flags & OID_FLAG_TYPE, res);
  645 }
  646 
  647 /* Commits the cache. Lock outside. */
  648 
  649 static enum oid_num_t commit_part1[] = {
  650 	OID_INL_CONFIG,
  651 	OID_INL_MODE,
  652 	DOT11_OID_BSSTYPE,
  653 	DOT11_OID_CHANNEL,
  654 	DOT11_OID_MLMEAUTOLEVEL
  655 };
  656 
  657 static enum oid_num_t commit_part2[] = {
  658 	DOT11_OID_SSID,
  659 	DOT11_OID_PSMBUFFER,
  660 	DOT11_OID_AUTHENABLE,
  661 	DOT11_OID_PRIVACYINVOKED,
  662 	DOT11_OID_EXUNENCRYPTED,
  663 	DOT11_OID_DEFKEYX,	/* MULTIPLE */
  664 	DOT11_OID_DEFKEYID,
  665 	DOT11_OID_DOT1XENABLE,
  666 	OID_INL_DOT11D_CONFORMANCE,
  667 	/* Do not initialize this - fw < 1.0.4.3 rejects it
  668 	OID_INL_OUTPUTPOWER,
  669 	*/
  670 };
  671 
  672 /* update the MAC addr. */
  673 static int
  674 mgt_update_addr(islpci_private *priv)
  675 {
  676 	struct islpci_mgmtframe *res;
  677 	int ret;
  678 
  679 	ret = islpci_mgt_transaction(priv->ndev, PIMFOR_OP_GET,
  680 				     isl_oid[GEN_OID_MACADDRESS].oid, NULL,
  681 				     isl_oid[GEN_OID_MACADDRESS].size, &res);
  682 
  683 	if ((ret == 0) && res && (res->header->operation != PIMFOR_OP_ERROR))
  684 		memcpy(priv->ndev->dev_addr, res->data, ETH_ALEN);
  685 	else
  686 		ret = -EIO;
  687 	if (res)
  688 		islpci_mgt_release(res);
  689 
  690 	if (ret)
  691 		printk(KERN_ERR "%s: mgt_update_addr: failure\n", priv->ndev->name);
  692 	return ret;
  693 }
  694 
  695 int
  696 mgt_commit(islpci_private *priv)
  697 {
  698 	int rvalue;
  699 	enum oid_num_t u;
  700 
  701 	if (islpci_get_state(priv) < PRV_STATE_INIT)
  702 		return 0;
  703 
  704 	rvalue = mgt_commit_list(priv, commit_part1, ARRAY_SIZE(commit_part1));
  705 
  706 	if (priv->iw_mode != IW_MODE_MONITOR)
  707 		rvalue |= mgt_commit_list(priv, commit_part2, ARRAY_SIZE(commit_part2));
  708 
  709 	u = OID_INL_MODE;
  710 	rvalue |= mgt_commit_list(priv, &u, 1);
  711 	rvalue |= mgt_update_addr(priv);
  712 
  713 	if (rvalue) {
  714 		/* Some requests have failed. The device might be in an
  715 		   inconsistent state. We should reset it! */
  716 		printk(KERN_DEBUG "%s: mgt_commit: failure\n", priv->ndev->name);
  717 	}
  718 	return rvalue;
  719 }
  720 
  721 /* The following OIDs need to be "unlatched":
  722  *
  723  * MEDIUMLIMIT,BEACONPERIOD,DTIMPERIOD,ATIMWINDOW,LISTENINTERVAL
  724  * FREQUENCY,EXTENDEDRATES.
  725  *
  726  * The way to do this is to set the ESSID. Note, though, that they may
  727  * already have been unlatched by an earlier set of another OID. */
  728 #if 0
  729 void
  730 mgt_unlatch_all(islpci_private *priv)
  731 {
  732 	u32 u;
  733 	int rvalue = 0;
  734 
  735 	if (islpci_get_state(priv) < PRV_STATE_INIT)
  736 		return;
  737 
  738 	u = DOT11_OID_SSID;
  739 	rvalue = mgt_commit_list(priv, &u, 1);
  740 	/* Necessary if in MANUAL RUN mode? */
  741 #if 0
  742 	u = OID_INL_MODE;
  743 	rvalue |= mgt_commit_list(priv, &u, 1);
  744 
  745 	u = DOT11_OID_MLMEAUTOLEVEL;
  746 	rvalue |= mgt_commit_list(priv, &u, 1);
  747 
  748 	u = OID_INL_MODE;
  749 	rvalue |= mgt_commit_list(priv, &u, 1);
  750 #endif
  751 
  752 	if (rvalue)
  753 		printk(KERN_DEBUG "%s: Unlatching OIDs failed\n", priv->ndev->name);
  754 }
  755 #endif
  756 
  757 /* This tells you whether you are allowed to answer an mlme(ex) request. */
  758 
  759 int
  760 mgt_mlme_answer(islpci_private *priv)
  761 {
  762 	u32 mlmeautolevel;
  763 	/* Acquire a read lock because if we are in a mode change, it's
  764 	 * possible to answer true, while the card is leaving master to managed
  765 	 * mode. Answering to a mlme in this situation could hang the card.
  766 	 */
  767 	down_read(&priv->mib_sem);
  768 	mlmeautolevel =
  769 	    le32_to_cpu(*(u32 *) priv->mib[DOT11_OID_MLMEAUTOLEVEL]);
  770 	up_read(&priv->mib_sem);
  771 
  772 	return ((priv->iw_mode == IW_MODE_MASTER) &&
  773 		(mlmeautolevel >= DOT11_MLME_INTERMEDIATE));
  774 }
  775 
  776 enum oid_num_t
  777 mgt_oidtonum(u32 oid)
  778 {
  779 	int i;
  780 
  781 	for (i = 0; i < OID_NUM_LAST; i++)
  782 		if (isl_oid[i].oid == oid)
  783 			return i;
  784 
  785 	printk(KERN_DEBUG "looking for an unknown oid 0x%x\n", oid);
  786 
  787 	return OID_NUM_LAST;
  788 }
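
/*
 * Illustrative example (editorial note): mgt_oidtonum(0x17000007)
 * scans isl_oid[] and returns DOT11_OID_CHANNEL; an OID value that is
 * not in the table yields the sentinel OID_NUM_LAST.
 */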
  789 
  790 int
  791 mgt_response_to_str(enum oid_num_t n, union oid_res_t *r, char *str)
  792 {
  793 	switch (isl_oid[n].flags & OID_FLAG_TYPE) {
  794 	case OID_TYPE_U32:
  795 		return snprintf(str, PRIV_STR_SIZE, "%u\n", r->u);
  796 		break;
  797 	case OID_TYPE_BUFFER:{
  798 			struct obj_buffer *buff = r->ptr;
  799 			return snprintf(str, PRIV_STR_SIZE,
  800 					"size=%u\naddr=0x%X\n", buff->size,
  801 					buff->addr);
  802 		}
  803 		break;
  804 	case OID_TYPE_BSS:{
  805 			struct obj_bss *bss = r->ptr;
  806 			return snprintf(str, PRIV_STR_SIZE,
  807 					"age=%u\nchannel=%u\n"
  808 					"capinfo=0x%X\nrates=0x%X\n"
  809 					"basic_rates=0x%X\n", bss->age,
  810 					bss->channel, bss->capinfo,
  811 					bss->rates, bss->basic_rates);
  812 		}
  813 		break;
  814 	case OID_TYPE_BSSLIST:{
  815 			struct obj_bsslist *list = r->ptr;
  816 			int i, k;
  817 			k = snprintf(str, PRIV_STR_SIZE, "nr=%u\n", list->nr);
  818 			for (i = 0; i < list->nr; i++)
  819 				k += snprintf(str + k, PRIV_STR_SIZE - k,
  820 					      "bss[%u] :\nage=%u\nchannel=%u\n"
  821 					      "capinfo=0x%X\nrates=0x%X\n"
  822 					      "basic_rates=0x%X\n",
  823 					      i, list->bsslist[i].age,
  824 					      list->bsslist[i].channel,
  825 					      list->bsslist[i].capinfo,
  826 					      list->bsslist[i].rates,
  827 					      list->bsslist[i].basic_rates);
  828 			return k;
  829 		}
  830 		break;
  831 	case OID_TYPE_FREQUENCIES:{
  832 			struct obj_frequencies *freq = r->ptr;
  833 			int i, t;
  834 			printk("nr : %u\n", freq->nr);
  835 			t = snprintf(str, PRIV_STR_SIZE, "nr=%u\n", freq->nr);
  836 			for (i = 0; i < freq->nr; i++)
  837 				t += snprintf(str + t, PRIV_STR_SIZE - t,
  838 					      "mhz[%u]=%u\n", i, freq->mhz[i]);
  839 			return t;
  840 		}
  841 		break;
  842 	case OID_TYPE_MLME:{
  843 			struct obj_mlme *mlme = r->ptr;
  844 			return snprintf(str, PRIV_STR_SIZE,
  845 					"id=0x%X\nstate=0x%X\ncode=0x%X\n",
  846 					mlme->id, mlme->state, mlme->code);
  847 		}
  848 		break;
  849 	case OID_TYPE_MLMEEX:{
  850 			struct obj_mlmeex *mlme = r->ptr;
  851 			return snprintf(str, PRIV_STR_SIZE,
  852 					"id=0x%X\nstate=0x%X\n"
  853 					"code=0x%X\nsize=0x%X\n", mlme->id,
  854 					mlme->state, mlme->code, mlme->size);
  855 		}
  856 		break;
  857 	case OID_TYPE_ATTACH:{
  858 			struct obj_attachment *attach = r->ptr;
  859 			return snprintf(str, PRIV_STR_SIZE,
  860 					"id=%d\nsize=%d\n",
  861 					attach->id,
  862 					attach->size);
  863 		}
  864 		break;
  865 	case OID_TYPE_SSID:{
  866 			struct obj_ssid *ssid = r->ptr;
  867 			return snprintf(str, PRIV_STR_SIZE,
  868 					"length=%u\noctets=%.*s\n",
  869 					ssid->length, ssid->length,
  870 					ssid->octets);
  871 		}
  872 		break;
  873 	case OID_TYPE_KEY:{
  874 			struct obj_key *key = r->ptr;
  875 			int t, i;
  876 			t = snprintf(str, PRIV_STR_SIZE,
  877 				     "type=0x%X\nlength=0x%X\nkey=0x",
  878 				     key->type, key->length);
  879 			for (i = 0; i < key->length; i++)
  880 				t += snprintf(str + t, PRIV_STR_SIZE - t,
  881 					      "%02X:", key->key[i]);
  882 			t += snprintf(str + t, PRIV_STR_SIZE - t, "\n");
  883 			return t;
  884 		}
  885 		break;
  886 	case OID_TYPE_RAW:
  887 	case OID_TYPE_ADDR:{
  888 			unsigned char *buff = r->ptr;
  889 			int t, i;
  890 			t = snprintf(str, PRIV_STR_SIZE, "hex data=");
  891 			for (i = 0; i < isl_oid[n].size; i++)
  892 				t += snprintf(str + t, PRIV_STR_SIZE - t,
  893 					      "%02X:", buff[i]);
  894 			t += snprintf(str + t, PRIV_STR_SIZE - t, "\n");
  895 			return t;
  896 		}
  897 		break;
  898 	default:
  899 		BUG();
  900 	}
  901 	return 0;
  902 }
    1
    2  #include <linux/types.h>
    3  #include <linux/dma-direction.h>
    4  #include <verifier/rcv.h>
    5  #include <verifier/set.h>
    6  #include <verifier/map.h>
    7 
    8  Set LDV_DMA_MAP_CALLS;
    9 
   10  /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_dma_map_page') maps page */
   11  dma_addr_t ldv_dma_map_page(struct device *dev, struct page *page, size_t offset, size_t size, enum dma_data_direction dir) {
   12   dma_addr_t nonedetermined;
   13 
   14   nonedetermined = ldv_undef_ptr();
   15 
  16   /* LDV_COMMENT_ASSERT Check that the previous dma_mapping call was checked */
   17   ldv_assert(ldv_set_is_empty(LDV_DMA_MAP_CALLS));
   18 
   19   ldv_set_add(LDV_DMA_MAP_CALLS, nonedetermined);
   20 
   21   return nonedetermined;
   22  }
   23 
 24  /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_dma_mapping_error') checks the result of a previous mapping call */
 25  int ldv_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) {
 26   int nonedetermined;
 27 
 28   /* LDV_COMMENT_ASSERT Check that this dma_addr was produced by a previous mapping call */
 29   ldv_assert(ldv_set_contains(LDV_DMA_MAP_CALLS, dma_addr));
 30   ldv_set_remove(LDV_DMA_MAP_CALLS, dma_addr);
 31 
 32   nonedetermined = ldv_undef_int();
 33 
 34   return nonedetermined;
 35  }
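
 /* Illustrative driver-side pattern that the model functions above
    enforce (an editorial sketch, not part of the model): every mapping
    call must be checked exactly once before the next mapping is made. */
 #if 0 /* example only */
 int example_map_and_check(struct device *dev, void *buf, size_t size) {
  dma_addr_t addr = ldv_dma_map_single(dev, buf, size, DMA_TO_DEVICE);

  if (ldv_dma_mapping_error(dev, addr))
   return -ENOMEM; /* mapping failed; nothing to unmap */
  return 0;
 }
 #endif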
   37 
   38 
   39 
   40  /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_dma_map_single') maps pci_dma */
   41  dma_addr_t ldv_dma_map_single(struct device *dev, void *cpu_addr, size_t size, enum dma_data_direction dir) {
   42   dma_addr_t nonedetermined;
   43 
   44   nonedetermined = ldv_undef_ptr();
   45 
  46   /* LDV_COMMENT_ASSERT Check that the previous dma_mapping call was checked */
   47   ldv_assert(ldv_set_is_empty(LDV_DMA_MAP_CALLS));
   48 
   49   ldv_set_add(LDV_DMA_MAP_CALLS, nonedetermined);
   50 
   51   return nonedetermined;
   52  }
   53 
   54  /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_dma_map_single_attrs') maps pci_dma */
   55  dma_addr_t ldv_dma_map_single_attrs(struct device *dev, void *ptr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs) {
   56   dma_addr_t nonedetermined;
   57 
   58   nonedetermined = ldv_undef_ptr();
   59 
  60   /* LDV_COMMENT_ASSERT Check that the previous dma_mapping call was checked */
   61   ldv_assert(ldv_set_is_empty(LDV_DMA_MAP_CALLS));
   62 
   63   ldv_set_add(LDV_DMA_MAP_CALLS, nonedetermined);
   64 
   65   return nonedetermined;
   66  }
   67 
   68  /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_initialize') Initialize all module reference counters at the beginning */
   69  void ldv_initialize(void) {
   70   /* LDV_COMMENT_CHANGE_STATE All module reference counters have some initial value at the beginning */
   71   ldv_set_init(LDV_DMA_MAP_CALLS);
   72  }
   73 
   74  /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_check_final_state') Check that all module reference counters have their initial values at the end */
   75  void ldv_check_final_state(void) {
  76   /* LDV_COMMENT_ASSERT All incremented module reference counters should be decremented before module unloading */
   77   ldv_assert(ldv_set_is_empty(LDV_DMA_MAP_CALLS));
  78  }
   1 /* include this file if the platform implements the dma_ DMA Mapping API
    2  * and wants to provide the pci_ DMA Mapping API in terms of it */
    3 
    4 #ifndef _ASM_GENERIC_PCI_DMA_COMPAT_H
    5 #define _ASM_GENERIC_PCI_DMA_COMPAT_H
    6 
    7 #include <linux/dma-mapping.h>
    8 
    9 static inline int
   10 pci_dma_supported(struct pci_dev *hwdev, u64 mask)
   11 {
   12 	return dma_supported(hwdev == NULL ? NULL : &hwdev->dev, mask);
   13 }
   14 
   15 static inline void *
   16 pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
   17 		     dma_addr_t *dma_handle)
   18 {
   19 	return dma_alloc_coherent(hwdev == NULL ? NULL : &hwdev->dev, size, dma_handle, GFP_ATOMIC);
   20 }
   21 
   22 static inline void
   23 pci_free_consistent(struct pci_dev *hwdev, size_t size,
   24 		    void *vaddr, dma_addr_t dma_handle)
   25 {
   26 	dma_free_coherent(hwdev == NULL ? NULL : &hwdev->dev, size, vaddr, dma_handle);
   27 }
   28 
   29 static inline dma_addr_t
   30 pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
   31 {
   32 	return dma_map_single(hwdev == NULL ? NULL : &hwdev->dev, ptr, size, (enum dma_data_direction)direction);
   33 }
   34 
   35 static inline void
   36 pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
   37 		 size_t size, int direction)
   38 {
   39 	dma_unmap_single(hwdev == NULL ? NULL : &hwdev->dev, dma_addr, size, (enum dma_data_direction)direction);
   40 }
   41 
   42 static inline dma_addr_t
   43 pci_map_page(struct pci_dev *hwdev, struct page *page,
   44 	     unsigned long offset, size_t size, int direction)
   45 {
   46 	return dma_map_page(hwdev == NULL ? NULL : &hwdev->dev, page, offset, size, (enum dma_data_direction)direction);
   47 }
   48 
   49 static inline void
   50 pci_unmap_page(struct pci_dev *hwdev, dma_addr_t dma_address,
   51 	       size_t size, int direction)
   52 {
   53 	dma_unmap_page(hwdev == NULL ? NULL : &hwdev->dev, dma_address, size, (enum dma_data_direction)direction);
   54 }
   55 
   56 static inline int
   57 pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
   58 	   int nents, int direction)
   59 {
   60 	return dma_map_sg(hwdev == NULL ? NULL : &hwdev->dev, sg, nents, (enum dma_data_direction)direction);
   61 }
   62 
   63 static inline void
   64 pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
   65 	     int nents, int direction)
   66 {
   67 	dma_unmap_sg(hwdev == NULL ? NULL : &hwdev->dev, sg, nents, (enum dma_data_direction)direction);
   68 }
   69 
   70 static inline void
   71 pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle,
   72 		    size_t size, int direction)
   73 {
   74 	dma_sync_single_for_cpu(hwdev == NULL ? NULL : &hwdev->dev, dma_handle, size, (enum dma_data_direction)direction);
   75 }
   76 
   77 static inline void
   78 pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle,
   79 		    size_t size, int direction)
   80 {
   81 	dma_sync_single_for_device(hwdev == NULL ? NULL : &hwdev->dev, dma_handle, size, (enum dma_data_direction)direction);
   82 }
   83 
   84 static inline void
   85 pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg,
   86 		int nelems, int direction)
   87 {
   88 	dma_sync_sg_for_cpu(hwdev == NULL ? NULL : &hwdev->dev, sg, nelems, (enum dma_data_direction)direction);
   89 }
   90 
   91 static inline void
   92 pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg,
   93 		int nelems, int direction)
   94 {
   95 	dma_sync_sg_for_device(hwdev == NULL ? NULL : &hwdev->dev, sg, nelems, (enum dma_data_direction)direction);
   96 }
   97 
   98 static inline int
   99 pci_dma_mapping_error(struct pci_dev *pdev, dma_addr_t dma_addr)
  100 {
  101 	return dma_mapping_error(&pdev->dev, dma_addr);
  102 }
  103 
  104 #ifdef CONFIG_PCI
  105 static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
  106 {
  107 	return dma_set_mask(&dev->dev, mask);
  108 }
  109 
  110 static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
  111 {
  112 	return dma_set_coherent_mask(&dev->dev, mask);
  113 }
  114 #endif
  115 
  116 #endif
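
/*
 * Illustrative use of the wrappers above (an editorial sketch, not part
 * of this header): the classic streaming-DMA sequence, map -> check ->
 * use -> unmap.  PCI_DMA_TODEVICE is assumed to come from <linux/pci.h>.
 */
#if 0	/* example only */
static int example_pci_dma(struct pci_dev *pdev, void *buf, size_t len)
{
	dma_addr_t handle = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);

	if (pci_dma_mapping_error(pdev, handle))
		return -ENOMEM;
	/* ... program the device with "handle" and wait for completion ... */
	pci_unmap_single(pdev, handle, len, PCI_DMA_TODEVICE);
	return 0;
}
#endif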
    1 /*
    2  * INET		An implementation of the TCP/IP protocol suite for the LINUX
    3  *		operating system.  INET is implemented using the  BSD Socket
    4  *		interface as the means of communication with the user level.
    5  *
    6  *		Definitions for the Interfaces handler.
    7  *
    8  * Version:	@(#)dev.h	1.0.10	08/12/93
    9  *
   10  * Authors:	Ross Biro
   11  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
   12  *		Corey Minyard <wf-rch!minyard@relay.EU.net>
   13  *		Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
   14  *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
   15  *		Bjorn Ekwall. <bj0rn@blox.se>
   16  *              Pekka Riikonen <priikone@poseidon.pspt.fi>
   17  *
   18  *		This program is free software; you can redistribute it and/or
   19  *		modify it under the terms of the GNU General Public License
   20  *		as published by the Free Software Foundation; either version
   21  *		2 of the License, or (at your option) any later version.
   22  *
   23  *		Moved to /usr/include/linux for NET3
   24  */
   25 #ifndef _LINUX_NETDEVICE_H
   26 #define _LINUX_NETDEVICE_H
   27 
   28 #include <linux/pm_qos.h>
   29 #include <linux/timer.h>
   30 #include <linux/bug.h>
   31 #include <linux/delay.h>
   32 #include <linux/atomic.h>
   33 #include <asm/cache.h>
   34 #include <asm/byteorder.h>
   35 
   36 #include <linux/percpu.h>
   37 #include <linux/rculist.h>
   38 #include <linux/dmaengine.h>
   39 #include <linux/workqueue.h>
   40 #include <linux/dynamic_queue_limits.h>
   41 
   42 #include <linux/ethtool.h>
   43 #include <net/net_namespace.h>
   44 #include <net/dsa.h>
   45 #ifdef CONFIG_DCB
   46 #include <net/dcbnl.h>
   47 #endif
   48 #include <net/netprio_cgroup.h>
   49 
   50 #include <linux/netdev_features.h>
   51 #include <linux/neighbour.h>
   52 #include <uapi/linux/netdevice.h>
   53 
   54 struct netpoll_info;
   55 struct device;
   56 struct phy_device;
   57 /* 802.11 specific */
   58 struct wireless_dev;
   59 					/* source back-compat hooks */
   60 #define SET_ETHTOOL_OPS(netdev,ops) \
   61 	( (netdev)->ethtool_ops = (ops) )
   62 
   63 void netdev_set_default_ethtool_ops(struct net_device *dev,
   64 				    const struct ethtool_ops *ops);
   65 
   66 /* hardware address assignment types */
   67 #define NET_ADDR_PERM		0	/* address is permanent (default) */
   68 #define NET_ADDR_RANDOM		1	/* address is generated randomly */
   69 #define NET_ADDR_STOLEN		2	/* address is stolen from other device */
   70 #define NET_ADDR_SET		3	/* address is set using
   71 					 * dev_set_mac_address() */
   72 
   73 /* Backlog congestion levels */
   74 #define NET_RX_SUCCESS		0	/* keep 'em coming, baby */
   75 #define NET_RX_DROP		1	/* packet dropped */
   76 
   77 /*
   78  * Transmit return codes: transmit return codes originate from three different
   79  * namespaces:
   80  *
   81  * - qdisc return codes
   82  * - driver transmit return codes
   83  * - errno values
   84  *
   85  * Drivers are allowed to return any one of those in their hard_start_xmit()
   86  * function. Real network devices commonly used with qdiscs should only return
   87  * the driver transmit return codes though - when qdiscs are used, the actual
   88  * transmission happens asynchronously, so the value is not propagated to
   89  * higher layers. Virtual network devices transmit synchronously, in this case
   90  * the driver transmit return codes are consumed by dev_queue_xmit(), all
   91  * others are propagated to higher layers.
   92  */
   93 
   94 /* qdisc ->enqueue() return codes. */
   95 #define NET_XMIT_SUCCESS	0x00
   96 #define NET_XMIT_DROP		0x01	/* skb dropped			*/
   97 #define NET_XMIT_CN		0x02	/* congestion notification	*/
   98 #define NET_XMIT_POLICED	0x03	/* skb is shot by police	*/
   99 #define NET_XMIT_MASK		0x0f	/* qdisc flags in net/sch_generic.h */
  100 
  101 /* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
  102  * indicates that the device will soon be dropping packets, or already drops
  103  * some packets of the same priority; prompting us to send less aggressively. */
  104 #define net_xmit_eval(e)	((e) == NET_XMIT_CN ? 0 : (e))
  105 #define net_xmit_errno(e)	((e) != NET_XMIT_CN ? -ENOBUFS : 0)
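/* A minimal hedged sketch of net_xmit_eval() in a protocol transmit
 * path (example_proto_xmit is invented): NET_XMIT_CN still means the
 * packet was queued, so it is reported as success.
 */
static int example_proto_xmit(struct sk_buff *skb)
{
	int rc = dev_queue_xmit(skb);	/* declared later in this header */

	return net_xmit_eval(rc);	/* CN -> 0, other codes unchanged */
}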
  106 
  107 /* Driver transmit return codes */
  108 #define NETDEV_TX_MASK		0xf0
  109 
  110 enum netdev_tx {
  111 	__NETDEV_TX_MIN	 = INT_MIN,	/* make sure enum is signed */
  112 	NETDEV_TX_OK	 = 0x00,	/* driver took care of packet */
  113 	NETDEV_TX_BUSY	 = 0x10,	/* driver tx path was busy*/
  114 	NETDEV_TX_LOCKED = 0x20,	/* driver tx lock was already taken */
  115 };
  116 typedef enum netdev_tx netdev_tx_t;
  117 
  118 /*
  119  * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant;
  120  * hard_start_xmit() return < NET_XMIT_MASK means skb was consumed.
  121  */
  122 static inline bool dev_xmit_complete(int rc)
  123 {
  124 	/*
  125 	 * Positive cases with an skb consumed by a driver:
  126 	 * - successful transmission (rc == NETDEV_TX_OK)
  127 	 * - error while transmitting (rc < 0)
  128 	 * - error while queueing to a different device (rc & NET_XMIT_MASK)
  129 	 */
  130 	if (likely(rc < NET_XMIT_MASK))
  131 		return true;
  132 
  133 	return false;
  134 }
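/* Hedged caller-side sketch (example_try_xmit is invented): per the
 * comment above, any rc below NET_XMIT_MASK means the driver consumed
 * the skb; NETDEV_TX_BUSY/LOCKED leave it with the caller.
 */
static netdev_tx_t example_try_xmit(struct net_device *dev,
				    struct sk_buff *skb)
{
	netdev_tx_t rc = dev->netdev_ops->ndo_start_xmit(skb, dev);

	if (dev_xmit_complete(rc))
		return rc;	/* consumed: success, error, or dropped */

	/* still ours: dev_hard_start_xmit() would requeue; this toy
	 * sketch just frees it.
	 */
	kfree_skb(skb);
	return NETDEV_TX_OK;
}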
  135 
  136 /*
  137  *	Compute the worst case header length according to the protocols
  138  *	used.
  139  */
  140 
  141 #if defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
  142 # if defined(CONFIG_MAC80211_MESH)
  143 #  define LL_MAX_HEADER 128
  144 # else
  145 #  define LL_MAX_HEADER 96
  146 # endif
  147 #else
  148 # define LL_MAX_HEADER 32
  149 #endif
  150 
  151 #if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \
  152     !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL)
  153 #define MAX_HEADER LL_MAX_HEADER
  154 #else
  155 #define MAX_HEADER (LL_MAX_HEADER + 48)
  156 #endif
  157 
  158 /*
  159  *	Old network device statistics. Fields are native words
  160  *	(unsigned long) so they can be read and written atomically.
  161  */
  162 
  163 struct net_device_stats {
  164 	unsigned long	rx_packets;
  165 	unsigned long	tx_packets;
  166 	unsigned long	rx_bytes;
  167 	unsigned long	tx_bytes;
  168 	unsigned long	rx_errors;
  169 	unsigned long	tx_errors;
  170 	unsigned long	rx_dropped;
  171 	unsigned long	tx_dropped;
  172 	unsigned long	multicast;
  173 	unsigned long	collisions;
  174 	unsigned long	rx_length_errors;
  175 	unsigned long	rx_over_errors;
  176 	unsigned long	rx_crc_errors;
  177 	unsigned long	rx_frame_errors;
  178 	unsigned long	rx_fifo_errors;
  179 	unsigned long	rx_missed_errors;
  180 	unsigned long	tx_aborted_errors;
  181 	unsigned long	tx_carrier_errors;
  182 	unsigned long	tx_fifo_errors;
  183 	unsigned long	tx_heartbeat_errors;
  184 	unsigned long	tx_window_errors;
  185 	unsigned long	rx_compressed;
  186 	unsigned long	tx_compressed;
  187 };
  188 
  189 
  190 #include <linux/cache.h>
  191 #include <linux/skbuff.h>
  192 
  193 #ifdef CONFIG_RPS
  194 #include <linux/static_key.h>
  195 extern struct static_key rps_needed;
  196 #endif
  197 
  198 struct neighbour;
  199 struct neigh_parms;
  200 struct sk_buff;
  201 
  202 struct netdev_hw_addr {
  203 	struct list_head	list;
  204 	unsigned char		addr[MAX_ADDR_LEN];
  205 	unsigned char		type;
  206 #define NETDEV_HW_ADDR_T_LAN		1
  207 #define NETDEV_HW_ADDR_T_SAN		2
  208 #define NETDEV_HW_ADDR_T_SLAVE		3
  209 #define NETDEV_HW_ADDR_T_UNICAST	4
  210 #define NETDEV_HW_ADDR_T_MULTICAST	5
  211 	bool			global_use;
  212 	int			sync_cnt;
  213 	int			refcount;
  214 	int			synced;
  215 	struct rcu_head		rcu_head;
  216 };
  217 
  218 struct netdev_hw_addr_list {
  219 	struct list_head	list;
  220 	int			count;
  221 };
  222 
  223 #define netdev_hw_addr_list_count(l) ((l)->count)
  224 #define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
  225 #define netdev_hw_addr_list_for_each(ha, l) \
  226 	list_for_each_entry(ha, &(l)->list, list)
  227 
  228 #define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
  229 #define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
  230 #define netdev_for_each_uc_addr(ha, dev) \
  231 	netdev_hw_addr_list_for_each(ha, &(dev)->uc)
  232 
  233 #define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
  234 #define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
  235 #define netdev_for_each_mc_addr(ha, dev) \
  236 	netdev_hw_addr_list_for_each(ha, &(dev)->mc)
  237 
  238 struct hh_cache {
  239 	u16		hh_len;
  240 	u16		__pad;
  241 	seqlock_t	hh_lock;
  242 
  243 	/* cached hardware header; allow for machine alignment needs.        */
  244 #define HH_DATA_MOD	16
  245 #define HH_DATA_OFF(__len) \
  246 	(HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
  247 #define HH_DATA_ALIGN(__len) \
  248 	(((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
  249 	unsigned long	hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
  250 };
  251 
  252 /* Reserve HH_DATA_MOD byte-aligned space for hard_header_len, but at least that much.
  253  * Alternative is:
  254  *   dev->hard_header_len ? (dev->hard_header_len +
  255  *                           (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
  256  *
  257  * We could use other alignment values, but we must maintain the
  258  * relationship HH alignment <= LL alignment.
  259  */
  260 #define LL_RESERVED_SPACE(dev) \
  261 	((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
  262 #define LL_RESERVED_SPACE_EXTRA(dev,extra) \
  263 	((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
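/* Worked example: for a plain Ethernet device (hard_header_len == 14,
 * needed_headroom == 0), LL_RESERVED_SPACE() is ((14 & ~15) + 16) == 16.
 * A hedged allocation sketch (example_alloc_ll_skb is invented):
 */
static struct sk_buff *example_alloc_ll_skb(struct net_device *dev,
					    unsigned int len)
{
	struct sk_buff *skb = alloc_skb(len + LL_RESERVED_SPACE(dev),
					GFP_ATOMIC);

	if (skb)
		skb_reserve(skb, LL_RESERVED_SPACE(dev)); /* hard header room */
	return skb;
}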
  264 
  265 struct header_ops {
  266 	int	(*create) (struct sk_buff *skb, struct net_device *dev,
  267 			   unsigned short type, const void *daddr,
  268 			   const void *saddr, unsigned int len);
  269 	int	(*parse)(const struct sk_buff *skb, unsigned char *haddr);
  270 	int	(*rebuild)(struct sk_buff *skb);
  271 	int	(*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
  272 	void	(*cache_update)(struct hh_cache *hh,
  273 				const struct net_device *dev,
  274 				const unsigned char *haddr);
  275 };
  276 
  277 /* These flag bits are private to the generic network queueing
  278  * layer, they may not be explicitly referenced by any other
  279  * code.
  280  */
  281 
  282 enum netdev_state_t {
  283 	__LINK_STATE_START,
  284 	__LINK_STATE_PRESENT,
  285 	__LINK_STATE_NOCARRIER,
  286 	__LINK_STATE_LINKWATCH_PENDING,
  287 	__LINK_STATE_DORMANT,
  288 };
  289 
  290 
  291 /*
  292  * This structure holds at boot time configured netdevice settings. They
  293  * are then used in the device probing.
  294  */
  295 struct netdev_boot_setup {
  296 	char name[IFNAMSIZ];
  297 	struct ifmap map;
  298 };
  299 #define NETDEV_BOOT_SETUP_MAX 8
  300 
  301 int __init netdev_boot_setup(char *str);
  302 
  303 /*
  304  * Structure for NAPI scheduling similar to tasklet but with weighting
  305  */
  306 struct napi_struct {
  307 	/* The poll_list must only be managed by the entity which
  308 	 * changes the state of the NAPI_STATE_SCHED bit.  This means
  309 	 * whoever atomically sets that bit can add this napi_struct
  310 	 * to the per-cpu poll_list, and whoever clears that bit
  311 	 * can remove from the list right before clearing the bit.
  312 	 */
  313 	struct list_head	poll_list;
  314 
  315 	unsigned long		state;
  316 	int			weight;
  317 	unsigned int		gro_count;
  318 	int			(*poll)(struct napi_struct *, int);
  319 #ifdef CONFIG_NETPOLL
  320 	spinlock_t		poll_lock;
  321 	int			poll_owner;
  322 #endif
  323 	struct net_device	*dev;
  324 	struct sk_buff		*gro_list;
  325 	struct sk_buff		*skb;
  326 	struct list_head	dev_list;
  327 	struct hlist_node	napi_hash_node;
  328 	unsigned int		napi_id;
  329 };
  330 
  331 enum {
  332 	NAPI_STATE_SCHED,	/* Poll is scheduled */
  333 	NAPI_STATE_DISABLE,	/* Disable pending */
  334 	NAPI_STATE_NPSVC,	/* Netpoll - don't dequeue from poll_list */
  335 	NAPI_STATE_HASHED,	/* In NAPI hash */
  336 };
  337 
  338 enum gro_result {
  339 	GRO_MERGED,
  340 	GRO_MERGED_FREE,
  341 	GRO_HELD,
  342 	GRO_NORMAL,
  343 	GRO_DROP,
  344 };
  345 typedef enum gro_result gro_result_t;
  346 
  347 /*
  348  * enum rx_handler_result - Possible return values for rx_handlers.
  349  * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it
  350  * further.
  351  * @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in
  352  * case skb->dev was changed by rx_handler.
  353  * @RX_HANDLER_EXACT: Force exact delivery, no wildcard.
  354  * @RX_HANDLER_PASS: Do nothing, pass the skb as if no rx_handler was called.
  355  *
  356  * rx_handlers are functions called from inside __netif_receive_skb(), to do
  357  * special processing of the skb, prior to delivery to protocol handlers.
  358  *
  359  * Currently, a net_device can only have a single rx_handler registered. Trying
  360  * to register a second rx_handler will return -EBUSY.
  361  *
  362  * To register a rx_handler on a net_device, use netdev_rx_handler_register().
  363  * To unregister a rx_handler on a net_device, use
  364  * netdev_rx_handler_unregister().
  365  *
  366  * Upon return, rx_handler is expected to tell __netif_receive_skb() what to
  367  * do with the skb.
  368  *
  369  * If the rx_handler consumed the skb in some way, it should return
  370  * RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for
  371  * the skb to be delivered in some other way.
  372  *
  373  * If the rx_handler changed skb->dev, to divert the skb to another
  374  * net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the
  375  * new device will be called if it exists.
  376  *
  377  * If the rx_handler considers that the skb should be ignored, it should return
  378  * RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that
  379  * are registered on exact device (ptype->dev == skb->dev).
  380  *
  381  * If the rx_handler didn't change skb->dev but wants the skb to be normally
  382  * delivered, it should return RX_HANDLER_PASS.
  383  *
  384  * A device without a registered rx_handler will behave as if rx_handler
  385  * returned RX_HANDLER_PASS.
  386  */
  387 
  388 enum rx_handler_result {
  389 	RX_HANDLER_CONSUMED,
  390 	RX_HANDLER_ANOTHER,
  391 	RX_HANDLER_EXACT,
  392 	RX_HANDLER_PASS,
  393 };
  394 typedef enum rx_handler_result rx_handler_result_t;
  395 typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
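/* A hedged minimal rx_handler (example_rx_handler is invented); it is
 * registered under rtnl_lock() with netdev_rx_handler_register(),
 * declared later in this header.
 */
static rx_handler_result_t example_rx_handler(struct sk_buff **pskb)
{
	/* inspect *pskb here; if skb->dev were changed to divert the
	 * frame, RX_HANDLER_ANOTHER would be returned instead.
	 */
	return RX_HANDLER_PASS;	/* let normal delivery continue */
}

/* err = netdev_rx_handler_register(dev, example_rx_handler, NULL); */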
  396 
  397 void __napi_schedule(struct napi_struct *n);
  398 
  399 static inline bool napi_disable_pending(struct napi_struct *n)
  400 {
  401 	return test_bit(NAPI_STATE_DISABLE, &n->state);
  402 }
  403 
  404 /**
  405  *	napi_schedule_prep - check if napi can be scheduled
  406  *	@n: napi context
  407  *
  408  * Test if NAPI routine is already running, and if not mark
  409  * it as running.  This is used as a condition variable to
  410  * ensure only one NAPI poll instance runs.  We also make
  411  * sure there is no pending NAPI disable.
  412  */
  413 static inline bool napi_schedule_prep(struct napi_struct *n)
  414 {
  415 	return !napi_disable_pending(n) &&
  416 		!test_and_set_bit(NAPI_STATE_SCHED, &n->state);
  417 }
  418 
  419 /**
  420  *	napi_schedule - schedule NAPI poll
  421  *	@n: napi context
  422  *
  423  * Schedule NAPI poll routine to be called if it is not already
  424  * running.
  425  */
  426 static inline void napi_schedule(struct napi_struct *n)
  427 {
  428 	if (napi_schedule_prep(n))
  429 		__napi_schedule(n);
  430 }
  431 
  432 /* Try to reschedule poll. Called by dev->poll() after napi_complete().  */
  433 static inline bool napi_reschedule(struct napi_struct *napi)
  434 {
  435 	if (napi_schedule_prep(napi)) {
  436 		__napi_schedule(napi);
  437 		return true;
  438 	}
  439 	return false;
  440 }
  441 
  442 /**
  443  *	napi_complete - NAPI processing complete
  444  *	@n: napi context
  445  *
  446  * Mark NAPI processing as complete.
  447  */
  448 void __napi_complete(struct napi_struct *n);
  449 void napi_complete(struct napi_struct *n);
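/* A hedged end-to-end sketch of the NAPI contract (all example_* names
 * are invented; irqreturn_t/IRQ_HANDLED come from <linux/interrupt.h>):
 * the hard IRQ schedules the context, the poll routine processes up to
 * 'budget' packets and calls napi_complete() when the ring runs dry.
 */
struct example_priv {
	struct napi_struct napi;
};

static irqreturn_t example_isr(int irq, void *data)
{
	struct example_priv *priv = data;

	/* mask device interrupts here, then defer to softirq context */
	napi_schedule(&priv->napi);
	return IRQ_HANDLED;
}

static int example_poll(struct napi_struct *napi, int budget)
{
	int done = 0;	/* e.g. done = example_clean_rx_ring(napi, budget); */

	if (done < budget) {
		napi_complete(napi);
		/* unmask device interrupts here */
	}
	return done;
}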
  450 
  451 /**
  452  *	napi_by_id - lookup a NAPI by napi_id
  453  *	@napi_id: hashed napi_id
  454  *
  455  * lookup @napi_id in napi_hash table
  456  * must be called under rcu_read_lock()
  457  */
  458 struct napi_struct *napi_by_id(unsigned int napi_id);
  459 
  460 /**
  461  *	napi_hash_add - add a NAPI to global hashtable
  462  *	@napi: napi context
  463  *
  464  * generate a new napi_id and store @napi under it in napi_hash
  465  */
  466 void napi_hash_add(struct napi_struct *napi);
  467 
  468 /**
  469  *	napi_hash_del - remove a NAPI from global table
  470  *	@napi: napi context
  471  *
  472  * Warning: caller must observe rcu grace period
  473  * before freeing memory containing @napi
  474  */
  475 void napi_hash_del(struct napi_struct *napi);
  476 
  477 /**
  478  *	napi_disable - prevent NAPI from scheduling
  479  *	@n: napi context
  480  *
  481  * Stop NAPI from being scheduled on this context.
  482  * Waits till any outstanding processing completes.
  483  */
  484 static inline void napi_disable(struct napi_struct *n)
  485 {
  486 	might_sleep();
  487 	set_bit(NAPI_STATE_DISABLE, &n->state);
  488 	while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
  489 		msleep(1);
  490 	clear_bit(NAPI_STATE_DISABLE, &n->state);
  491 }
  492 
  493 /**
  494  *	napi_enable - enable NAPI scheduling
  495  *	@n: napi context
  496  *
  497  * Allow NAPI to be scheduled on this context again.
  498  * Must be paired with napi_disable.
  499  */
  500 static inline void napi_enable(struct napi_struct *n)
  501 {
  502 	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
  503 	smp_mb__before_clear_bit();
  504 	clear_bit(NAPI_STATE_SCHED, &n->state);
  505 }
  506 
  507 #ifdef CONFIG_SMP
  508 /**
  509  *	napi_synchronize - wait until NAPI is not running
  510  *	@n: napi context
  511  *
  512  * Wait until NAPI is done being scheduled on this context.
  513  * Waits till any outstanding processing completes but
  514  * does not disable future activations.
  515  */
  516 static inline void napi_synchronize(const struct napi_struct *n)
  517 {
  518 	while (test_bit(NAPI_STATE_SCHED, &n->state))
  519 		msleep(1);
  520 }
  521 #else
  522 # define napi_synchronize(n)	barrier()
  523 #endif
  524 
  525 enum netdev_queue_state_t {
  526 	__QUEUE_STATE_DRV_XOFF,
  527 	__QUEUE_STATE_STACK_XOFF,
  528 	__QUEUE_STATE_FROZEN,
  529 #define QUEUE_STATE_ANY_XOFF ((1 << __QUEUE_STATE_DRV_XOFF)		| \
  530 			      (1 << __QUEUE_STATE_STACK_XOFF))
  531 #define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF		| \
  532 					(1 << __QUEUE_STATE_FROZEN))
  533 };
  534 /*
  535  * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue.  The
  536  * netif_tx_* functions below are used to manipulate this flag.  The
  537  * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit
  538  * queue independently.  The netif_xmit_*stopped functions below are called
  539  * to check if the queue has been stopped by the driver or stack (either
  540  * of the XOFF bits are set in the state).  Drivers should not need to call
  541  * netif_xmit*stopped functions, they should only be using netif_tx_*.
  542  */
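/* A hedged driver-side sketch of the XOFF protocol described above
 * (example_* names and the ring-slot accounting are invented;
 * netif_stop_queue() and friends are declared later in this header):
 */
static void example_tx_path(struct net_device *dev, unsigned int free_slots)
{
	/* ...post descriptors... then: */
	if (free_slots < MAX_SKB_FRAGS + 1)
		netif_stop_queue(dev);		/* sets __QUEUE_STATE_DRV_XOFF */
}

static void example_tx_complete(struct net_device *dev, unsigned int free_slots)
{
	if (netif_queue_stopped(dev) && free_slots > MAX_SKB_FRAGS)
		netif_wake_queue(dev);		/* clears it and reschedules */
}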
  543 
  544 struct netdev_queue {
  545 /*
  546  * read mostly part
  547  */
  548 	struct net_device	*dev;
  549 	struct Qdisc		*qdisc;
  550 	struct Qdisc		*qdisc_sleeping;
  551 #ifdef CONFIG_SYSFS
  552 	struct kobject		kobj;
  553 #endif
  554 #if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
  555 	int			numa_node;
  556 #endif
  557 /*
  558  * write mostly part
  559  */
  560 	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
  561 	int			xmit_lock_owner;
  562 	/*
  563 	 * please use this field instead of dev->trans_start
  564 	 */
  565 	unsigned long		trans_start;
  566 
  567 	/*
  568 	 * Number of TX timeouts for this queue
  569 	 * (/sys/class/net/DEV/Q/trans_timeout)
  570 	 */
  571 	unsigned long		trans_timeout;
  572 
  573 	unsigned long		state;
  574 
  575 #ifdef CONFIG_BQL
  576 	struct dql		dql;
  577 #endif
  578 } ____cacheline_aligned_in_smp;
  579 
  580 static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
  581 {
  582 #if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
  583 	return q->numa_node;
  584 #else
  585 	return NUMA_NO_NODE;
  586 #endif
  587 }
  588 
  589 static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
  590 {
  591 #if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
  592 	q->numa_node = node;
  593 #endif
  594 }
  595 
  596 #ifdef CONFIG_RPS
  597 /*
  598  * This structure holds an RPS map which can be of variable length.  The
  599  * map is an array of CPUs.
  600  */
  601 struct rps_map {
  602 	unsigned int len;
  603 	struct rcu_head rcu;
  604 	u16 cpus[0];
  605 };
  606 #define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))
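/* rps_map ends in a flexible array, so it is allocated with
 * RPS_MAP_SIZE() for the desired CPU count. A hedged sketch
 * (example_alloc_rps_map is invented; kzalloc() is in <linux/slab.h>):
 */
static struct rps_map *example_alloc_rps_map(unsigned int ncpus)
{
	struct rps_map *map = kzalloc(RPS_MAP_SIZE(ncpus), GFP_KERNEL);

	if (map)
		map->len = ncpus;	/* caller fills map->cpus[] */
	return map;
}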
  607 
  608 /*
  609  * The rps_dev_flow structure contains the mapping of a flow to a CPU, the
  610  * tail pointer for that CPU's input queue at the time of last enqueue, and
  611  * a hardware filter index.
  612  */
  613 struct rps_dev_flow {
  614 	u16 cpu;
  615 	u16 filter;
  616 	unsigned int last_qtail;
  617 };
  618 #define RPS_NO_FILTER 0xffff
  619 
  620 /*
  621  * The rps_dev_flow_table structure contains a table of flow mappings.
  622  */
  623 struct rps_dev_flow_table {
  624 	unsigned int mask;
  625 	struct rcu_head rcu;
  626 	struct rps_dev_flow flows[0];
  627 };
  628 #define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
  629     ((_num) * sizeof(struct rps_dev_flow)))
  630 
  631 /*
  632  * The rps_sock_flow_table contains mappings of flows to the last CPU
  633  * on which they were processed by the application (set in recvmsg).
  634  */
  635 struct rps_sock_flow_table {
  636 	unsigned int mask;
  637 	u16 ents[0];
  638 };
  639 #define	RPS_SOCK_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_sock_flow_table) + \
  640     ((_num) * sizeof(u16)))
  641 
  642 #define RPS_NO_CPU 0xffff
  643 
  644 static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
  645 					u32 hash)
  646 {
  647 	if (table && hash) {
  648 		unsigned int cpu, index = hash & table->mask;
  649 
  650 		/* We only give a hint, preemption can change cpu under us */
  651 		cpu = raw_smp_processor_id();
  652 
  653 		if (table->ents[index] != cpu)
  654 			table->ents[index] = cpu;
  655 	}
  656 }
  657 
  658 static inline void rps_reset_sock_flow(struct rps_sock_flow_table *table,
  659 				       u32 hash)
  660 {
  661 	if (table && hash)
  662 		table->ents[hash & table->mask] = RPS_NO_CPU;
  663 }
  664 
  665 extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;
  666 
  667 #ifdef CONFIG_RFS_ACCEL
  668 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id,
  669 			 u16 filter_id);
  670 #endif
  671 #endif /* CONFIG_RPS */
  672 
  673 /* This structure contains an instance of an RX queue. */
  674 struct netdev_rx_queue {
  675 #ifdef CONFIG_RPS
  676 	struct rps_map __rcu		*rps_map;
  677 	struct rps_dev_flow_table __rcu	*rps_flow_table;
  678 #endif
  679 	struct kobject			kobj;
  680 	struct net_device		*dev;
  681 } ____cacheline_aligned_in_smp;
  682 
  683 /*
  684  * RX queue sysfs structures and functions.
  685  */
  686 struct rx_queue_attribute {
  687 	struct attribute attr;
  688 	ssize_t (*show)(struct netdev_rx_queue *queue,
  689 	    struct rx_queue_attribute *attr, char *buf);
  690 	ssize_t (*store)(struct netdev_rx_queue *queue,
  691 	    struct rx_queue_attribute *attr, const char *buf, size_t len);
  692 };
  693 
  694 #ifdef CONFIG_XPS
  695 /*
  696  * This structure holds an XPS map which can be of variable length.  The
  697  * map is an array of queues.
  698  */
  699 struct xps_map {
  700 	unsigned int len;
  701 	unsigned int alloc_len;
  702 	struct rcu_head rcu;
  703 	u16 queues[0];
  704 };
  705 #define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
  706 #define XPS_MIN_MAP_ALLOC ((L1_CACHE_BYTES - sizeof(struct xps_map))	\
  707     / sizeof(u16))
  708 
  709 /*
  710  * This structure holds all XPS maps for device.  Maps are indexed by CPU.
  711  */
  712 struct xps_dev_maps {
  713 	struct rcu_head rcu;
  714 	struct xps_map __rcu *cpu_map[0];
  715 };
  716 #define XPS_DEV_MAPS_SIZE (sizeof(struct xps_dev_maps) +		\
  717     (nr_cpu_ids * sizeof(struct xps_map *)))
  718 #endif /* CONFIG_XPS */
  719 
  720 #define TC_MAX_QUEUE	16
  721 #define TC_BITMASK	15
  722 /* HW offloaded queuing disciplines txq count and offset maps */
  723 struct netdev_tc_txq {
  724 	u16 count;
  725 	u16 offset;
  726 };
  727 
  728 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
  729 /*
  730  * This structure is to hold information about the device
  731  * configured to run FCoE protocol stack.
  732  */
  733 struct netdev_fcoe_hbainfo {
  734 	char	manufacturer[64];
  735 	char	serial_number[64];
  736 	char	hardware_version[64];
  737 	char	driver_version[64];
  738 	char	optionrom_version[64];
  739 	char	firmware_version[64];
  740 	char	model[256];
  741 	char	model_description[256];
  742 };
  743 #endif
  744 
  745 #define MAX_PHYS_PORT_ID_LEN 32
  746 
  747 /* This structure holds a unique identifier for the
  748  * physical port used by a netdevice.
  749  */
  750 struct netdev_phys_port_id {
  751 	unsigned char id[MAX_PHYS_PORT_ID_LEN];
  752 	unsigned char id_len;
  753 };
  754 
  755 typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
  756 				       struct sk_buff *skb);
  757 
  758 /*
  759  * This structure defines the management hooks for network devices.
  760  * The following hooks can be defined; unless noted otherwise, they are
  761  * optional and can be filled with a null pointer.
  762  *
  763  * int (*ndo_init)(struct net_device *dev);
  764  *     This function is called once when network device is registered.
  765  *     The network device can use this for any late stage initialization
  766  *     or semantic validation. It can fail with an error code which will
  767  *     be propagated back to register_netdev.
  768  *
  769  * void (*ndo_uninit)(struct net_device *dev);
  770  *     This function is called when device is unregistered or when registration
  771  *     fails. It is not called if init fails.
  772  *
  773  * int (*ndo_open)(struct net_device *dev);
  774  *     This function is called when network device transitions to the up
  775  *     state.
  776  *
  777  * int (*ndo_stop)(struct net_device *dev);
  778  *     This function is called when network device transitions to the down
  779  *     state.
  780  *
  781  * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
  782  *                               struct net_device *dev);
  783  *	Called when a packet needs to be transmitted.
  784  *	Must return NETDEV_TX_OK or NETDEV_TX_BUSY.
  785  *        (Can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX.)
  786  *	Required; cannot be NULL.
  787  *
  788  * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
  789  *                         void *accel_priv, select_queue_fallback_t fallback);
  790  *	Called to decide which queue to use when the device supports multiple
  791  *	transmit queues.
  792  *
  793  * void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
  794  *	This function is called to allow the device receiver to make
  795  *	changes to configuration when multicast or promiscuous mode is enabled.
  796  *
  797  * void (*ndo_set_rx_mode)(struct net_device *dev);
  798  *	This function is called when the device changes its address list filtering.
  799  *	If driver handles unicast address filtering, it should set
  800  *	IFF_UNICAST_FLT to its priv_flags.
  801  *
  802  * int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
  803  *	This function is called when the Media Access Control address
  804  *	needs to be changed. If this interface is not defined, the
  805  *	mac address cannot be changed.
  806  *
  807  * int (*ndo_validate_addr)(struct net_device *dev);
  808  *	Test if Media Access Control address is valid for the device.
  809  *
  810  * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
  811  *	Called when a user requests an ioctl which can't be handled by
  812  *	the generic interface code. If not defined, ioctls return a
  813  *	not-supported error code.
  814  *
  815  * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
  816  *	Used to set network device bus interface parameters. This interface
  817  *	is retained for legacy reasons; new devices should use the bus
  818  *	interface (PCI) for low level management.
  819  *
  820  * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
  821  *	Called when a user wants to change the Maximum Transfer Unit
  822  *	of a device. If not defined, any request to change the MTU
  823  *	will return an error.
  824  *
  825  * void (*ndo_tx_timeout)(struct net_device *dev);
  826  *	Callback used when the transmitter has not made any progress
  827  *	for dev->watchdog ticks.
  828  *
  829  * struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
  830  *                      struct rtnl_link_stats64 *storage);
  831  * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
  832  *	Called when a user wants to get the network device usage
  833  *	statistics. Drivers must do one of the following:
  834  *	1. Define @ndo_get_stats64 to fill in a zero-initialised
  835  *	   rtnl_link_stats64 structure passed by the caller.
  836  *	2. Define @ndo_get_stats to update a net_device_stats structure
  837  *	   (which should normally be dev->stats) and return a pointer to
  838  *	   it. The structure may be changed asynchronously only if each
  839  *	   field is written atomically.
  840  *	3. Update dev->stats asynchronously and atomically, and define
  841  *	   neither operation.
  842  *
  843  * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid);
  844  *	If the device supports VLAN filtering, this function is called when a
  845  *	VLAN id is registered.
  846  *
  847  * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid);
  848  *	If the device supports VLAN filtering, this function is called when a
  849  *	VLAN id is unregistered.
  850  *
  851  * void (*ndo_poll_controller)(struct net_device *dev);
  852  *
  853  *	SR-IOV management functions.
  854  * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac);
  855  * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan, u8 qos);
  856  * int (*ndo_set_vf_tx_rate)(struct net_device *dev, int vf, int rate);
  857  * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting);
  858  * int (*ndo_get_vf_config)(struct net_device *dev,
  859  *			    int vf, struct ifla_vf_info *ivf);
  860  * int (*ndo_set_vf_link_state)(struct net_device *dev, int vf, int link_state);
  861  * int (*ndo_set_vf_port)(struct net_device *dev, int vf,
  862  *			  struct nlattr *port[]);
  863  * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
  864  * int (*ndo_setup_tc)(struct net_device *dev, u8 tc)
  865  * 	Called to setup 'tc' number of traffic classes in the net device. This
  866  * 	is always called from the stack with the rtnl lock held and netif tx
  867  * 	queues stopped. This allows the netdevice to perform queue management
  868  * 	safely.
  869  *
  870  *	Fiber Channel over Ethernet (FCoE) offload functions.
  871  * int (*ndo_fcoe_enable)(struct net_device *dev);
  872  *	Called when the FCoE protocol stack wants to start using LLD for FCoE
  873  *	so the underlying device can perform whatever needed configuration or
  874  *	initialization to support acceleration of FCoE traffic.
  875  *
  876  * int (*ndo_fcoe_disable)(struct net_device *dev);
  877  *	Called when the FCoE protocol stack wants to stop using LLD for FCoE
  878  *	so the underlying device can perform whatever needed clean-ups to
  879  *	stop supporting acceleration of FCoE traffic.
  880  *
  881  * int (*ndo_fcoe_ddp_setup)(struct net_device *dev, u16 xid,
  882  *			     struct scatterlist *sgl, unsigned int sgc);
  883  *	Called when the FCoE Initiator wants to initialize an I/O that
  884  *	is a possible candidate for Direct Data Placement (DDP). The LLD can
  885  *	perform necessary setup and returns 1 to indicate the device is set up
  886  *	successfully to perform DDP on this I/O, otherwise this returns 0.
  887  *
  888  * int (*ndo_fcoe_ddp_done)(struct net_device *dev,  u16 xid);
  889  *	Called when the FCoE Initiator/Target is done with the DDPed I/O as
  890  *	indicated by the FC exchange id 'xid', so the underlying device can
  891  *	clean up and reuse resources for later DDP requests.
  892  *
  893  * int (*ndo_fcoe_ddp_target)(struct net_device *dev, u16 xid,
  894  *			      struct scatterlist *sgl, unsigned int sgc);
  895  *	Called when the FCoE Target wants to initialize an I/O that
  896  *	is a possible candidate for Direct Data Placement (DDP). The LLD can
  897  *	perform necessary setup and returns 1 to indicate the device is set up
  898  *	successfully to perform DDP on this I/O, otherwise this returns 0.
  899  *
  900  * int (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
  901  *			       struct netdev_fcoe_hbainfo *hbainfo);
  902  *	Called when the FCoE Protocol stack wants information on the underlying
  903  *	device. This information is utilized by the FCoE protocol stack to
  904  *	register attributes with Fiber Channel management service as per the
  905  *	FC-GS Fabric Device Management Information(FDMI) specification.
  906  *
  907  * int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type);
  908  *	Called when the underlying device wants to override default World Wide
  909  *	Name (WWN) generation mechanism in FCoE protocol stack to pass its own
  910  *	World Wide Port Name (WWPN) or World Wide Node Name (WWNN) to the FCoE
  911  *	protocol stack to use.
  912  *
  913  *	RFS acceleration.
  914  * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb,
  915  *			    u16 rxq_index, u32 flow_id);
  916  *	Set hardware filter for RFS.  rxq_index is the target queue index;
  917  *	flow_id is a flow ID to be passed to rps_may_expire_flow() later.
  918  *	Return the filter ID on success, or a negative error code.
  919  *
  920  *	Slave management functions (for bridge, bonding, etc).
  921  * int (*ndo_add_slave)(struct net_device *dev, struct net_device *slave_dev);
  922  *	Called to make another netdev an underling.
  923  *
  924  * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev);
  925  *	Called to release previously enslaved netdev.
  926  *
  927  *      Feature/offload setting functions.
  928  * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
  929  *		netdev_features_t features);
  930  *	Adjusts the requested feature flags according to device-specific
  931  *	constraints, and returns the resulting flags. Must not modify
  932  *	the device state.
  933  *
  934  * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features);
  935  *	Called to update device configuration to new features. Passed
  936  *	feature set might be less than what was returned by ndo_fix_features().
  937  *	Must return >0 or -errno if it changed dev->features itself.
  938  *
  939  * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[],
  940  *		      struct net_device *dev,
  941  *		      const unsigned char *addr, u16 flags)
  942  *	Adds an FDB entry to dev for addr.
  943  * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[],
  944  *		      struct net_device *dev,
  945  *		      const unsigned char *addr)
  946  *	Deletes the FDB entry from dev corresponding to addr.
  947  * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
  948  *		       struct net_device *dev, int idx)
  949  *	Used to add FDB entries to dump requests. Implementers should add
  950  *	entries to skb and update idx with the number of entries.
  951  *
  952  * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh)
  953  * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq,
  954  *			     struct net_device *dev, u32 filter_mask)
  955  *
  956  * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier);
  957  *	Called to change device carrier. Soft-devices (like dummy, team, etc)
  958  *	which do not represent real hardware may define this to allow their
  959  *	userspace components to manage their virtual carrier state. Devices
  960  *	that determine carrier state from physical hardware properties (eg
  961  *	network cables) or protocol-dependent mechanisms (eg
  962  *	USB_CDC_NOTIFY_NETWORK_CONNECTION) should NOT implement this function.
  963  *
  964  * int (*ndo_get_phys_port_id)(struct net_device *dev,
  965  *			       struct netdev_phys_port_id *ppid);
  966  *	Called to get ID of physical port of this device. If driver does
  967  *	not implement this, it is assumed that the hw is not able to have
  968  *	multiple net devices on single physical port.
  969  *
  970  * void (*ndo_add_vxlan_port)(struct  net_device *dev,
  971  *			      sa_family_t sa_family, __be16 port);
  972  *	Called by vxlan to notify a driver about the UDP port and socket
  973  *	address family that vxlan is listening to. It is called only when
  974  *	a new port starts listening. The operation is protected by the
  975  *	vxlan_net->sock_lock.
  976  *
  977  * void (*ndo_del_vxlan_port)(struct  net_device *dev,
  978  *			      sa_family_t sa_family, __be16 port);
  979  *	Called by vxlan to notify the driver about a UDP port and socket
  980  *	address family that vxlan is not listening to anymore. The operation
  981  *	is protected by the vxlan_net->sock_lock.
  982  *
  983  * void* (*ndo_dfwd_add_station)(struct net_device *pdev,
  984  *				 struct net_device *dev)
  985  *	Called by upper layer devices to accelerate switching or other
  986  *	station functionality into hardware. 'pdev' is the lowerdev
  987  *	to use for the offload and 'dev' is the net device that will
  988  *	back the offload. Returns a pointer to the private structure
  989  *	the upper layer will maintain.
  990  * void (*ndo_dfwd_del_station)(struct net_device *pdev, void *priv)
  991  *	Called by upper layer device to delete the station created
  992  *	by 'ndo_dfwd_add_station'. 'pdev' is the net device backing
  993  *	the station and priv is the structure returned by the add
  994  *	operation.
  995  * netdev_tx_t (*ndo_dfwd_start_xmit)(struct sk_buff *skb,
  996  *				      struct net_device *dev,
  997  *				      void *priv);
  998  *	Callback to use for xmit over the accelerated station. This
  999  *	is used in place of ndo_start_xmit on accelerated net
 1000  *	devices.
 1001  */
 1002 struct net_device_ops {
 1003 	int			(*ndo_init)(struct net_device *dev);
 1004 	void			(*ndo_uninit)(struct net_device *dev);
 1005 	int			(*ndo_open)(struct net_device *dev);
 1006 	int			(*ndo_stop)(struct net_device *dev);
 1007 	netdev_tx_t		(*ndo_start_xmit) (struct sk_buff *skb,
 1008 						   struct net_device *dev);
 1009 	u16			(*ndo_select_queue)(struct net_device *dev,
 1010 						    struct sk_buff *skb,
 1011 						    void *accel_priv,
 1012 						    select_queue_fallback_t fallback);
 1013 	void			(*ndo_change_rx_flags)(struct net_device *dev,
 1014 						       int flags);
 1015 	void			(*ndo_set_rx_mode)(struct net_device *dev);
 1016 	int			(*ndo_set_mac_address)(struct net_device *dev,
 1017 						       void *addr);
 1018 	int			(*ndo_validate_addr)(struct net_device *dev);
 1019 	int			(*ndo_do_ioctl)(struct net_device *dev,
 1020 					        struct ifreq *ifr, int cmd);
 1021 	int			(*ndo_set_config)(struct net_device *dev,
 1022 					          struct ifmap *map);
 1023 	int			(*ndo_change_mtu)(struct net_device *dev,
 1024 						  int new_mtu);
 1025 	int			(*ndo_neigh_setup)(struct net_device *dev,
 1026 						   struct neigh_parms *);
 1027 	void			(*ndo_tx_timeout) (struct net_device *dev);
 1028 
 1029 	struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
 1030 						     struct rtnl_link_stats64 *storage);
 1031 	struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
 1032 
 1033 	int			(*ndo_vlan_rx_add_vid)(struct net_device *dev,
 1034 						       __be16 proto, u16 vid);
 1035 	int			(*ndo_vlan_rx_kill_vid)(struct net_device *dev,
 1036 						        __be16 proto, u16 vid);
 1037 #ifdef CONFIG_NET_POLL_CONTROLLER
 1038 	void                    (*ndo_poll_controller)(struct net_device *dev);
 1039 	int			(*ndo_netpoll_setup)(struct net_device *dev,
 1040 						     struct netpoll_info *info,
 1041 						     gfp_t gfp);
 1042 	void			(*ndo_netpoll_cleanup)(struct net_device *dev);
 1043 #endif
 1044 #ifdef CONFIG_NET_RX_BUSY_POLL
 1045 	int			(*ndo_busy_poll)(struct napi_struct *dev);
 1046 #endif
 1047 	int			(*ndo_set_vf_mac)(struct net_device *dev,
 1048 						  int queue, u8 *mac);
 1049 	int			(*ndo_set_vf_vlan)(struct net_device *dev,
 1050 						   int queue, u16 vlan, u8 qos);
 1051 	int			(*ndo_set_vf_tx_rate)(struct net_device *dev,
 1052 						      int vf, int rate);
 1053 	int			(*ndo_set_vf_spoofchk)(struct net_device *dev,
 1054 						       int vf, bool setting);
 1055 	int			(*ndo_get_vf_config)(struct net_device *dev,
 1056 						     int vf,
 1057 						     struct ifla_vf_info *ivf);
 1058 	int			(*ndo_set_vf_link_state)(struct net_device *dev,
 1059 							 int vf, int link_state);
 1060 	int			(*ndo_set_vf_port)(struct net_device *dev,
 1061 						   int vf,
 1062 						   struct nlattr *port[]);
 1063 	int			(*ndo_get_vf_port)(struct net_device *dev,
 1064 						   int vf, struct sk_buff *skb);
 1065 	int			(*ndo_setup_tc)(struct net_device *dev, u8 tc);
 1066 #if IS_ENABLED(CONFIG_FCOE)
 1067 	int			(*ndo_fcoe_enable)(struct net_device *dev);
 1068 	int			(*ndo_fcoe_disable)(struct net_device *dev);
 1069 	int			(*ndo_fcoe_ddp_setup)(struct net_device *dev,
 1070 						      u16 xid,
 1071 						      struct scatterlist *sgl,
 1072 						      unsigned int sgc);
 1073 	int			(*ndo_fcoe_ddp_done)(struct net_device *dev,
 1074 						     u16 xid);
 1075 	int			(*ndo_fcoe_ddp_target)(struct net_device *dev,
 1076 						       u16 xid,
 1077 						       struct scatterlist *sgl,
 1078 						       unsigned int sgc);
 1079 	int			(*ndo_fcoe_get_hbainfo)(struct net_device *dev,
 1080 							struct netdev_fcoe_hbainfo *hbainfo);
 1081 #endif
 1082 
 1083 #if IS_ENABLED(CONFIG_LIBFCOE)
 1084 #define NETDEV_FCOE_WWNN 0
 1085 #define NETDEV_FCOE_WWPN 1
 1086 	int			(*ndo_fcoe_get_wwn)(struct net_device *dev,
 1087 						    u64 *wwn, int type);
 1088 #endif
 1089 
 1090 #ifdef CONFIG_RFS_ACCEL
 1091 	int			(*ndo_rx_flow_steer)(struct net_device *dev,
 1092 						     const struct sk_buff *skb,
 1093 						     u16 rxq_index,
 1094 						     u32 flow_id);
 1095 #endif
 1096 	int			(*ndo_add_slave)(struct net_device *dev,
 1097 						 struct net_device *slave_dev);
 1098 	int			(*ndo_del_slave)(struct net_device *dev,
 1099 						 struct net_device *slave_dev);
 1100 	netdev_features_t	(*ndo_fix_features)(struct net_device *dev,
 1101 						    netdev_features_t features);
 1102 	int			(*ndo_set_features)(struct net_device *dev,
 1103 						    netdev_features_t features);
 1104 	int			(*ndo_neigh_construct)(struct neighbour *n);
 1105 	void			(*ndo_neigh_destroy)(struct neighbour *n);
 1106 
 1107 	int			(*ndo_fdb_add)(struct ndmsg *ndm,
 1108 					       struct nlattr *tb[],
 1109 					       struct net_device *dev,
 1110 					       const unsigned char *addr,
 1111 					       u16 flags);
 1112 	int			(*ndo_fdb_del)(struct ndmsg *ndm,
 1113 					       struct nlattr *tb[],
 1114 					       struct net_device *dev,
 1115 					       const unsigned char *addr);
 1116 	int			(*ndo_fdb_dump)(struct sk_buff *skb,
 1117 						struct netlink_callback *cb,
 1118 						struct net_device *dev,
 1119 						int idx);
 1120 
 1121 	int			(*ndo_bridge_setlink)(struct net_device *dev,
 1122 						      struct nlmsghdr *nlh);
 1123 	int			(*ndo_bridge_getlink)(struct sk_buff *skb,
 1124 						      u32 pid, u32 seq,
 1125 						      struct net_device *dev,
 1126 						      u32 filter_mask);
 1127 	int			(*ndo_bridge_dellink)(struct net_device *dev,
 1128 						      struct nlmsghdr *nlh);
 1129 	int			(*ndo_change_carrier)(struct net_device *dev,
 1130 						      bool new_carrier);
 1131 	int			(*ndo_get_phys_port_id)(struct net_device *dev,
 1132 							struct netdev_phys_port_id *ppid);
 1133 	void			(*ndo_add_vxlan_port)(struct  net_device *dev,
 1134 						      sa_family_t sa_family,
 1135 						      __be16 port);
 1136 	void			(*ndo_del_vxlan_port)(struct  net_device *dev,
 1137 						      sa_family_t sa_family,
 1138 						      __be16 port);
 1139 
 1140 	void*			(*ndo_dfwd_add_station)(struct net_device *pdev,
 1141 							struct net_device *dev);
 1142 	void			(*ndo_dfwd_del_station)(struct net_device *pdev,
 1143 							void *priv);
 1144 
 1145 	netdev_tx_t		(*ndo_dfwd_start_xmit) (struct sk_buff *skb,
 1146 							struct net_device *dev,
 1147 							void *priv);
 1148 };
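/* A hedged illustration of the smallest useful ops block (all
 * example_* functions are invented; eth_validate_addr() and
 * eth_change_mtu() are the stock helpers from <linux/etherdevice.h>).
 * Only ndo_start_xmit is mandatory per the comment above.
 */
static int example_open(struct net_device *dev)
{
	netif_start_queue(dev);
	return 0;
}

static int example_stop(struct net_device *dev)
{
	netif_stop_queue(dev);
	return 0;
}

static netdev_tx_t example_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	dev_kfree_skb(skb);	/* pretend the hardware sent it */
	return NETDEV_TX_OK;
}

static const struct net_device_ops example_netdev_ops = {
	.ndo_open		= example_open,
	.ndo_stop		= example_stop,
	.ndo_start_xmit		= example_start_xmit,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= eth_change_mtu,
};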
 1149 
 1150 /*
 1151  *	The DEVICE structure.
 1152  *	Actually, this whole structure is a big mistake.  It mixes I/O
 1153  *	data with strictly "high-level" data, and it has to know about
 1154  *	almost every data structure used in the INET module.
 1155  *
 1156  *	FIXME: cleanup struct net_device such that network protocol info
 1157  *	moves out.
 1158  */
 1159 
 1160 struct net_device {
 1161 
 1162 	/*
 1163 	 * This is the first field of the "visible" part of this structure
 1164 	 * (i.e. as seen by users in the "Space.c" file).  It is the name
 1165 	 * of the interface.
 1166 	 */
 1167 	char			name[IFNAMSIZ];
 1168 
 1169 	/* device name hash chain, please keep it close to name[] */
 1170 	struct hlist_node	name_hlist;
 1171 
 1172 	/* snmp alias */
 1173 	char 			*ifalias;
 1174 
 1175 	/*
 1176 	 *	I/O specific fields
 1177 	 *	FIXME: Merge these and struct ifmap into one
 1178 	 */
 1179 	unsigned long		mem_end;	/* shared mem end	*/
 1180 	unsigned long		mem_start;	/* shared mem start	*/
 1181 	unsigned long		base_addr;	/* device I/O address	*/
 1182 	int			irq;		/* device IRQ number	*/
 1183 
 1184 	/*
 1185 	 *	Some hardware also needs these fields, but they are not
 1186 	 *	part of the usual set specified in Space.c.
 1187 	 */
 1188 
 1189 	unsigned long		state;
 1190 
 1191 	struct list_head	dev_list;
 1192 	struct list_head	napi_list;
 1193 	struct list_head	unreg_list;
 1194 	struct list_head	close_list;
 1195 
 1196 	/* directly linked devices, like slaves for bonding */
 1197 	struct {
 1198 		struct list_head upper;
 1199 		struct list_head lower;
 1200 	} adj_list;
 1201 
 1202 	/* all linked devices, *including* neighbours */
 1203 	struct {
 1204 		struct list_head upper;
 1205 		struct list_head lower;
 1206 	} all_adj_list;
 1207 
 1208 
 1209 	/* currently active device features */
 1210 	netdev_features_t	features;
 1211 	/* user-changeable features */
 1212 	netdev_features_t	hw_features;
 1213 	/* user-requested features */
 1214 	netdev_features_t	wanted_features;
 1215 	/* mask of features inheritable by VLAN devices */
 1216 	netdev_features_t	vlan_features;
 1217 	/* mask of features inherited by encapsulating devices
 1218 	 * This field indicates what encapsulation offloads
 1219 	 * the hardware is capable of doing, and drivers will
 1220 	 * need to set them appropriately.
 1221 	 */
 1222 	netdev_features_t	hw_enc_features;
 1223 	/* mask of features inheritable by MPLS */
 1224 	netdev_features_t	mpls_features;
 1225 
 1226 	/* Interface index. Unique device identifier	*/
 1227 	int			ifindex;
 1228 	int			iflink;
 1229 
 1230 	struct net_device_stats	stats;
 1231 	atomic_long_t		rx_dropped; /* packets dropped by the core network.
 1232 					     * Do not use this in drivers.
 1233 					     */
 1234 
 1235 #ifdef CONFIG_WIRELESS_EXT
 1236 	/* List of functions to handle Wireless Extensions (instead of ioctl).
 1237 	 * See <net/iw_handler.h> for details. Jean II */
 1238 	const struct iw_handler_def *	wireless_handlers;
 1239 	/* Instance data managed by the core of Wireless Extensions. */
 1240 	struct iw_public_data *	wireless_data;
 1241 #endif
 1242 	/* Management operations */
 1243 	const struct net_device_ops *netdev_ops;
 1244 	const struct ethtool_ops *ethtool_ops;
 1245 	const struct forwarding_accel_ops *fwd_ops;
 1246 
 1247 	/* Hardware header description */
 1248 	const struct header_ops *header_ops;
 1249 
 1250 	unsigned int		flags;	/* interface flags (a la BSD)	*/
 1251 	unsigned int		priv_flags; /* Like 'flags' but invisible to userspace.
 1252 					     * See if.h for definitions. */
 1253 	unsigned short		gflags;
 1254 	unsigned short		padded;	/* How much padding added by alloc_netdev() */
 1255 
 1256 	unsigned char		operstate; /* RFC2863 operstate */
 1257 	unsigned char		link_mode; /* mapping policy to operstate */
 1258 
 1259 	unsigned char		if_port;	/* Selectable AUI, TP,..*/
 1260 	unsigned char		dma;		/* DMA channel		*/
 1261 
 1262 	unsigned int		mtu;	/* interface MTU value		*/
 1263 	unsigned short		type;	/* interface hardware type	*/
 1264 	unsigned short		hard_header_len;	/* hardware hdr length	*/
 1265 
 1266 	/* extra head- and tailroom the hardware may need, but not in all cases
 1267 	 * can this be guaranteed, especially tailroom. Some cases also use
 1268 	 * LL_MAX_HEADER instead to allocate the skb.
 1269 	 */
 1270 	unsigned short		needed_headroom;
 1271 	unsigned short		needed_tailroom;
 1272 
 1273 	/* Interface address info. */
 1274 	unsigned char		perm_addr[MAX_ADDR_LEN]; /* permanent hw address */
 1275 	unsigned char		addr_assign_type; /* hw address assignment type */
 1276 	unsigned char		addr_len;	/* hardware address length	*/
 1277 	unsigned short		neigh_priv_len;
 1278 	unsigned short          dev_id;		/* Used to differentiate devices
 1279 						 * that share the same link
 1280 						 * layer address
 1281 						 */
 1282 	spinlock_t		addr_list_lock;
 1283 	struct netdev_hw_addr_list	uc;	/* Unicast mac addresses */
 1284 	struct netdev_hw_addr_list	mc;	/* Multicast mac addresses */
 1285 	struct netdev_hw_addr_list	dev_addrs; /* list of device
 1286 						    * hw addresses
 1287 						    */
 1288 #ifdef CONFIG_SYSFS
 1289 	struct kset		*queues_kset;
 1290 #endif
 1291 
 1292 	bool			uc_promisc;
 1293 	unsigned int		promiscuity;
 1294 	unsigned int		allmulti;
 1295 
 1296 
 1297 	/* Protocol specific pointers */
 1298 
 1299 #if IS_ENABLED(CONFIG_VLAN_8021Q)
 1300 	struct vlan_info __rcu	*vlan_info;	/* VLAN info */
 1301 #endif
 1302 #if IS_ENABLED(CONFIG_NET_DSA)
 1303 	struct dsa_switch_tree	*dsa_ptr;	/* dsa specific data */
 1304 #endif
 1305 #if IS_ENABLED(CONFIG_TIPC)
 1306 	struct tipc_bearer __rcu *tipc_ptr;	/* TIPC specific data */
 1307 #endif
 1308 	void 			*atalk_ptr;	/* AppleTalk link 	*/
 1309 	struct in_device __rcu	*ip_ptr;	/* IPv4 specific data	*/
 1310 	struct dn_dev __rcu     *dn_ptr;        /* DECnet specific data */
 1311 	struct inet6_dev __rcu	*ip6_ptr;       /* IPv6 specific data */
 1312 	void			*ax25_ptr;	/* AX.25 specific data */
 1313 	struct wireless_dev	*ieee80211_ptr;	/* IEEE 802.11 specific data,
 1314 						   assign before registering */
 1315 
 1316 /*
 1317  * Cache lines mostly used on receive path (including eth_type_trans())
 1318  */
 1319 	unsigned long		last_rx;	/* Time of last Rx
 1320 						 * This should not be set in
 1321 						 * drivers, unless really needed,
 1322 						 * because network stack (bonding)
 1323 						 * use it if/when necessary, to
 1324 						 * avoid dirtying this cache line.
 1325 						 */
 1326 
 1327 	/* Interface address info used in eth_type_trans() */
 1328 	unsigned char		*dev_addr;	/* hw address, (before bcast
 1329 						   because most packets are
 1330 						   unicast) */
 1331 
 1332 
 1333 #ifdef CONFIG_SYSFS
 1334 	struct netdev_rx_queue	*_rx;
 1335 
 1336 	/* Number of RX queues allocated at register_netdev() time */
 1337 	unsigned int		num_rx_queues;
 1338 
 1339 	/* Number of RX queues currently active in device */
 1340 	unsigned int		real_num_rx_queues;
 1341 
 1342 #endif
 1343 
 1344 	rx_handler_func_t __rcu	*rx_handler;
 1345 	void __rcu		*rx_handler_data;
 1346 
 1347 	struct netdev_queue __rcu *ingress_queue;
 1348 	unsigned char		broadcast[MAX_ADDR_LEN];	/* hw bcast add	*/
 1349 
 1350 
 1351 /*
 1352  * Cache lines mostly used on transmit path
 1353  */
 1354 	struct netdev_queue	*_tx ____cacheline_aligned_in_smp;
 1355 
 1356 	/* Number of TX queues allocated at alloc_netdev_mq() time  */
 1357 	unsigned int		num_tx_queues;
 1358 
 1359 	/* Number of TX queues currently active in device  */
 1360 	unsigned int		real_num_tx_queues;
 1361 
 1362 	/* root qdisc from userspace point of view */
 1363 	struct Qdisc		*qdisc;
 1364 
 1365 	unsigned long		tx_queue_len;	/* Max frames per queue allowed */
 1366 	spinlock_t		tx_global_lock;
 1367 
 1368 #ifdef CONFIG_XPS
 1369 	struct xps_dev_maps __rcu *xps_maps;
 1370 #endif
 1371 #ifdef CONFIG_RFS_ACCEL
 1372 	/* CPU reverse-mapping for RX completion interrupts, indexed
 1373 	 * by RX queue number.  Assigned by driver.  This must only be
 1374 	 * set if the ndo_rx_flow_steer operation is defined. */
 1375 	struct cpu_rmap		*rx_cpu_rmap;
 1376 #endif
 1377 
 1378 	/* These may be needed for future network-power-down code. */
 1379 
 1380 	/*
 1381 	 * trans_start here is expensive for high speed devices on SMP,
 1382 	 * please use netdev_queue->trans_start instead.
 1383 	 */
 1384 	unsigned long		trans_start;	/* Time (in jiffies) of last Tx	*/
 1385 
 1386 	int			watchdog_timeo; /* used by dev_watchdog() */
 1387 	struct timer_list	watchdog_timer;
 1388 
 1389 	/* Number of references to this device */
 1390 	int __percpu		*pcpu_refcnt;
 1391 
 1392 	/* delayed register/unregister */
 1393 	struct list_head	todo_list;
 1394 	/* device index hash chain */
 1395 	struct hlist_node	index_hlist;
 1396 
 1397 	struct list_head	link_watch_list;
 1398 
 1399 	/* register/unregister state machine */
 1400 	enum { NETREG_UNINITIALIZED=0,
 1401 	       NETREG_REGISTERED,	/* completed register_netdevice */
 1402 	       NETREG_UNREGISTERING,	/* called unregister_netdevice */
 1403 	       NETREG_UNREGISTERED,	/* completed unregister todo */
 1404 	       NETREG_RELEASED,		/* called free_netdev */
 1405 	       NETREG_DUMMY,		/* dummy device for NAPI poll */
 1406 	} reg_state:8;
 1407 
 1408 	bool dismantle; /* device is going to be freed */
 1409 
 1410 	enum {
 1411 		RTNL_LINK_INITIALIZED,
 1412 		RTNL_LINK_INITIALIZING,
 1413 	} rtnl_link_state:16;
 1414 
 1415 	/* Called from unregister, can be used to call free_netdev */
 1416 	void (*destructor)(struct net_device *dev);
 1417 
 1418 #ifdef CONFIG_NETPOLL
 1419 	struct netpoll_info __rcu	*npinfo;
 1420 #endif
 1421 
 1422 #ifdef CONFIG_NET_NS
 1423 	/* Network namespace this network device is inside */
 1424 	struct net		*nd_net;
 1425 #endif
 1426 
 1427 	/* mid-layer private */
 1428 	union {
 1429 		void				*ml_priv;
 1430 		struct pcpu_lstats __percpu	*lstats; /* loopback stats */
 1431 		struct pcpu_sw_netstats __percpu	*tstats;
 1432 		struct pcpu_dstats __percpu	*dstats; /* dummy stats */
 1433 		struct pcpu_vstats __percpu	*vstats; /* veth stats */
 1434 	};
 1435 	/* GARP */
 1436 	struct garp_port __rcu	*garp_port;
 1437 	/* MRP */
 1438 	struct mrp_port __rcu	*mrp_port;
 1439 
 1440 	/* class/net/name entry */
 1441 	struct device		dev;
 1442 	/* space for optional device, statistics, and wireless sysfs groups */
 1443 	const struct attribute_group *sysfs_groups[4];
 1444 	/* space for optional per-rx queue attributes */
 1445 	const struct attribute_group *sysfs_rx_queue_group;
 1446 
 1447 	/* rtnetlink link ops */
 1448 	const struct rtnl_link_ops *rtnl_link_ops;
 1449 
 1450 	/* for setting kernel sock attribute on TCP connection setup */
 1451 #define GSO_MAX_SIZE		65536
 1452 	unsigned int		gso_max_size;
 1453 #define GSO_MAX_SEGS		65535
 1454 	u16			gso_max_segs;
 1455 
 1456 #ifdef CONFIG_DCB
 1457 	/* Data Center Bridging netlink ops */
 1458 	const struct dcbnl_rtnl_ops *dcbnl_ops;
 1459 #endif
 1460 	u8 num_tc;
 1461 	struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
 1462 	u8 prio_tc_map[TC_BITMASK + 1];
 1463 
 1464 #if IS_ENABLED(CONFIG_FCOE)
 1465 	/* max exchange id for FCoE LRO by ddp */
 1466 	unsigned int		fcoe_ddp_xid;
 1467 #endif
 1468 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
 1469 	struct netprio_map __rcu *priomap;
 1470 #endif
 1471 	/* phy device may attach itself for hardware timestamping */
 1472 	struct phy_device *phydev;
 1473 
 1474 	struct lock_class_key *qdisc_tx_busylock;
 1475 
 1476 	/* group the device belongs to */
 1477 	int group;
 1478 
 1479 	struct pm_qos_request	pm_qos_req;
 1480 };
 1481 #define to_net_dev(d) container_of(d, struct net_device, dev)
 1482 
 1483 #define	NETDEV_ALIGN		32
 1484 
 1485 static inline
 1486 int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio)
 1487 {
 1488 	return dev->prio_tc_map[prio & TC_BITMASK];
 1489 }
 1490 
 1491 static inline
 1492 int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc)
 1493 {
 1494 	if (tc >= dev->num_tc)
 1495 		return -EINVAL;
 1496 
 1497 	dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK;
 1498 	return 0;
 1499 }
 1500 
 1501 static inline
 1502 void netdev_reset_tc(struct net_device *dev)
 1503 {
 1504 	dev->num_tc = 0;
 1505 	memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
 1506 	memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
 1507 }
 1508 
 1509 static inline
 1510 int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
 1511 {
 1512 	if (tc >= dev->num_tc)
 1513 		return -EINVAL;
 1514 
 1515 	dev->tc_to_txq[tc].count = count;
 1516 	dev->tc_to_txq[tc].offset = offset;
 1517 	return 0;
 1518 }
 1519 
 1520 static inline
 1521 int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
 1522 {
 1523 	if (num_tc > TC_MAX_QUEUE)
 1524 		return -EINVAL;
 1525 
 1526 	dev->num_tc = num_tc;
 1527 	return 0;
 1528 }
 1529 
 1530 static inline
 1531 int netdev_get_num_tc(struct net_device *dev)
 1532 {
 1533 	return dev->num_tc;
 1534 }
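
/* Hedged usage sketch (not part of the trace): how a multiqueue driver
 * might partition its TX queues into traffic classes with the helpers
 * above. The two-class layout, the queue counts and "my_setup_tc" are
 * illustrative assumptions.
 */
static int my_setup_tc(struct net_device *dev)
{
	int err;
	u8 prio;

	err = netdev_set_num_tc(dev, 2);	/* two traffic classes */
	if (err)
		return err;

	/* TC0 covers TX queues 0..3, TC1 covers TX queues 4..7 */
	netdev_set_tc_queue(dev, 0, 4, 0);
	netdev_set_tc_queue(dev, 1, 4, 4);

	/* map skb priorities 0..7 to TC0, the rest to TC1 */
	for (prio = 0; prio <= TC_BITMASK; prio++)
		netdev_set_prio_tc_map(dev, prio, prio < 8 ? 0 : 1);

	return 0;
}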
 1535 
 1536 static inline
 1537 struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
 1538 					 unsigned int index)
 1539 {
 1540 	return &dev->_tx[index];
 1541 }
 1542 
 1543 static inline void netdev_for_each_tx_queue(struct net_device *dev,
 1544 					    void (*f)(struct net_device *,
 1545 						      struct netdev_queue *,
 1546 						      void *),
 1547 					    void *arg)
 1548 {
 1549 	unsigned int i;
 1550 
 1551 	for (i = 0; i < dev->num_tx_queues; i++)
 1552 		f(dev, &dev->_tx[i], arg);
 1553 }
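
/* Hedged sketch: a callback for netdev_for_each_tx_queue() above; the
 * iterator simply applies it to every TX queue along with an opaque
 * argument. "my_count_txq" is an illustrative assumption.
 */
static void my_count_txq(struct net_device *dev,
			 struct netdev_queue *txq, void *arg)
{
	unsigned int *count = arg;

	(*count)++;	/* e.g. just count the device's TX queues */
}
/* usage: unsigned int n = 0; netdev_for_each_tx_queue(dev, my_count_txq, &n); */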
 1554 
 1555 struct netdev_queue *netdev_pick_tx(struct net_device *dev,
 1556 				    struct sk_buff *skb,
 1557 				    void *accel_priv);
 1558 
 1559 /*
 1560  * Net namespace inlines
 1561  */
 1562 static inline
 1563 struct net *dev_net(const struct net_device *dev)
 1564 {
 1565 	return read_pnet(&dev->nd_net);
 1566 }
 1567 
 1568 static inline
 1569 void dev_net_set(struct net_device *dev, struct net *net)
 1570 {
 1571 #ifdef CONFIG_NET_NS
 1572 	release_net(dev->nd_net);
 1573 	dev->nd_net = hold_net(net);
 1574 #endif
 1575 }
 1576 
 1577 static inline bool netdev_uses_dsa_tags(struct net_device *dev)
 1578 {
 1579 #ifdef CONFIG_NET_DSA_TAG_DSA
 1580 	if (dev->dsa_ptr != NULL)
 1581 		return dsa_uses_dsa_tags(dev->dsa_ptr);
 1582 #endif
 1583 
 1584 	return 0;
 1585 }
 1586 
 1587 static inline bool netdev_uses_trailer_tags(struct net_device *dev)
 1588 {
 1589 #ifdef CONFIG_NET_DSA_TAG_TRAILER
 1590 	if (dev->dsa_ptr != NULL)
 1591 		return dsa_uses_trailer_tags(dev->dsa_ptr);
 1592 #endif
 1593 
 1594 	return 0;
 1595 }
 1596 
 1597 /**
 1598  *	netdev_priv - access network device private data
 1599  *	@dev: network device
 1600  *
 1601  * Get network device private data
 1602  */
 1603 static inline void *netdev_priv(const struct net_device *dev)
 1604 {
 1605 	return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
 1606 }
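
/* Hedged sketch: the private area sits directly behind struct net_device,
 * so netdev_priv() is a constant offset rather than an extra dereference.
 * "struct my_priv" is an illustrative assumption; its size is what a
 * driver would pass as sizeof_priv to alloc_netdev() further below.
 */
struct my_priv {
	spinlock_t	lock;
	u32		rx_dropped;
};

static void my_count_drop(struct net_device *dev)
{
	struct my_priv *priv = netdev_priv(dev);

	spin_lock(&priv->lock);
	priv->rx_dropped++;
	spin_unlock(&priv->lock);
}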
 1607 
 1608 /* Set the sysfs physical device reference for the network logical device.
 1609  * If set prior to registration, a symlink is created during initialization.
 1610  */
 1611 #define SET_NETDEV_DEV(net, pdev)	((net)->dev.parent = (pdev))
 1612 
 1613 /* Set the sysfs device type for the network logical device to allow
 1614  * fine-grained identification of different network device types. For
 1615  * example Ethernet, Wireless LAN, Bluetooth, WiMAX etc.
 1616  */
 1617 #define SET_NETDEV_DEVTYPE(net, devtype)	((net)->dev.type = (devtype))
 1618 
 1619 /* Default NAPI poll() weight
 1620  * Device drivers are strongly advised not to use a bigger value
 1621  */
 1622 #define NAPI_POLL_WEIGHT 64
 1623 
 1624 /**
 1625  *	netif_napi_add - initialize a napi context
 1626  *	@dev:  network device
 1627  *	@napi: napi context
 1628  *	@poll: polling function
 1629  *	@weight: default weight
 1630  *
 1631  * netif_napi_add() must be used to initialize a napi context prior to calling
 1632  * *any* of the other napi related functions.
 1633  */
 1634 void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
 1635 		    int (*poll)(struct napi_struct *, int), int weight);
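
/* Hedged sketch of the usual NAPI pattern: the poll method consumes up to
 * @budget packets and calls napi_complete() (declared elsewhere in this
 * header) only once it runs out of work. "struct my_adapter",
 * my_clean_rx() and my_enable_rx_irq() are illustrative assumptions.
 */
struct my_adapter {
	struct napi_struct	napi;
	/* ... device state ... */
};

static int my_clean_rx(struct my_adapter *adapter, int budget);	/* assumed */
static void my_enable_rx_irq(struct my_adapter *adapter);	/* assumed */

static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_adapter *adapter = container_of(napi, struct my_adapter, napi);
	int work_done = my_clean_rx(adapter, budget);

	if (work_done < budget) {
		napi_complete(napi);
		my_enable_rx_irq(adapter);	/* re-arm RX interrupts */
	}
	return work_done;
}
/* at probe time: netif_napi_add(dev, &adapter->napi, my_poll, NAPI_POLL_WEIGHT); */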
 1636 
 1637 /**
 1638  *  netif_napi_del - remove a napi context
 1639  *  @napi: napi context
 1640  *
 1641  *  netif_napi_del() removes a napi context from the network device napi list
 1642  */
 1643 void netif_napi_del(struct napi_struct *napi);
 1644 
 1645 struct napi_gro_cb {
 1646 	/* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
 1647 	void *frag0;
 1648 
 1649 	/* Length of frag0. */
 1650 	unsigned int frag0_len;
 1651 
 1652 	/* This indicates where we are processing relative to skb->data. */
 1653 	int data_offset;
 1654 
 1655 	/* This is non-zero if the packet cannot be merged with the new skb. */
 1656 	u16	flush;
 1657 
 1658 	/* Save the IP ID here and check when we get to the transport layer */
 1659 	u16	flush_id;
 1660 
 1661 	/* Number of segments aggregated. */
 1662 	u16	count;
 1663 
 1664 	/* This is non-zero if the packet may be of the same flow. */
 1665 	u8	same_flow;
 1666 
 1667 	/* Free the skb? */
 1668 	u8	free;
 1669 #define NAPI_GRO_FREE		  1
 1670 #define NAPI_GRO_FREE_STOLEN_HEAD 2
 1671 
 1672 	/* jiffies when first packet was created/queued */
 1673 	unsigned long age;
 1674 
 1675 	/* Used in ipv6_gro_receive() */
 1676 	u16	proto;
 1677 
 1678 	/* Used in udp_gro_receive */
 1679 	u16	udp_mark;
 1680 
 1681 	/* used to support CHECKSUM_COMPLETE for tunneling protocols */
 1682 	__wsum	csum;
 1683 
 1684 	/* used in skb_gro_receive() slow path */
 1685 	struct sk_buff *last;
 1686 };
 1687 
 1688 #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
 1689 
 1690 struct packet_type {
 1691 	__be16			type;	/* This is really htons(ether_type). */
 1692 	struct net_device	*dev;	/* NULL is wildcarded here	     */
 1693 	int			(*func) (struct sk_buff *,
 1694 					 struct net_device *,
 1695 					 struct packet_type *,
 1696 					 struct net_device *);
 1697 	bool			(*id_match)(struct packet_type *ptype,
 1698 					    struct sock *sk);
 1699 	void			*af_packet_priv;
 1700 	struct list_head	list;
 1701 };
 1702 
 1703 struct offload_callbacks {
 1704 	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
 1705 						netdev_features_t features);
 1706 	int			(*gso_send_check)(struct sk_buff *skb);
 1707 	struct sk_buff		**(*gro_receive)(struct sk_buff **head,
 1708 					       struct sk_buff *skb);
 1709 	int			(*gro_complete)(struct sk_buff *skb, int nhoff);
 1710 };
 1711 
 1712 struct packet_offload {
 1713 	__be16			 type;	/* This is really htons(ether_type). */
 1714 	struct offload_callbacks callbacks;
 1715 	struct list_head	 list;
 1716 };
 1717 
 1718 struct udp_offload {
 1719 	__be16			 port;
 1720 	struct offload_callbacks callbacks;
 1721 };
 1722 
 1723 /* often-modified stats are per-cpu; others are shared (netdev->stats) */
 1724 struct pcpu_sw_netstats {
 1725 	u64     rx_packets;
 1726 	u64     rx_bytes;
 1727 	u64     tx_packets;
 1728 	u64     tx_bytes;
 1729 	struct u64_stats_sync   syncp;
 1730 };
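
/* Hedged sketch: bumping the per-cpu counters above from an RX path. The
 * dev->tstats member comes from the union in struct net_device earlier in
 * this header; u64_stats_update_begin()/end() are the seqcount helpers
 * from <linux/u64_stats_sync.h>. The function name is illustrative.
 */
static void my_count_rx(struct net_device *dev, unsigned int len)
{
	struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += len;
	u64_stats_update_end(&stats->syncp);
}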
 1731 
 1732 #include <linux/notifier.h>
 1733 
 1734 /* netdevice notifier chain. Please remember to update the rtnetlink
 1735  * notification exclusion list in rtnetlink_event() when adding new
 1736  * types.
 1737  */
 1738 #define NETDEV_UP	0x0001	/* For now you can't veto a device up/down */
 1739 #define NETDEV_DOWN	0x0002
 1740 #define NETDEV_REBOOT	0x0003	/* Tell a protocol stack a network interface
 1741 				   detected a hardware crash and restarted
 1742 				   - we can use this eg to kick tcp sessions
 1743 				   once done */
 1744 #define NETDEV_CHANGE	0x0004	/* Notify device state change */
 1745 #define NETDEV_REGISTER 0x0005
 1746 #define NETDEV_UNREGISTER	0x0006
 1747 #define NETDEV_CHANGEMTU	0x0007 /* notify after mtu change happened */
 1748 #define NETDEV_CHANGEADDR	0x0008
 1749 #define NETDEV_GOING_DOWN	0x0009
 1750 #define NETDEV_CHANGENAME	0x000A
 1751 #define NETDEV_FEAT_CHANGE	0x000B
 1752 #define NETDEV_BONDING_FAILOVER 0x000C
 1753 #define NETDEV_PRE_UP		0x000D
 1754 #define NETDEV_PRE_TYPE_CHANGE	0x000E
 1755 #define NETDEV_POST_TYPE_CHANGE	0x000F
 1756 #define NETDEV_POST_INIT	0x0010
 1757 #define NETDEV_UNREGISTER_FINAL 0x0011
 1758 #define NETDEV_RELEASE		0x0012
 1759 #define NETDEV_NOTIFY_PEERS	0x0013
 1760 #define NETDEV_JOIN		0x0014
 1761 #define NETDEV_CHANGEUPPER	0x0015
 1762 #define NETDEV_RESEND_IGMP	0x0016
 1763 #define NETDEV_PRECHANGEMTU	0x0017 /* notify before mtu change happened */
 1764 
 1765 int register_netdevice_notifier(struct notifier_block *nb);
 1766 int unregister_netdevice_notifier(struct notifier_block *nb);
 1767 
 1768 struct netdev_notifier_info {
 1769 	struct net_device *dev;
 1770 };
 1771 
 1772 struct netdev_notifier_change_info {
 1773 	struct netdev_notifier_info info; /* must be first */
 1774 	unsigned int flags_changed;
 1775 };
 1776 
 1777 static inline void netdev_notifier_info_init(struct netdev_notifier_info *info,
 1778 					     struct net_device *dev)
 1779 {
 1780 	info->dev = dev;
 1781 }
 1782 
 1783 static inline struct net_device *
 1784 netdev_notifier_info_to_dev(const struct netdev_notifier_info *info)
 1785 {
 1786 	return info->dev;
 1787 }
 1788 
 1789 int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
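
/* Hedged sketch: a netdevice notifier. The callback receives one of the
 * NETDEV_* events above and recovers the device through
 * netdev_notifier_info_to_dev(); "my_netdev_event" is illustrative.
 */
static int my_netdev_event(struct notifier_block *nb,
			   unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (event == NETDEV_UP)
		netdev_info(dev, "device is up\n");

	return NOTIFY_DONE;	/* from <linux/notifier.h> */
}

static struct notifier_block my_nb = {
	.notifier_call = my_netdev_event,
};
/* registration: register_netdevice_notifier(&my_nb); */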
 1790 
 1791 
 1792 extern rwlock_t				dev_base_lock;		/* Device list lock */
 1793 
 1794 #define for_each_netdev(net, d)		\
 1795 		list_for_each_entry(d, &(net)->dev_base_head, dev_list)
 1796 #define for_each_netdev_reverse(net, d)	\
 1797 		list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list)
 1798 #define for_each_netdev_rcu(net, d)		\
 1799 		list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
 1800 #define for_each_netdev_safe(net, d, n)	\
 1801 		list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
 1802 #define for_each_netdev_continue(net, d)		\
 1803 		list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
 1804 #define for_each_netdev_continue_rcu(net, d)		\
 1805 	list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
 1806 #define for_each_netdev_in_bond_rcu(bond, slave)	\
 1807 		for_each_netdev_rcu(&init_net, slave)	\
 1808 			if (netdev_master_upper_dev_get_rcu(slave) == bond)
 1809 #define net_device_entry(lh)	list_entry(lh, struct net_device, dev_list)
 1810 
 1811 static inline struct net_device *next_net_device(struct net_device *dev)
 1812 {
 1813 	struct list_head *lh;
 1814 	struct net *net;
 1815 
 1816 	net = dev_net(dev);
 1817 	lh = dev->dev_list.next;
 1818 	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
 1819 }
 1820 
 1821 static inline struct net_device *next_net_device_rcu(struct net_device *dev)
 1822 {
 1823 	struct list_head *lh;
 1824 	struct net *net;
 1825 
 1826 	net = dev_net(dev);
 1827 	lh = rcu_dereference(list_next_rcu(&dev->dev_list));
 1828 	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
 1829 }
 1830 
 1831 static inline struct net_device *first_net_device(struct net *net)
 1832 {
 1833 	return list_empty(&net->dev_base_head) ? NULL :
 1834 		net_device_entry(net->dev_base_head.next);
 1835 }
 1836 
 1837 static inline struct net_device *first_net_device_rcu(struct net *net)
 1838 {
 1839 	struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head));
 1840 
 1841 	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
 1842 }
 1843 
 1844 int netdev_boot_setup_check(struct net_device *dev);
 1845 unsigned long netdev_boot_base(const char *prefix, int unit);
 1846 struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
 1847 				       const char *hwaddr);
 1848 struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
 1849 struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
 1850 void dev_add_pack(struct packet_type *pt);
 1851 void dev_remove_pack(struct packet_type *pt);
 1852 void __dev_remove_pack(struct packet_type *pt);
 1853 void dev_add_offload(struct packet_offload *po);
 1854 void dev_remove_offload(struct packet_offload *po);
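
/* Hedged sketch: registering a protocol handler via dev_add_pack(). The
 * handler owns the skb and must free or consume it; 0x88B5 is the IEEE
 * local-experimental EtherType, and "my_rcv" is illustrative.
 */
static int my_rcv(struct sk_buff *skb, struct net_device *dev,
		  struct packet_type *pt, struct net_device *orig_dev)
{
	/* ... inspect skb ... */
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

static struct packet_type my_ptype = {
	.type = cpu_to_be16(0x88B5),
	.func = my_rcv,
	/* .dev left NULL: receive from all devices */
};
/* module init: dev_add_pack(&my_ptype); module exit: dev_remove_pack(&my_ptype); */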
 1855 
 1856 struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short flags,
 1857 					unsigned short mask);
 1858 struct net_device *dev_get_by_name(struct net *net, const char *name);
 1859 struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
 1860 struct net_device *__dev_get_by_name(struct net *net, const char *name);
 1861 int dev_alloc_name(struct net_device *dev, const char *name);
 1862 int dev_open(struct net_device *dev);
 1863 int dev_close(struct net_device *dev);
 1864 void dev_disable_lro(struct net_device *dev);
 1865 int dev_loopback_xmit(struct sk_buff *newskb);
 1866 int dev_queue_xmit(struct sk_buff *skb);
 1867 int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv);
 1868 int register_netdevice(struct net_device *dev);
 1869 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
 1870 void unregister_netdevice_many(struct list_head *head);
 1871 static inline void unregister_netdevice(struct net_device *dev)
 1872 {
 1873 	unregister_netdevice_queue(dev, NULL);
 1874 }
 1875 
 1876 int netdev_refcnt_read(const struct net_device *dev);
 1877 void free_netdev(struct net_device *dev);
 1878 void netdev_freemem(struct net_device *dev);
 1879 void synchronize_net(void);
 1880 int init_dummy_netdev(struct net_device *dev);
 1881 
 1882 struct net_device *dev_get_by_index(struct net *net, int ifindex);
 1883 struct net_device *__dev_get_by_index(struct net *net, int ifindex);
 1884 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
 1885 int netdev_get_name(struct net *net, char *name, int ifindex);
 1886 int dev_restart(struct net_device *dev);
 1887 #ifdef CONFIG_NETPOLL_TRAP
 1888 int netpoll_trap(void);
 1889 #endif
 1890 int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb);
 1891 
 1892 static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
 1893 {
 1894 	return NAPI_GRO_CB(skb)->data_offset;
 1895 }
 1896 
 1897 static inline unsigned int skb_gro_len(const struct sk_buff *skb)
 1898 {
 1899 	return skb->len - NAPI_GRO_CB(skb)->data_offset;
 1900 }
 1901 
 1902 static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
 1903 {
 1904 	NAPI_GRO_CB(skb)->data_offset += len;
 1905 }
 1906 
 1907 static inline void *skb_gro_header_fast(struct sk_buff *skb,
 1908 					unsigned int offset)
 1909 {
 1910 	return NAPI_GRO_CB(skb)->frag0 + offset;
 1911 }
 1912 
 1913 static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
 1914 {
 1915 	return NAPI_GRO_CB(skb)->frag0_len < hlen;
 1916 }
 1917 
 1918 static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
 1919 					unsigned int offset)
 1920 {
 1921 	if (!pskb_may_pull(skb, hlen))
 1922 		return NULL;
 1923 
 1924 	NAPI_GRO_CB(skb)->frag0 = NULL;
 1925 	NAPI_GRO_CB(skb)->frag0_len = 0;
 1926 	return skb->data + offset;
 1927 }
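
/* Hedged sketch of the canonical gro_receive header access: try the frag0
 * fast path, fall back to the slow path when the header is not fully
 * inside frag0. This mirrors the in-tree tcp_gro_receive() pattern; the
 * TCP header type (from <linux/tcp.h>) is used for illustration only.
 */
static struct tcphdr *my_gro_tcp_header(struct sk_buff *skb)
{
	unsigned int off = skb_gro_offset(skb);
	unsigned int hlen = off + sizeof(struct tcphdr);
	struct tcphdr *th = skb_gro_header_fast(skb, off);

	if (skb_gro_header_hard(skb, hlen))
		th = skb_gro_header_slow(skb, hlen, off);	/* may be NULL */

	return th;
}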
 1928 
 1929 static inline void *skb_gro_mac_header(struct sk_buff *skb)
 1930 {
 1931 	return NAPI_GRO_CB(skb)->frag0 ?: skb_mac_header(skb);
 1932 }
 1933 
 1934 static inline void *skb_gro_network_header(struct sk_buff *skb)
 1935 {
 1936 	return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
 1937 	       skb_network_offset(skb);
 1938 }
 1939 
 1940 static inline void skb_gro_postpull_rcsum(struct sk_buff *skb,
 1941 					const void *start, unsigned int len)
 1942 {
 1943 	if (skb->ip_summed == CHECKSUM_COMPLETE)
 1944 		NAPI_GRO_CB(skb)->csum = csum_sub(NAPI_GRO_CB(skb)->csum,
 1945 						  csum_partial(start, len, 0));
 1946 }
 1947 
 1948 static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
 1949 				  unsigned short type,
 1950 				  const void *daddr, const void *saddr,
 1951 				  unsigned int len)
 1952 {
 1953 	if (!dev->header_ops || !dev->header_ops->create)
 1954 		return 0;
 1955 
 1956 	return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
 1957 }
 1958 
 1959 static inline int dev_parse_header(const struct sk_buff *skb,
 1960 				   unsigned char *haddr)
 1961 {
 1962 	const struct net_device *dev = skb->dev;
 1963 
 1964 	if (!dev->header_ops || !dev->header_ops->parse)
 1965 		return 0;
 1966 	return dev->header_ops->parse(skb, haddr);
 1967 }
 1968 
 1969 static inline int dev_rebuild_header(struct sk_buff *skb)
 1970 {
 1971 	const struct net_device *dev = skb->dev;
 1972 
 1973 	if (!dev->header_ops || !dev->header_ops->rebuild)
 1974 		return 0;
 1975 	return dev->header_ops->rebuild(skb);
 1976 }
 1977 
 1978 typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
 1979 int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
 1980 static inline int unregister_gifconf(unsigned int family)
 1981 {
 1982 	return register_gifconf(family, NULL);
 1983 }
 1984 
 1985 #ifdef CONFIG_NET_FLOW_LIMIT
 1986 #define FLOW_LIMIT_HISTORY	(1 << 7)  /* must be a power of 2 and must not overflow the u8 bucket counters */
 1987 struct sd_flow_limit {
 1988 	u64			count;
 1989 	unsigned int		num_buckets;
 1990 	unsigned int		history_head;
 1991 	u16			history[FLOW_LIMIT_HISTORY];
 1992 	u8			buckets[];
 1993 };
 1994 
 1995 extern int netdev_flow_limit_table_len;
 1996 #endif /* CONFIG_NET_FLOW_LIMIT */
 1997 
 1998 /*
 1999  * Incoming packets are placed on per-cpu queues
 2000  */
 2001 struct softnet_data {
 2002 	struct Qdisc		*output_queue;
 2003 	struct Qdisc		**output_queue_tailp;
 2004 	struct list_head	poll_list;
 2005 	struct sk_buff		*completion_queue;
 2006 	struct sk_buff_head	process_queue;
 2007 
 2008 	/* stats */
 2009 	unsigned int		processed;
 2010 	unsigned int		time_squeeze;
 2011 	unsigned int		cpu_collision;
 2012 	unsigned int		received_rps;
 2013 
 2014 #ifdef CONFIG_RPS
 2015 	struct softnet_data	*rps_ipi_list;
 2016 
 2017 	/* Elements below can be accessed between CPUs for RPS */
 2018 	struct call_single_data	csd ____cacheline_aligned_in_smp;
 2019 	struct softnet_data	*rps_ipi_next;
 2020 	unsigned int		cpu;
 2021 	unsigned int		input_queue_head;
 2022 	unsigned int		input_queue_tail;
 2023 #endif
 2024 	unsigned int		dropped;
 2025 	struct sk_buff_head	input_pkt_queue;
 2026 	struct napi_struct	backlog;
 2027 
 2028 #ifdef CONFIG_NET_FLOW_LIMIT
 2029 	struct sd_flow_limit __rcu *flow_limit;
 2030 #endif
 2031 };
 2032 
 2033 static inline void input_queue_head_incr(struct softnet_data *sd)
 2034 {
 2035 #ifdef CONFIG_RPS
 2036 	sd->input_queue_head++;
 2037 #endif
 2038 }
 2039 
 2040 static inline void input_queue_tail_incr_save(struct softnet_data *sd,
 2041 					      unsigned int *qtail)
 2042 {
 2043 #ifdef CONFIG_RPS
 2044 	*qtail = ++sd->input_queue_tail;
 2045 #endif
 2046 }
 2047 
 2048 DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
 2049 
 2050 void __netif_schedule(struct Qdisc *q);
 2051 
 2052 static inline void netif_schedule_queue(struct netdev_queue *txq)
 2053 {
 2054 	if (!(txq->state & QUEUE_STATE_ANY_XOFF))
 2055 		__netif_schedule(txq->qdisc);
 2056 }
 2057 
 2058 static inline void netif_tx_schedule_all(struct net_device *dev)
 2059 {
 2060 	unsigned int i;
 2061 
 2062 	for (i = 0; i < dev->num_tx_queues; i++)
 2063 		netif_schedule_queue(netdev_get_tx_queue(dev, i));
 2064 }
 2065 
 2066 static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
 2067 {
 2068 	clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
 2069 }
 2070 
 2071 /**
 2072  *	netif_start_queue - allow transmit
 2073  *	@dev: network device
 2074  *
 2075  *	Allow upper layers to call the device hard_start_xmit routine.
 2076  */
 2077 static inline void netif_start_queue(struct net_device *dev)
 2078 {
 2079 	netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
 2080 }
 2081 
 2082 static inline void netif_tx_start_all_queues(struct net_device *dev)
 2083 {
 2084 	unsigned int i;
 2085 
 2086 	for (i = 0; i < dev->num_tx_queues; i++) {
 2087 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
 2088 		netif_tx_start_queue(txq);
 2089 	}
 2090 }
 2091 
 2092 static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
 2093 {
 2094 #ifdef CONFIG_NETPOLL_TRAP
 2095 	if (netpoll_trap()) {
 2096 		netif_tx_start_queue(dev_queue);
 2097 		return;
 2098 	}
 2099 #endif
 2100 	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state))
 2101 		__netif_schedule(dev_queue->qdisc);
 2102 }
 2103 
 2104 /**
 2105  *	netif_wake_queue - restart transmit
 2106  *	@dev: network device
 2107  *
 2108  *	Allow upper layers to call the device hard_start_xmit routine.
 2109  *	Used for flow control when transmit resources are available.
 2110  */
 2111 static inline void netif_wake_queue(struct net_device *dev)
 2112 {
 2113 	netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
 2114 }
 2115 
 2116 static inline void netif_tx_wake_all_queues(struct net_device *dev)
 2117 {
 2118 	unsigned int i;
 2119 
 2120 	for (i = 0; i < dev->num_tx_queues; i++) {
 2121 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
 2122 		netif_tx_wake_queue(txq);
 2123 	}
 2124 }
 2125 
 2126 static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
 2127 {
 2128 	if (WARN_ON(!dev_queue)) {
 2129 		pr_info("netif_stop_queue() cannot be called before register_netdev()\n");
 2130 		return;
 2131 	}
 2132 	set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
 2133 }
 2134 
 2135 /**
 2136  *	netif_stop_queue - stop the transmit queue
 2137  *	@dev: network device
 2138  *
 2139  *	Stop upper layers calling the device hard_start_xmit routine.
 2140  *	Used for flow control when transmit resources are unavailable.
 2141  */
 2142 static inline void netif_stop_queue(struct net_device *dev)
 2143 {
 2144 	netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
 2145 }
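
/* Hedged sketch: typical flow control in a driver's ndo_start_xmit. The
 * queue is stopped when the TX ring fills, and the completion handler
 * (not shown) calls netif_wake_queue() once descriptors are reclaimed.
 * my_tx_ring_full() and my_post_tx() are illustrative assumptions.
 */
static bool my_tx_ring_full(struct net_device *dev);	/* assumed */
static void my_post_tx(struct net_device *dev, struct sk_buff *skb);	/* assumed */

static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (my_tx_ring_full(dev)) {
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;		/* core will requeue the skb */
	}

	my_post_tx(dev, skb);
	if (my_tx_ring_full(dev))
		netif_stop_queue(dev);	/* stop early; wake from completion IRQ */

	return NETDEV_TX_OK;
}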
 2146 
 2147 static inline void netif_tx_stop_all_queues(struct net_device *dev)
 2148 {
 2149 	unsigned int i;
 2150 
 2151 	for (i = 0; i < dev->num_tx_queues; i++) {
 2152 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
 2153 		netif_tx_stop_queue(txq);
 2154 	}
 2155 }
 2156 
 2157 static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
 2158 {
 2159 	return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
 2160 }
 2161 
 2162 /**
 2163  *	netif_queue_stopped - test if transmit queue is flow-blocked
 2164  *	@dev: network device
 2165  *
 2166  *	Test if transmit queue on device is currently unable to send.
 2167  */
 2168 static inline bool netif_queue_stopped(const struct net_device *dev)
 2169 {
 2170 	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
 2171 }
 2172 
 2173 static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue)
 2174 {
 2175 	return dev_queue->state & QUEUE_STATE_ANY_XOFF;
 2176 }
 2177 
 2178 static inline bool netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
 2179 {
 2180 	return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
 2181 }
 2182 
 2183 static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
 2184 					unsigned int bytes)
 2185 {
 2186 #ifdef CONFIG_BQL
 2187 	dql_queued(&dev_queue->dql, bytes);
 2188 
 2189 	if (likely(dql_avail(&dev_queue->dql) >= 0))
 2190 		return;
 2191 
 2192 	set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
 2193 
 2194 	/*
 2195 	 * The XOFF flag must be set before checking the dql_avail below,
 2196 	 * because in netdev_tx_completed_queue we update the dql_completed
 2197 	 * before checking the XOFF flag.
 2198 	 */
 2199 	smp_mb();
 2200 
 2201 	/* check again in case another CPU has just made room avail */
 2202 	if (unlikely(dql_avail(&dev_queue->dql) >= 0))
 2203 		clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
 2204 #endif
 2205 }
 2206 
 2207 /**
 2208  * 	netdev_sent_queue - report the number of bytes queued to hardware
 2209  * 	@dev: network device
 2210  * 	@bytes: number of bytes queued to the hardware device queue
 2211  *
 2212  * 	Report the number of bytes queued for sending/completion to the network
 2213  * 	device hardware queue. @bytes must exactly match the total later
 2214  * 	reported to netdev_completed_queue() for the same packets.
 2215  */
 2216 static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
 2217 {
 2218 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
 2219 }
 2220 
 2221 static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
 2222 					     unsigned int pkts, unsigned int bytes)
 2223 {
 2224 #ifdef CONFIG_BQL
 2225 	if (unlikely(!bytes))
 2226 		return;
 2227 
 2228 	dql_completed(&dev_queue->dql, bytes);
 2229 
 2230 	/*
 2231 	 * Without the memory barrier there is a small possibility that
 2232 	 * netdev_tx_sent_queue will miss the update and cause the queue to
 2233 	 * be stopped forever
 2234 	 */
 2235 	smp_mb();
 2236 
 2237 	if (dql_avail(&dev_queue->dql) < 0)
 2238 		return;
 2239 
 2240 	if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state))
 2241 		netif_schedule_queue(dev_queue);
 2242 #endif
 2243 }
 2244 
 2245 /**
 2246  * 	netdev_completed_queue - report bytes and packets completed by device
 2247  * 	@dev: network device
 2248  * 	@pkts: actual number of packets sent over the medium
 2249  * 	@bytes: actual number of bytes sent over the medium
 2250  *
 2251  * 	Report the number of bytes and packets transmitted by the network device
 2252  * 	hardware queue over the physical medium; @bytes must exactly match the
 2253  * 	@bytes amount passed to netdev_sent_queue()
 2254  */
 2255 static inline void netdev_completed_queue(struct net_device *dev,
 2256 					  unsigned int pkts, unsigned int bytes)
 2257 {
 2258 	netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
 2259 }
 2260 
 2261 static inline void netdev_tx_reset_queue(struct netdev_queue *q)
 2262 {
 2263 #ifdef CONFIG_BQL
 2264 	clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state);
 2265 	dql_reset(&q->dql);
 2266 #endif
 2267 }
 2268 
 2269 /**
 2270  * 	netdev_reset_queue - reset the packet and byte counts of a network device
 2271  * 	@dev_queue: network device
 2272  *
 2273  * 	Reset the byte and packet counts of a network device and clear the
 2274  * 	software flow control OFF bit for this network device.
 2275  */
 2276 static inline void netdev_reset_queue(struct net_device *dev_queue)
 2277 {
 2278 	netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0));
 2279 }
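
/* Hedged sketch of byte-queue-limits bookkeeping with the helpers above:
 * the xmit path reports netdev_sent_queue(dev, skb->len) after posting a
 * descriptor, the completion handler mirrors those bytes, and the ring
 * teardown path calls netdev_reset_queue(). my_reclaim_tx() is assumed.
 */
static void my_reclaim_tx(struct net_device *dev,
			  unsigned int *pkts, unsigned int *bytes);	/* assumed */

static void my_tx_complete(struct net_device *dev)
{
	unsigned int pkts = 0, bytes = 0;

	my_reclaim_tx(dev, &pkts, &bytes);

	/* byte total must match what the xmit path reported as sent */
	netdev_completed_queue(dev, pkts, bytes);
}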
 2280 
 2281 /**
 2282  * 	netdev_cap_txqueue - check if selected tx queue exceeds device queues
 2283  * 	@dev: network device
 2284  * 	@queue_index: given tx queue index
 2285  *
 2286  * 	Returns 0 if given tx queue index >= number of device tx queues,
 2287  * 	otherwise returns the originally passed tx queue index.
 2288  */
 2289 static inline u16 netdev_cap_txqueue(struct net_device *dev, u16 queue_index)
 2290 {
 2291 	if (unlikely(queue_index >= dev->real_num_tx_queues)) {
 2292 		net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
 2293 				     dev->name, queue_index,
 2294 				     dev->real_num_tx_queues);
 2295 		return 0;
 2296 	}
 2297 
 2298 	return queue_index;
 2299 }
 2300 
 2301 /**
 2302  *	netif_running - test if up
 2303  *	@dev: network device
 2304  *
 2305  *	Test if the device has been brought up.
 2306  */
 2307 static inline bool netif_running(const struct net_device *dev)
 2308 {
 2309 	return test_bit(__LINK_STATE_START, &dev->state);
 2310 }
 2311 
 2312 /*
 2313  * Routines to manage the subqueues on a device.  We only need start,
 2314  * stop, and a check if it's stopped.  All other device management is
 2315  * done at the overall netdevice level.
 2316  * Also test the device if we're multiqueue.
 2317  */
 2318 
 2319 /**
 2320  *	netif_start_subqueue - allow sending packets on subqueue
 2321  *	@dev: network device
 2322  *	@queue_index: sub queue index
 2323  *
 2324  * Start individual transmit queue of a device with multiple transmit queues.
 2325  */
 2326 static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
 2327 {
 2328 	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
 2329 
 2330 	netif_tx_start_queue(txq);
 2331 }
 2332 
 2333 /**
 2334  *	netif_stop_subqueue - stop sending packets on subqueue
 2335  *	@dev: network device
 2336  *	@queue_index: sub queue index
 2337  *
 2338  * Stop individual transmit queue of a device with multiple transmit queues.
 2339  */
 2340 static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
 2341 {
 2342 	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
 2343 #ifdef CONFIG_NETPOLL_TRAP
 2344 	if (netpoll_trap())
 2345 		return;
 2346 #endif
 2347 	netif_tx_stop_queue(txq);
 2348 }
 2349 
 2350 /**
 2351  *	netif_subqueue_stopped - test status of subqueue
 2352  *	@dev: network device
 2353  *	@queue_index: sub queue index
 2354  *
 2355  * Check individual transmit queue of a device with multiple transmit queues.
 2356  */
 2357 static inline bool __netif_subqueue_stopped(const struct net_device *dev,
 2358 					    u16 queue_index)
 2359 {
 2360 	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
 2361 
 2362 	return netif_tx_queue_stopped(txq);
 2363 }
 2364 
 2365 static inline bool netif_subqueue_stopped(const struct net_device *dev,
 2366 					  struct sk_buff *skb)
 2367 {
 2368 	return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
 2369 }
 2370 
 2371 /**
 2372  *	netif_wake_subqueue - allow sending packets on subqueue
 2373  *	@dev: network device
 2374  *	@queue_index: sub queue index
 2375  *
 2376  * Resume individual transmit queue of a device with multiple transmit queues.
 2377  */
 2378 static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
 2379 {
 2380 	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
 2381 #ifdef CONFIG_NETPOLL_TRAP
 2382 	if (netpoll_trap())
 2383 		return;
 2384 #endif
 2385 	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state))
 2386 		__netif_schedule(txq->qdisc);
 2387 }
 2388 
 2389 #ifdef CONFIG_XPS
 2390 int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
 2391 			u16 index);
 2392 #else
 2393 static inline int netif_set_xps_queue(struct net_device *dev,
 2394 				      const struct cpumask *mask,
 2395 				      u16 index)
 2396 {
 2397 	return 0;
 2398 }
 2399 #endif
 2400 
 2401 /*
 2402  * Returns a Tx hash for the given packet when dev->real_num_tx_queues is used
 2403  * as a distribution range limit for the returned value.
 2404  */
 2405 static inline u16 skb_tx_hash(const struct net_device *dev,
 2406 			      const struct sk_buff *skb)
 2407 {
 2408 	return __skb_tx_hash(dev, skb, dev->real_num_tx_queues);
 2409 }
 2410 
 2411 /**
 2412  *	netif_is_multiqueue - test if device has multiple transmit queues
 2413  *	@dev: network device
 2414  *
 2415  * Check if device has multiple transmit queues
 2416  */
 2417 static inline bool netif_is_multiqueue(const struct net_device *dev)
 2418 {
 2419 	return dev->num_tx_queues > 1;
 2420 }
 2421 
 2422 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq);
 2423 
 2424 #ifdef CONFIG_SYSFS
 2425 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq);
 2426 #else
 2427 static inline int netif_set_real_num_rx_queues(struct net_device *dev,
 2428 						unsigned int rxq)
 2429 {
 2430 	return 0;
 2431 }
 2432 #endif
 2433 
 2434 static inline int netif_copy_real_num_queues(struct net_device *to_dev,
 2435 					     const struct net_device *from_dev)
 2436 {
 2437 	int err;
 2438 
 2439 	err = netif_set_real_num_tx_queues(to_dev,
 2440 					   from_dev->real_num_tx_queues);
 2441 	if (err)
 2442 		return err;
 2443 #ifdef CONFIG_SYSFS
 2444 	return netif_set_real_num_rx_queues(to_dev,
 2445 					    from_dev->real_num_rx_queues);
 2446 #else
 2447 	return 0;
 2448 #endif
 2449 }
 2450 
 2451 #ifdef CONFIG_SYSFS
 2452 static inline unsigned int get_netdev_rx_queue_index(
 2453 		struct netdev_rx_queue *queue)
 2454 {
 2455 	struct net_device *dev = queue->dev;
 2456 	int index = queue - dev->_rx;
 2457 
 2458 	BUG_ON(index >= dev->num_rx_queues);
 2459 	return index;
 2460 }
 2461 #endif
 2462 
 2463 #define DEFAULT_MAX_NUM_RSS_QUEUES	(8)
 2464 int netif_get_num_default_rss_queues(void);
 2465 
 2466 enum skb_free_reason {
 2467 	SKB_REASON_CONSUMED,
 2468 	SKB_REASON_DROPPED,
 2469 };
 2470 
 2471 void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason);
 2472 void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason);
 2473 
 2474 /*
 2475  * It is not allowed to call kfree_skb() or consume_skb() from hardware
 2476  * interrupt context or with hardware interrupts being disabled.
 2477  * (in_irq() || irqs_disabled())
 2478  *
 2479  * We provide four helpers that can be used in the following contexts:
 2480  *
 2481  * dev_kfree_skb_irq(skb) when caller drops a packet from irq context,
 2482  *  replacing kfree_skb(skb)
 2483  *
 2484  * dev_consume_skb_irq(skb) when caller consumes a packet from irq context.
 2485  *  Typically used in place of consume_skb(skb) in TX completion path
 2486  *
 2487  * dev_kfree_skb_any(skb) when caller doesn't know its current irq context,
 2488  *  replacing kfree_skb(skb)
 2489  *
 2490  * dev_consume_skb_any(skb) when caller doesn't know its current irq context,
 2491  *  and consumed a packet. Used in place of consume_skb(skb)
 2492  */
 2493 static inline void dev_kfree_skb_irq(struct sk_buff *skb)
 2494 {
 2495 	__dev_kfree_skb_irq(skb, SKB_REASON_DROPPED);
 2496 }
 2497 
 2498 static inline void dev_consume_skb_irq(struct sk_buff *skb)
 2499 {
 2500 	__dev_kfree_skb_irq(skb, SKB_REASON_CONSUMED);
 2501 }
 2502 
 2503 static inline void dev_kfree_skb_any(struct sk_buff *skb)
 2504 {
 2505 	__dev_kfree_skb_any(skb, SKB_REASON_DROPPED);
 2506 }
 2507 
 2508 static inline void dev_consume_skb_any(struct sk_buff *skb)
 2509 {
 2510 	__dev_kfree_skb_any(skb, SKB_REASON_CONSUMED);
 2511 }
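
/* Hedged sketch: a completion path that may run in hard-irq or process
 * context uses the _any variants above; drop vs. consume decides whether
 * drop monitoring sees the event. "my_free_tx_skb" is illustrative.
 */
static void my_free_tx_skb(struct sk_buff *skb, bool tx_error)
{
	if (tx_error)
		dev_kfree_skb_any(skb);		/* counted as a drop */
	else
		dev_consume_skb_any(skb);	/* normal successful completion */
}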
 2512 
 2513 int netif_rx(struct sk_buff *skb);
 2514 int netif_rx_ni(struct sk_buff *skb);
 2515 int netif_receive_skb(struct sk_buff *skb);
 2516 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
 2517 void napi_gro_flush(struct napi_struct *napi, bool flush_old);
 2518 struct sk_buff *napi_get_frags(struct napi_struct *napi);
 2519 gro_result_t napi_gro_frags(struct napi_struct *napi);
 2520 struct packet_offload *gro_find_receive_by_type(__be16 type);
 2521 struct packet_offload *gro_find_complete_by_type(__be16 type);
 2522 
 2523 static inline void napi_free_frags(struct napi_struct *napi)
 2524 {
 2525 	kfree_skb(napi->skb);
 2526 	napi->skb = NULL;
 2527 }
 2528 
 2529 int netdev_rx_handler_register(struct net_device *dev,
 2530 			       rx_handler_func_t *rx_handler,
 2531 			       void *rx_handler_data);
 2532 void netdev_rx_handler_unregister(struct net_device *dev);
 2533 
 2534 bool dev_valid_name(const char *name);
 2535 int dev_ioctl(struct net *net, unsigned int cmd, void __user *);
 2536 int dev_ethtool(struct net *net, struct ifreq *);
 2537 unsigned int dev_get_flags(const struct net_device *);
 2538 int __dev_change_flags(struct net_device *, unsigned int flags);
 2539 int dev_change_flags(struct net_device *, unsigned int);
 2540 void __dev_notify_flags(struct net_device *, unsigned int old_flags,
 2541 			unsigned int gchanges);
 2542 int dev_change_name(struct net_device *, const char *);
 2543 int dev_set_alias(struct net_device *, const char *, size_t);
 2544 int dev_change_net_namespace(struct net_device *, struct net *, const char *);
 2545 int dev_set_mtu(struct net_device *, int);
 2546 void dev_set_group(struct net_device *, int);
 2547 int dev_set_mac_address(struct net_device *, struct sockaddr *);
 2548 int dev_change_carrier(struct net_device *, bool new_carrier);
 2549 int dev_get_phys_port_id(struct net_device *dev,
 2550 			 struct netdev_phys_port_id *ppid);
 2551 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 2552 			struct netdev_queue *txq);
 2553 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
 2554 
 2555 extern int		netdev_budget;
 2556 
 2557 /* Called by rtnetlink.c:rtnl_unlock() */
 2558 void netdev_run_todo(void);
 2559 
 2560 /**
 2561  *	dev_put - release reference to device
 2562  *	@dev: network device
 2563  *
 2564  * Release reference to device to allow it to be freed.
 2565  */
 2566 static inline void dev_put(struct net_device *dev)
 2567 {
 2568 	this_cpu_dec(*dev->pcpu_refcnt);
 2569 }
 2570 
 2571 /**
 2572  *	dev_hold - get reference to device
 2573  *	@dev: network device
 2574  *
 2575  * Hold reference to device to keep it from being freed.
 2576  */
 2577 static inline void dev_hold(struct net_device *dev)
 2578 {
 2579 	this_cpu_inc(*dev->pcpu_refcnt);
 2580 }
 2581 
 2582 /* Carrier loss detection, dial on demand. The functions netif_carrier_on
 2583  * and _off may be called from IRQ context, but it is the caller
 2584  * who is responsible for serializing these calls.
 2585  *
 2586  * The name carrier is inappropriate, these functions should really be
 2587  * called netif_lowerlayer_*() because they represent the state of any
 2588  * kind of lower layer not just hardware media.
 2589  */
 2590 
 2591 void linkwatch_init_dev(struct net_device *dev);
 2592 void linkwatch_fire_event(struct net_device *dev);
 2593 void linkwatch_forget_dev(struct net_device *dev);
 2594 
 2595 /**
 2596  *	netif_carrier_ok - test if carrier present
 2597  *	@dev: network device
 2598  *
 2599  * Check if carrier is present on device
 2600  */
 2601 static inline bool netif_carrier_ok(const struct net_device *dev)
 2602 {
 2603 	return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
 2604 }
 2605 
 2606 unsigned long dev_trans_start(struct net_device *dev);
 2607 
 2608 void __netdev_watchdog_up(struct net_device *dev);
 2609 
 2610 void netif_carrier_on(struct net_device *dev);
 2611 
 2612 void netif_carrier_off(struct net_device *dev);
 2613 
 2614 /**
 2615  *	netif_dormant_on - mark device as dormant.
 2616  *	@dev: network device
 2617  *
 2618  * Mark device as dormant (as per RFC2863).
 2619  *
 2620  * The dormant state indicates that the relevant interface is not
 2621  * actually in a condition to pass packets (i.e., it is not 'up') but is
 2622  * in a "pending" state, waiting for some external event.  For "on-
 2623  * demand" interfaces, this new state identifies the situation where the
 2624  * interface is waiting for events to place it in the up state.
 2625  *
 2626  */
 2627 static inline void netif_dormant_on(struct net_device *dev)
 2628 {
 2629 	if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
 2630 		linkwatch_fire_event(dev);
 2631 }
 2632 
 2633 /**
 2634  *	netif_dormant_off - set device as not dormant.
 2635  *	@dev: network device
 2636  *
 2637  * Device is not in dormant state.
 2638  */
 2639 static inline void netif_dormant_off(struct net_device *dev)
 2640 {
 2641 	if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
 2642 		linkwatch_fire_event(dev);
 2643 }
 2644 
 2645 /**
 2646  *	netif_dormant - test if device is dormant
 2647  *	@dev: network device
 2648  *
 2649  * Check if the device is in the dormant state.
 2650  */
 2651 static inline bool netif_dormant(const struct net_device *dev)
 2652 {
 2653 	return test_bit(__LINK_STATE_DORMANT, &dev->state);
 2654 }
 2655 
 2656 
 2657 /**
 2658  *	netif_oper_up - test if device is operational
 2659  *	@dev: network device
 2660  *
 2661  * Check if the device's RFC 2863 operational state is up
 2662  */
 2663 static inline bool netif_oper_up(const struct net_device *dev)
 2664 {
 2665 	return (dev->operstate == IF_OPER_UP ||
 2666 		dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
 2667 }
 2668 
 2669 /**
 2670  *	netif_device_present - is device available or removed
 2671  *	@dev: network device
 2672  *
 2673  * Check if device has not been removed from system.
 2674  */
 2675 static inline bool netif_device_present(struct net_device *dev)
 2676 {
 2677 	return test_bit(__LINK_STATE_PRESENT, &dev->state);
 2678 }
 2679 
 2680 void netif_device_detach(struct net_device *dev);
 2681 
 2682 void netif_device_attach(struct net_device *dev);
 2683 
 2684 /*
 2685  * Network interface message level settings
 2686  */
 2687 
 2688 enum {
 2689 	NETIF_MSG_DRV		= 0x0001,
 2690 	NETIF_MSG_PROBE		= 0x0002,
 2691 	NETIF_MSG_LINK		= 0x0004,
 2692 	NETIF_MSG_TIMER		= 0x0008,
 2693 	NETIF_MSG_IFDOWN	= 0x0010,
 2694 	NETIF_MSG_IFUP		= 0x0020,
 2695 	NETIF_MSG_RX_ERR	= 0x0040,
 2696 	NETIF_MSG_TX_ERR	= 0x0080,
 2697 	NETIF_MSG_TX_QUEUED	= 0x0100,
 2698 	NETIF_MSG_INTR		= 0x0200,
 2699 	NETIF_MSG_TX_DONE	= 0x0400,
 2700 	NETIF_MSG_RX_STATUS	= 0x0800,
 2701 	NETIF_MSG_PKTDATA	= 0x1000,
 2702 	NETIF_MSG_HW		= 0x2000,
 2703 	NETIF_MSG_WOL		= 0x4000,
 2704 };
 2705 
 2706 #define netif_msg_drv(p)	((p)->msg_enable & NETIF_MSG_DRV)
 2707 #define netif_msg_probe(p)	((p)->msg_enable & NETIF_MSG_PROBE)
 2708 #define netif_msg_link(p)	((p)->msg_enable & NETIF_MSG_LINK)
 2709 #define netif_msg_timer(p)	((p)->msg_enable & NETIF_MSG_TIMER)
 2710 #define netif_msg_ifdown(p)	((p)->msg_enable & NETIF_MSG_IFDOWN)
 2711 #define netif_msg_ifup(p)	((p)->msg_enable & NETIF_MSG_IFUP)
 2712 #define netif_msg_rx_err(p)	((p)->msg_enable & NETIF_MSG_RX_ERR)
 2713 #define netif_msg_tx_err(p)	((p)->msg_enable & NETIF_MSG_TX_ERR)
 2714 #define netif_msg_tx_queued(p)	((p)->msg_enable & NETIF_MSG_TX_QUEUED)
 2715 #define netif_msg_intr(p)	((p)->msg_enable & NETIF_MSG_INTR)
 2716 #define netif_msg_tx_done(p)	((p)->msg_enable & NETIF_MSG_TX_DONE)
 2717 #define netif_msg_rx_status(p)	((p)->msg_enable & NETIF_MSG_RX_STATUS)
 2718 #define netif_msg_pktdata(p)	((p)->msg_enable & NETIF_MSG_PKTDATA)
 2719 #define netif_msg_hw(p)		((p)->msg_enable & NETIF_MSG_HW)
 2720 #define netif_msg_wol(p)	((p)->msg_enable & NETIF_MSG_WOL)
 2721 
 2722 static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
 2723 {
 2724 	/* use default */
 2725 	if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
 2726 		return default_msg_enable_bits;
 2727 	if (debug_value == 0)	/* no output */
 2728 		return 0;
 2729 	/* set low N bits */
 2730 	return (1 << debug_value) - 1;
 2731 }
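
/* Hedged sketch: typical probe-time use of netif_msg_init(). A module
 * parameter selects the verbosity and the result lands in the msg_enable
 * field read by the netif_msg_*() macros above. "debug", the context
 * struct and the default bit set are illustrative assumptions.
 */
struct my_msg_ctx {
	u32 msg_enable;	/* read by the netif_msg_*() macros above */
};

static int debug = -1;	/* -1 selects the driver defaults */

static void my_init_msg_enable(struct my_msg_ctx *ctx)
{
	ctx->msg_enable = netif_msg_init(debug,
			NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK);

	if (netif_msg_drv(ctx))
		pr_info("driver messages enabled\n");
}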
 2732 
 2733 static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
 2734 {
 2735 	spin_lock(&txq->_xmit_lock);
 2736 	txq->xmit_lock_owner = cpu;
 2737 }
 2738 
 2739 static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
 2740 {
 2741 	spin_lock_bh(&txq->_xmit_lock);
 2742 	txq->xmit_lock_owner = smp_processor_id();
 2743 }
 2744 
 2745 static inline bool __netif_tx_trylock(struct netdev_queue *txq)
 2746 {
 2747 	bool ok = spin_trylock(&txq->_xmit_lock);
 2748 	if (likely(ok))
 2749 		txq->xmit_lock_owner = smp_processor_id();
 2750 	return ok;
 2751 }
 2752 
 2753 static inline void __netif_tx_unlock(struct netdev_queue *txq)
 2754 {
 2755 	txq->xmit_lock_owner = -1;
 2756 	spin_unlock(&txq->_xmit_lock);
 2757 }
 2758 
 2759 static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
 2760 {
 2761 	txq->xmit_lock_owner = -1;
 2762 	spin_unlock_bh(&txq->_xmit_lock);
 2763 }
 2764 
 2765 static inline void txq_trans_update(struct netdev_queue *txq)
 2766 {
 2767 	if (txq->xmit_lock_owner != -1)
 2768 		txq->trans_start = jiffies;
 2769 }
 2770 
 2771 /**
 2772  *	netif_tx_lock - grab network device transmit lock
 2773  *	@dev: network device
 2774  *
 2775  * Get network device transmit lock
 2776  */
 2777 static inline void netif_tx_lock(struct net_device *dev)
 2778 {
 2779 	unsigned int i;
 2780 	int cpu;
 2781 
 2782 	spin_lock(&dev->tx_global_lock);
 2783 	cpu = smp_processor_id();
 2784 	for (i = 0; i < dev->num_tx_queues; i++) {
 2785 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
 2786 
 2787 		/* We are the only thread of execution doing a
 2788 		 * freeze, but we have to grab the _xmit_lock in
 2789 		 * order to synchronize with threads which are in
 2790 		 * the ->hard_start_xmit() handler and already
 2791 		 * checked the frozen bit.
 2792 		 */
 2793 		__netif_tx_lock(txq, cpu);
 2794 		set_bit(__QUEUE_STATE_FROZEN, &txq->state);
 2795 		__netif_tx_unlock(txq);
 2796 	}
 2797 }
 2798 
 2799 static inline void netif_tx_lock_bh(struct net_device *dev)
 2800 {
 2801 	local_bh_disable();
 2802 	netif_tx_lock(dev);
 2803 }
 2804 
 2805 static inline void netif_tx_unlock(struct net_device *dev)
 2806 {
 2807 	unsigned int i;
 2808 
 2809 	for (i = 0; i < dev->num_tx_queues; i++) {
 2810 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
 2811 
 2812 		/* No need to grab the _xmit_lock here.  If the
 2813 		 * queue is not stopped for another reason, we
 2814 		 * force a schedule.
 2815 		 */
 2816 		clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
 2817 		netif_schedule_queue(txq);
 2818 	}
 2819 	spin_unlock(&dev->tx_global_lock);
 2820 }
 2821 
 2822 static inline void netif_tx_unlock_bh(struct net_device *dev)
 2823 {
 2824 	netif_tx_unlock(dev);
 2825 	local_bh_enable();
 2826 }
 2827 
 2828 #define HARD_TX_LOCK(dev, txq, cpu) {			\
 2829 	if ((dev->features & NETIF_F_LLTX) == 0) {	\
 2830 		__netif_tx_lock(txq, cpu);		\
 2831 	}						\
 2832 }
 2833 
 2834 #define HARD_TX_UNLOCK(dev, txq) {			\
 2835 	if ((dev->features & NETIF_F_LLTX) == 0) {	\
 2836 		__netif_tx_unlock(txq);			\
 2837 	}						\
 2838 }
 2839 
 2840 static inline void netif_tx_disable(struct net_device *dev)
 2841 {
 2842 	unsigned int i;
 2843 	int cpu;
 2844 
 2845 	local_bh_disable();
 2846 	cpu = smp_processor_id();
 2847 	for (i = 0; i < dev->num_tx_queues; i++) {
 2848 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
 2849 
 2850 		__netif_tx_lock(txq, cpu);
 2851 		netif_tx_stop_queue(txq);
 2852 		__netif_tx_unlock(txq);
 2853 	}
 2854 	local_bh_enable();
 2855 }
 2856 
 2857 static inline void netif_addr_lock(struct net_device *dev)
 2858 {
 2859 	spin_lock(&dev->addr_list_lock);
 2860 }
 2861 
 2862 static inline void netif_addr_lock_nested(struct net_device *dev)
 2863 {
 2864 	spin_lock_nested(&dev->addr_list_lock, SINGLE_DEPTH_NESTING);
 2865 }
 2866 
 2867 static inline void netif_addr_lock_bh(struct net_device *dev)
 2868 {
 2869 	spin_lock_bh(&dev->addr_list_lock);
 2870 }
 2871 
 2872 static inline void netif_addr_unlock(struct net_device *dev)
 2873 {
 2874 	spin_unlock(&dev->addr_list_lock);
 2875 }
 2876 
 2877 static inline void netif_addr_unlock_bh(struct net_device *dev)
 2878 {
 2879 	spin_unlock_bh(&dev->addr_list_lock);
 2880 }
 2881 
 2882 /*
 2883  * dev_addrs walker. Should be used only for read access. Call with
 2884  * rcu_read_lock held.
 2885  */
 2886 #define for_each_dev_addr(dev, ha) \
 2887 		list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)
 2888 
 2889 /* These functions live elsewhere (drivers/net/net_init.c), but are related */
 2890 
 2891 void ether_setup(struct net_device *dev);
 2892 
 2893 /* Support for loadable net-drivers */
 2894 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
 2895 				    void (*setup)(struct net_device *),
 2896 				    unsigned int txqs, unsigned int rxqs);
 2897 #define alloc_netdev(sizeof_priv, name, setup) \
 2898 	alloc_netdev_mqs(sizeof_priv, name, setup, 1, 1)
 2899 
 2900 #define alloc_netdev_mq(sizeof_priv, name, setup, count) \
 2901 	alloc_netdev_mqs(sizeof_priv, name, setup, count, count)
 2902 
 2903 int register_netdev(struct net_device *dev);
 2904 void unregister_netdev(struct net_device *dev);
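
/* Hedged sketch of the usual lifecycle with the helpers above:
 * alloc_netdev() sizes in the private area and runs the setup callback
 * (ether_setup() for Ethernet-like devices), register_netdev() publishes
 * the device, and free_netdev() releases it after unregistration.
 * "struct my_priv" and the "myeth%d" name template are assumptions.
 */
static struct net_device *my_create(void)
{
	struct net_device *dev;

	dev = alloc_netdev(sizeof(struct my_priv), "myeth%d", ether_setup);
	if (!dev)
		return NULL;

	if (register_netdev(dev)) {
		free_netdev(dev);
		return NULL;
	}
	return dev;
}
/* teardown: unregister_netdev(dev); free_netdev(dev); */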
 2905 
 2906 /* General hardware address lists handling functions */
 2907 int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
 2908 		   struct netdev_hw_addr_list *from_list, int addr_len);
 2909 void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
 2910 		      struct netdev_hw_addr_list *from_list, int addr_len);
 2911 void __hw_addr_init(struct netdev_hw_addr_list *list);
 2912 
 2913 /* Functions used for device addresses handling */
 2914 int dev_addr_add(struct net_device *dev, const unsigned char *addr,
 2915 		 unsigned char addr_type);
 2916 int dev_addr_del(struct net_device *dev, const unsigned char *addr,
 2917 		 unsigned char addr_type);
 2918 void dev_addr_flush(struct net_device *dev);
 2919 int dev_addr_init(struct net_device *dev);
 2920 
 2921 /* Functions used for unicast addresses handling */
 2922 int dev_uc_add(struct net_device *dev, const unsigned char *addr);
 2923 int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr);
 2924 int dev_uc_del(struct net_device *dev, const unsigned char *addr);
 2925 int dev_uc_sync(struct net_device *to, struct net_device *from);
 2926 int dev_uc_sync_multiple(struct net_device *to, struct net_device *from);
 2927 void dev_uc_unsync(struct net_device *to, struct net_device *from);
 2928 void dev_uc_flush(struct net_device *dev);
 2929 void dev_uc_init(struct net_device *dev);
 2930 
 2931 /* Functions used for multicast addresses handling */
 2932 int dev_mc_add(struct net_device *dev, const unsigned char *addr);
 2933 int dev_mc_add_global(struct net_device *dev, const unsigned char *addr);
 2934 int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr);
 2935 int dev_mc_del(struct net_device *dev, const unsigned char *addr);
 2936 int dev_mc_del_global(struct net_device *dev, const unsigned char *addr);
 2937 int dev_mc_sync(struct net_device *to, struct net_device *from);
 2938 int dev_mc_sync_multiple(struct net_device *to, struct net_device *from);
 2939 void dev_mc_unsync(struct net_device *to, struct net_device *from);
 2940 void dev_mc_flush(struct net_device *dev);
 2941 void dev_mc_init(struct net_device *dev);
 2942 
 2943 /* Functions used for secondary unicast and multicast support */
 2944 void dev_set_rx_mode(struct net_device *dev);
 2945 void __dev_set_rx_mode(struct net_device *dev);
 2946 int dev_set_promiscuity(struct net_device *dev, int inc);
 2947 int dev_set_allmulti(struct net_device *dev, int inc);
 2948 void netdev_state_change(struct net_device *dev);
 2949 void netdev_notify_peers(struct net_device *dev);
 2950 void netdev_features_change(struct net_device *dev);
 2951 /* Load a device via the kmod */
 2952 void dev_load(struct net *net, const char *name);
 2953 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
 2954 					struct rtnl_link_stats64 *storage);
 2955 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
 2956 			     const struct net_device_stats *netdev_stats);
 2957 
 2958 extern int		netdev_max_backlog;
 2959 extern int		netdev_tstamp_prequeue;
 2960 extern int		weight_p;
 2961 extern int		bpf_jit_enable;
 2962 
 2963 bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
 2964 struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
 2965 						     struct list_head **iter);
 2966 
 2967 /* iterate through upper list, must be called under RCU read lock */
 2968 #define netdev_for_each_all_upper_dev_rcu(dev, updev, iter) \
 2969 	for (iter = &(dev)->all_adj_list.upper, \
 2970 	     updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter)); \
 2971 	     updev; \
 2972 	     updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter)))
 2973 
 2974 void *netdev_lower_get_next_private(struct net_device *dev,
 2975 				    struct list_head **iter);
 2976 void *netdev_lower_get_next_private_rcu(struct net_device *dev,
 2977 					struct list_head **iter);
 2978 
 2979 #define netdev_for_each_lower_private(dev, priv, iter) \
 2980 	for (iter = (dev)->adj_list.lower.next, \
 2981 	     priv = netdev_lower_get_next_private(dev, &(iter)); \
 2982 	     priv; \
 2983 	     priv = netdev_lower_get_next_private(dev, &(iter)))
 2984 
 2985 #define netdev_for_each_lower_private_rcu(dev, priv, iter) \
 2986 	for (iter = &(dev)->adj_list.lower, \
 2987 	     priv = netdev_lower_get_next_private_rcu(dev, &(iter)); \
 2988 	     priv; \
 2989 	     priv = netdev_lower_get_next_private_rcu(dev, &(iter)))
 2990 
 2991 void *netdev_adjacent_get_private(struct list_head *adj_list);
 2992 void *netdev_lower_get_first_private_rcu(struct net_device *dev);
 2993 struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
 2994 struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);
 2995 int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev);
 2996 int netdev_master_upper_dev_link(struct net_device *dev,
 2997 				 struct net_device *upper_dev);
 2998 int netdev_master_upper_dev_link_private(struct net_device *dev,
 2999 					 struct net_device *upper_dev,
 3000 					 void *private);
 3001 void netdev_upper_dev_unlink(struct net_device *dev,
 3002 			     struct net_device *upper_dev);
 3003 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname);
 3004 void *netdev_lower_dev_get_private(struct net_device *dev,
 3005 				   struct net_device *lower_dev);
 3006 int skb_checksum_help(struct sk_buff *skb);
 3007 struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
 3008 				  netdev_features_t features, bool tx_path);
 3009 struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
 3010 				    netdev_features_t features);
 3011 
 3012 static inline
 3013 struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
 3014 {
 3015 	return __skb_gso_segment(skb, features, true);
 3016 }
 3017 __be16 skb_network_protocol(struct sk_buff *skb, int *depth);
 3018 
 3019 static inline bool can_checksum_protocol(netdev_features_t features,
 3020 					 __be16 protocol)
 3021 {
 3022 	return ((features & NETIF_F_GEN_CSUM) ||
 3023 		((features & NETIF_F_V4_CSUM) &&
 3024 		 protocol == htons(ETH_P_IP)) ||
 3025 		((features & NETIF_F_V6_CSUM) &&
 3026 		 protocol == htons(ETH_P_IPV6)) ||
 3027 		((features & NETIF_F_FCOE_CRC) &&
 3028 		 protocol == htons(ETH_P_FCOE)));
 3029 }
 3030 
 3031 #ifdef CONFIG_BUG
 3032 void netdev_rx_csum_fault(struct net_device *dev);
 3033 #else
 3034 static inline void netdev_rx_csum_fault(struct net_device *dev)
 3035 {
 3036 }
 3037 #endif
 3038 /* rx skb timestamps */
 3039 void net_enable_timestamp(void);
 3040 void net_disable_timestamp(void);
 3041 
 3042 #ifdef CONFIG_PROC_FS
 3043 int __init dev_proc_init(void);
 3044 #else
 3045 #define dev_proc_init() 0
 3046 #endif
 3047 
 3048 int netdev_class_create_file_ns(struct class_attribute *class_attr,
 3049 				const void *ns);
 3050 void netdev_class_remove_file_ns(struct class_attribute *class_attr,
 3051 				 const void *ns);
 3052 
 3053 static inline int netdev_class_create_file(struct class_attribute *class_attr)
 3054 {
 3055 	return netdev_class_create_file_ns(class_attr, NULL);
 3056 }
 3057 
 3058 static inline void netdev_class_remove_file(struct class_attribute *class_attr)
 3059 {
 3060 	netdev_class_remove_file_ns(class_attr, NULL);
 3061 }
 3062 
 3063 extern struct kobj_ns_type_operations net_ns_type_operations;
 3064 
 3065 const char *netdev_drivername(const struct net_device *dev);
 3066 
 3067 void linkwatch_run_queue(void);
 3068 
 3069 static inline netdev_features_t netdev_get_wanted_features(
 3070 	struct net_device *dev)
 3071 {
 3072 	return (dev->features & ~dev->hw_features) | dev->wanted_features;
 3073 }
 3074 netdev_features_t netdev_increment_features(netdev_features_t all,
 3075 	netdev_features_t one, netdev_features_t mask);
 3076 
 3077 /* Allow TSO to be used on stacked devices:
 3078  * performing the GSO segmentation before the last device
 3079  * is a performance improvement.
 3080  */
 3081 static inline netdev_features_t netdev_add_tso_features(netdev_features_t features,
 3082 							netdev_features_t mask)
 3083 {
 3084 	return netdev_increment_features(features, NETIF_F_ALL_TSO, mask);
 3085 }
 3086 
 3087 int __netdev_update_features(struct net_device *dev);
 3088 void netdev_update_features(struct net_device *dev);
 3089 void netdev_change_features(struct net_device *dev);
 3090 
 3091 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
 3092 					struct net_device *dev);
 3093 
 3094 netdev_features_t netif_skb_dev_features(struct sk_buff *skb,
 3095 					 const struct net_device *dev);
 3096 static inline netdev_features_t netif_skb_features(struct sk_buff *skb)
 3097 {
 3098 	return netif_skb_dev_features(skb, skb->dev);
 3099 }
 3100 
 3101 static inline bool net_gso_ok(netdev_features_t features, int gso_type)
 3102 {
 3103 	netdev_features_t feature = gso_type << NETIF_F_GSO_SHIFT;
 3104 
 3105 	/* check flags correspondence */
 3106 	BUILD_BUG_ON(SKB_GSO_TCPV4   != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
 3107 	BUILD_BUG_ON(SKB_GSO_UDP     != (NETIF_F_UFO >> NETIF_F_GSO_SHIFT));
 3108 	BUILD_BUG_ON(SKB_GSO_DODGY   != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
 3109 	BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
 3110 	BUILD_BUG_ON(SKB_GSO_TCPV6   != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
 3111 	BUILD_BUG_ON(SKB_GSO_FCOE    != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));
 3112 
 3113 	return (features & feature) == feature;
 3114 }
 3115 
 3116 static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
 3117 {
 3118 	return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
 3119 	       (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
 3120 }
 3121 
 3122 static inline bool netif_needs_gso(struct sk_buff *skb,
 3123 				   netdev_features_t features)
 3124 {
 3125 	return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
 3126 		unlikely((skb->ip_summed != CHECKSUM_PARTIAL) &&
 3127 			 (skb->ip_summed != CHECKSUM_UNNECESSARY)));
 3128 }
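
/* Hedged sketch: how a transmit path might use the predicate above before
 * handing a GSO skb to a device that cannot segment it in hardware. Note
 * skb_gso_segment() may return an ERR_PTR; error handling is abbreviated.
 */
static struct sk_buff *my_segment_if_needed(struct sk_buff *skb,
					    netdev_features_t features)
{
	if (!netif_needs_gso(skb, features))
		return skb;			/* device handles it as-is */

	return skb_gso_segment(skb, features);	/* list of linear segments */
}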
 3129 
 3130 static inline void netif_set_gso_max_size(struct net_device *dev,
 3131 					  unsigned int size)
 3132 {
 3133 	dev->gso_max_size = size;
 3134 }
 3135 
 3136 static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol,
 3137 					int pulled_hlen, u16 mac_offset,
 3138 					int mac_len)
 3139 {
 3140 	skb->protocol = protocol;
 3141 	skb->encapsulation = 1;
 3142 	skb_push(skb, pulled_hlen);
 3143 	skb_reset_transport_header(skb);
 3144 	skb->mac_header = mac_offset;
 3145 	skb->network_header = skb->mac_header + mac_len;
 3146 	skb->mac_len = mac_len;
 3147 }
 3148 
 3149 static inline bool netif_is_macvlan(struct net_device *dev)
 3150 {
 3151 	return dev->priv_flags & IFF_MACVLAN;
 3152 }
 3153 
 3154 static inline bool netif_is_bond_master(struct net_device *dev)
 3155 {
 3156 	return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING;
 3157 }
 3158 
 3159 static inline bool netif_is_bond_slave(struct net_device *dev)
 3160 {
 3161 	return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
 3162 }
 3163 
 3164 static inline bool netif_supports_nofcs(struct net_device *dev)
 3165 {
 3166 	return dev->priv_flags & IFF_SUPP_NOFCS;
 3167 }
 3168 
 3169 extern struct pernet_operations __net_initdata loopback_net_ops;
 3170 
 3171 /* Logging, debugging and troubleshooting/diagnostic helpers. */
 3172 
 3173 /* netdev_printk helpers, similar to dev_printk */
 3174 
 3175 static inline const char *netdev_name(const struct net_device *dev)
 3176 {
 3177 	if (dev->reg_state != NETREG_REGISTERED)
 3178 		return "(unregistered net_device)";
 3179 	return dev->name;
 3180 }
 3181 
 3182 __printf(3, 4)
 3183 int netdev_printk(const char *level, const struct net_device *dev,
 3184 		  const char *format, ...);
 3185 __printf(2, 3)
 3186 int netdev_emerg(const struct net_device *dev, const char *format, ...);
 3187 __printf(2, 3)
 3188 int netdev_alert(const struct net_device *dev, const char *format, ...);
 3189 __printf(2, 3)
 3190 int netdev_crit(const struct net_device *dev, const char *format, ...);
 3191 __printf(2, 3)
 3192 int netdev_err(const struct net_device *dev, const char *format, ...);
 3193 __printf(2, 3)
 3194 int netdev_warn(const struct net_device *dev, const char *format, ...);
 3195 __printf(2, 3)
 3196 int netdev_notice(const struct net_device *dev, const char *format, ...);
 3197 __printf(2, 3)
 3198 int netdev_info(const struct net_device *dev, const char *format, ...);
 3199 
 3200 #define MODULE_ALIAS_NETDEV(device) \
 3201 	MODULE_ALIAS("netdev-" device)
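/* Editor's note: usage sketch -- lets userspace auto-load the module
 * that provides a named device; "dummy0" is illustrative: */
MODULE_ALIAS_NETDEV("dummy0");	/* expands to MODULE_ALIAS("netdev-dummy0") */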
 3202 
 3203 #if defined(CONFIG_DYNAMIC_DEBUG)
 3204 #define netdev_dbg(__dev, format, args...)			\
 3205 do {								\
 3206 	dynamic_netdev_dbg(__dev, format, ##args);		\
 3207 } while (0)
 3208 #elif defined(DEBUG)
 3209 #define netdev_dbg(__dev, format, args...)			\
 3210 	netdev_printk(KERN_DEBUG, __dev, format, ##args)
 3211 #else
 3212 #define netdev_dbg(__dev, format, args...)			\
 3213 ({								\
 3214 	if (0)							\
 3215 		netdev_printk(KERN_DEBUG, __dev, format, ##args); \
 3216 	0;							\
 3217 })
 3218 #endif
 3219 
 3220 #if defined(VERBOSE_DEBUG)
 3221 #define netdev_vdbg	netdev_dbg
 3222 #else
 3223 
 3224 #define netdev_vdbg(dev, format, args...)			\
 3225 ({								\
 3226 	if (0)							\
 3227 		netdev_printk(KERN_DEBUG, dev, format, ##args);	\
 3228 	0;							\
 3229 })
 3230 #endif
 3231 
 3232 /*
 3233  * netdev_WARN() acts like dev_printk(), but with the key difference
 3234  * of using a WARN/WARN_ON to get the message out, including the
 3235  * file/line information and a backtrace.
 3236  */
 3237 #define netdev_WARN(dev, format, args...)			\
 3238 	WARN(1, "netdevice: %s\n" format, netdev_name(dev), ##args)
 3239 
 3240 /* netif printk helpers, similar to netdev_printk */
 3241 
 3242 #define netif_printk(priv, type, level, dev, fmt, args...)	\
 3243 do {					  			\
 3244 	if (netif_msg_##type(priv))				\
 3245 		netdev_printk(level, (dev), fmt, ##args);	\
 3246 } while (0)
 3247 
 3248 #define netif_level(level, priv, type, dev, fmt, args...)	\
 3249 do {								\
 3250 	if (netif_msg_##type(priv))				\
 3251 		netdev_##level(dev, fmt, ##args);		\
 3252 } while (0)
 3253 
 3254 #define netif_emerg(priv, type, dev, fmt, args...)		\
 3255 	netif_level(emerg, priv, type, dev, fmt, ##args)
 3256 #define netif_alert(priv, type, dev, fmt, args...)		\
 3257 	netif_level(alert, priv, type, dev, fmt, ##args)
 3258 #define netif_crit(priv, type, dev, fmt, args...)		\
 3259 	netif_level(crit, priv, type, dev, fmt, ##args)
 3260 #define netif_err(priv, type, dev, fmt, args...)		\
 3261 	netif_level(err, priv, type, dev, fmt, ##args)
 3262 #define netif_warn(priv, type, dev, fmt, args...)		\
 3263 	netif_level(warn, priv, type, dev, fmt, ##args)
 3264 #define netif_notice(priv, type, dev, fmt, args...)		\
 3265 	netif_level(notice, priv, type, dev, fmt, ##args)
 3266 #define netif_info(priv, type, dev, fmt, args...)		\
 3267 	netif_level(info, priv, type, dev, fmt, ##args)
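/* Editor's note: hedged sketch.  The netif_* helpers assume the driver's
 * private struct carries a msg_enable bitmask consumed by the
 * netif_msg_##type() accessors; all names below are illustrative: */
struct example_priv {
	u32 msg_enable;
};

static void example_report_link(struct example_priv *priv,
				struct net_device *dev, bool up)
{
	if (up)
		netif_info(priv, link, dev, "link up\n");
	else
		netif_warn(priv, link, dev, "link down\n");
}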
 3268 
 3269 #if defined(CONFIG_DYNAMIC_DEBUG)
 3270 #define netif_dbg(priv, type, netdev, format, args...)		\
 3271 do {								\
 3272 	if (netif_msg_##type(priv))				\
 3273 		dynamic_netdev_dbg(netdev, format, ##args);	\
 3274 } while (0)
 3275 #elif defined(DEBUG)
 3276 #define netif_dbg(priv, type, dev, format, args...)		\
 3277 	netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
 3278 #else
 3279 #define netif_dbg(priv, type, dev, format, args...)			\
 3280 ({									\
 3281 	if (0)								\
 3282 		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
 3283 	0;								\
 3284 })
 3285 #endif
 3286 
 3287 #if defined(VERBOSE_DEBUG)
 3288 #define netif_vdbg	netif_dbg
 3289 #else
 3290 #define netif_vdbg(priv, type, dev, format, args...)		\
 3291 ({								\
 3292 	if (0)							\
 3293 		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
 3294 	0;							\
 3295 })
 3296 #endif
 3297 
 3298 /*
 3299  *	The list of packet types we will receive (as opposed to discard)
 3300  *	and the routines to invoke.
 3301  *
 3302  *	Why 16?  Because with 16 the only overlap we get on a hash of the
 3303  *	low nibble of the protocol value is RARP/SNAP/X.25.
 3304  *
 3305  *      NOTE:  That is no longer true with the addition of VLAN tags.  Not
 3306  *             sure which should go first, but I bet it won't make much
 3307  *             difference if we are running VLANs.  The good news is that
 3308  *             this protocol won't be in the list unless compiled in, so
 3309  *             the average user (w/out VLANs) will not be adversely affected.
 3310  *             --BLG
 3311  *
 3312  *		0800	IP
 3313  *		8100    802.1Q VLAN
 3314  *		0001	802.3
 3315  *		0002	AX.25
 3316  *		0004	802.2
 3317  *		8035	RARP
 3318  *		0005	SNAP
 3319  *		0805	X.25
 3320  *		0806	ARP
 3321  *		8137	IPX
 3322  *		0009	Localtalk
 3323  *		86DD	IPv6
 3324  */
 3325 #define PTYPE_HASH_SIZE	(16)
 3326 #define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)
 3327 
 3328 #endif	/* _LINUX_NETDEVICE_H */
    1 /*
    2  *	pci.h
    3  *
    4  *	PCI defines and function prototypes
    5  *	Copyright 1994, Drew Eckhardt
    6  *	Copyright 1997--1999 Martin Mares <mj@ucw.cz>
    7  *
    8  *	For more information, please consult the following manuals (look at
    9  *	http://www.pcisig.com/ for how to get them):
   10  *
   11  *	PCI BIOS Specification
   12  *	PCI Local Bus Specification
   13  *	PCI to PCI Bridge Specification
   14  *	PCI System Design Guide
   15  */
   16 #ifndef LINUX_PCI_H
   17 #define LINUX_PCI_H
   18 
   19 
   20 #include <linux/mod_devicetable.h>
   21 
   22 #include <linux/types.h>
   23 #include <linux/init.h>
   24 #include <linux/ioport.h>
   25 #include <linux/list.h>
   26 #include <linux/compiler.h>
   27 #include <linux/errno.h>
   28 #include <linux/kobject.h>
   29 #include <linux/atomic.h>
   30 #include <linux/device.h>
   31 #include <linux/io.h>
   32 #include <linux/irqreturn.h>
   33 #include <uapi/linux/pci.h>
   34 
   35 #include <linux/pci_ids.h>
   36 
   37 /*
   38  * The PCI interface treats multi-function devices as independent
   39  * devices.  The slot/function address of each device is encoded
   40  * in a single byte as follows:
   41  *
   42  *	7:3 = slot
   43  *	2:0 = function
   44  *
   45  * PCI_DEVFN(), PCI_SLOT(), and PCI_FUNC() are defined in uapi/linux/pci.h.
   46  * In the interest of not exposing interfaces to user-space unnecessarily,
   47  * the following kernel-only defines are being added here.
   48  */
   49 #define PCI_DEVID(bus, devfn)  ((((u16)bus) << 8) | devfn)
   50 /* return bus from PCI devid = (((u16)bus_number) << 8) | devfn */
   51 #define PCI_BUS_NUM(x) (((x) >> 8) & 0xff)
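/* Editor's note: illustrative round trip through the encoding above;
 * the bus/slot/function values are made up: */
static inline void example_devid_roundtrip(void)
{
	u16 devid = PCI_DEVID(0x3a, PCI_DEVFN(2, 1)); /* bus 0x3a, slot 2, fn 1 */

	WARN_ON(PCI_BUS_NUM(devid) != 0x3a);
	WARN_ON(PCI_SLOT(devid & 0xff) != 2 || PCI_FUNC(devid & 0xff) != 1);
}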
   52 
   53 /* pci_slot represents a physical slot */
   54 struct pci_slot {
   55 	struct pci_bus *bus;		/* The bus this slot is on */
   56 	struct list_head list;		/* node in list of slots on this bus */
   57 	struct hotplug_slot *hotplug;	/* Hotplug info (migrate over time) */
   58 	unsigned char number;		/* PCI_SLOT(pci_dev->devfn) */
   59 	struct kobject kobj;
   60 };
   61 
   62 static inline const char *pci_slot_name(const struct pci_slot *slot)
   63 {
   64 	return kobject_name(&slot->kobj);
   65 }
   66 
   67 /* File state for mmap()s on /proc/bus/pci/X/Y */
   68 enum pci_mmap_state {
   69 	pci_mmap_io,
   70 	pci_mmap_mem
   71 };
   72 
   73 /* This defines the direction arg to the DMA mapping routines. */
   74 #define PCI_DMA_BIDIRECTIONAL	0
   75 #define PCI_DMA_TODEVICE	1
   76 #define PCI_DMA_FROMDEVICE	2
   77 #define PCI_DMA_NONE		3
   78 
   79 /*
   80  *  For PCI devices, the region numbers are assigned this way:
   81  */
   82 enum {
   83 	/* #0-5: standard PCI resources */
   84 	PCI_STD_RESOURCES,
   85 	PCI_STD_RESOURCE_END = 5,
   86 
   87 	/* #6: expansion ROM resource */
   88 	PCI_ROM_RESOURCE,
   89 
   90 	/* device specific resources */
   91 #ifdef CONFIG_PCI_IOV
   92 	PCI_IOV_RESOURCES,
   93 	PCI_IOV_RESOURCE_END = PCI_IOV_RESOURCES + PCI_SRIOV_NUM_BARS - 1,
   94 #endif
   95 
   96 	/* resources assigned to buses behind the bridge */
   97 #define PCI_BRIDGE_RESOURCE_NUM 4
   98 
   99 	PCI_BRIDGE_RESOURCES,
  100 	PCI_BRIDGE_RESOURCE_END = PCI_BRIDGE_RESOURCES +
  101 				  PCI_BRIDGE_RESOURCE_NUM - 1,
  102 
  103 	/* total resources associated with a PCI device */
  104 	PCI_NUM_RESOURCES,
  105 
  106 	/* preserve this for compatibility */
  107 	DEVICE_COUNT_RESOURCE = PCI_NUM_RESOURCES,
  108 };
  109 
  110 typedef int __bitwise pci_power_t;
  111 
  112 #define PCI_D0		((pci_power_t __force) 0)
  113 #define PCI_D1		((pci_power_t __force) 1)
  114 #define PCI_D2		((pci_power_t __force) 2)
  115 #define PCI_D3hot	((pci_power_t __force) 3)
  116 #define PCI_D3cold	((pci_power_t __force) 4)
  117 #define PCI_UNKNOWN	((pci_power_t __force) 5)
  118 #define PCI_POWER_ERROR	((pci_power_t __force) -1)
  119 
  120 /* Remember to update this when the list above changes! */
  121 extern const char *pci_power_names[];
  122 
  123 static inline const char *pci_power_name(pci_power_t state)
  124 {
  125 	return pci_power_names[1 + (int) state];
  126 }
  127 
  128 #define PCI_PM_D2_DELAY		200
  129 #define PCI_PM_D3_WAIT		10
  130 #define PCI_PM_D3COLD_WAIT	100
  131 #define PCI_PM_BUS_WAIT		50
  132 
  133 /** The pci_channel state describes connectivity between the CPU and
  134  *  the pci device.  If some PCI bus between here and the pci device
  135  *  has crashed or locked up, this info is reflected here.
  136  */
  137 typedef unsigned int __bitwise pci_channel_state_t;
  138 
  139 enum pci_channel_state {
  140 	/* I/O channel is in normal state */
  141 	pci_channel_io_normal = (__force pci_channel_state_t) 1,
  142 
  143 	/* I/O to channel is blocked */
  144 	pci_channel_io_frozen = (__force pci_channel_state_t) 2,
  145 
  146 	/* PCI card is dead */
  147 	pci_channel_io_perm_failure = (__force pci_channel_state_t) 3,
  148 };
  149 
  150 typedef unsigned int __bitwise pcie_reset_state_t;
  151 
  152 enum pcie_reset_state {
  153 	/* Reset is NOT asserted (Use to deassert reset) */
  154 	pcie_deassert_reset = (__force pcie_reset_state_t) 1,
  155 
  156 	/* Use #PERST to reset PCIe device */
  157 	pcie_warm_reset = (__force pcie_reset_state_t) 2,
  158 
  159 	/* Use PCIe Hot Reset to reset device */
  160 	pcie_hot_reset = (__force pcie_reset_state_t) 3
  161 };
  162 
  163 typedef unsigned short __bitwise pci_dev_flags_t;
  164 enum pci_dev_flags {
  165 	/* INTX_DISABLE in PCI_COMMAND register disables MSI
  166 	 * generation too.
  167 	 */
  168 	PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG = (__force pci_dev_flags_t) 1,
  169 	/* Device configuration is irrevocably lost if disabled into D3 */
  170 	PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) 2,
  171 	/* Provide indication device is assigned by a Virtual Machine Manager */
  172 	PCI_DEV_FLAGS_ASSIGNED = (__force pci_dev_flags_t) 4,
  173 };
  174 
  175 enum pci_irq_reroute_variant {
  176 	INTEL_IRQ_REROUTE_VARIANT = 1,
  177 	MAX_IRQ_REROUTE_VARIANTS = 3
  178 };
  179 
  180 typedef unsigned short __bitwise pci_bus_flags_t;
  181 enum pci_bus_flags {
  182 	PCI_BUS_FLAGS_NO_MSI   = (__force pci_bus_flags_t) 1,
  183 	PCI_BUS_FLAGS_NO_MMRBC = (__force pci_bus_flags_t) 2,
  184 };
  185 
  186 /* These values come from the PCI Express Spec */
  187 enum pcie_link_width {
  188 	PCIE_LNK_WIDTH_RESRV	= 0x00,
  189 	PCIE_LNK_X1		= 0x01,
  190 	PCIE_LNK_X2		= 0x02,
  191 	PCIE_LNK_X4		= 0x04,
  192 	PCIE_LNK_X8		= 0x08,
  193 	PCIE_LNK_X12		= 0x0C,
  194 	PCIE_LNK_X16		= 0x10,
  195 	PCIE_LNK_X32		= 0x20,
  196 	PCIE_LNK_WIDTH_UNKNOWN  = 0xFF,
  197 };
  198 
  199 /* Based on the PCI Hotplug Spec, but some values are made up by us */
  200 enum pci_bus_speed {
  201 	PCI_SPEED_33MHz			= 0x00,
  202 	PCI_SPEED_66MHz			= 0x01,
  203 	PCI_SPEED_66MHz_PCIX		= 0x02,
  204 	PCI_SPEED_100MHz_PCIX		= 0x03,
  205 	PCI_SPEED_133MHz_PCIX		= 0x04,
  206 	PCI_SPEED_66MHz_PCIX_ECC	= 0x05,
  207 	PCI_SPEED_100MHz_PCIX_ECC	= 0x06,
  208 	PCI_SPEED_133MHz_PCIX_ECC	= 0x07,
  209 	PCI_SPEED_66MHz_PCIX_266	= 0x09,
  210 	PCI_SPEED_100MHz_PCIX_266	= 0x0a,
  211 	PCI_SPEED_133MHz_PCIX_266	= 0x0b,
  212 	AGP_UNKNOWN			= 0x0c,
  213 	AGP_1X				= 0x0d,
  214 	AGP_2X				= 0x0e,
  215 	AGP_4X				= 0x0f,
  216 	AGP_8X				= 0x10,
  217 	PCI_SPEED_66MHz_PCIX_533	= 0x11,
  218 	PCI_SPEED_100MHz_PCIX_533	= 0x12,
  219 	PCI_SPEED_133MHz_PCIX_533	= 0x13,
  220 	PCIE_SPEED_2_5GT		= 0x14,
  221 	PCIE_SPEED_5_0GT		= 0x15,
  222 	PCIE_SPEED_8_0GT		= 0x16,
  223 	PCI_SPEED_UNKNOWN		= 0xff,
  224 };
  225 
  226 struct pci_cap_saved_data {
  227 	u16 cap_nr;
  228 	bool cap_extended;
  229 	unsigned int size;
  230 	u32 data[0];
  231 };
  232 
  233 struct pci_cap_saved_state {
  234 	struct hlist_node next;
  235 	struct pci_cap_saved_data cap;
  236 };
  237 
  238 struct pcie_link_state;
  239 struct pci_vpd;
  240 struct pci_sriov;
  241 struct pci_ats;
  242 
  243 /*
  244  * The pci_dev structure is used to describe PCI devices.
  245  */
  246 struct pci_dev {
  247 	struct list_head bus_list;	/* node in per-bus list */
  248 	struct pci_bus	*bus;		/* bus this device is on */
  249 	struct pci_bus	*subordinate;	/* bus this device bridges to */
  250 
  251 	void		*sysdata;	/* hook for sys-specific extension */
  252 	struct proc_dir_entry *procent;	/* device entry in /proc/bus/pci */
  253 	struct pci_slot	*slot;		/* Physical slot this device is in */
  254 
  255 	unsigned int	devfn;		/* encoded device & function index */
  256 	unsigned short	vendor;
  257 	unsigned short	device;
  258 	unsigned short	subsystem_vendor;
  259 	unsigned short	subsystem_device;
  260 	unsigned int	class;		/* 3 bytes: (base,sub,prog-if) */
  261 	u8		revision;	/* PCI revision, low byte of class word */
  262 	u8		hdr_type;	/* PCI header type (`multi' flag masked out) */
  263 	u8		pcie_cap;	/* PCIe capability offset */
  264 	u8		msi_cap;	/* MSI capability offset */
  265 	u8		msix_cap;	/* MSI-X capability offset */
  266 	u8		pcie_mpss:3;	/* PCIe Max Payload Size Supported */
  267 	u8		rom_base_reg;	/* which config register controls the ROM */
  268 	u8		pin;		/* which interrupt pin this device uses */
  269 	u16		pcie_flags_reg;	/* cached PCIe Capabilities Register */
  270 
  271 	struct pci_driver *driver;	/* which driver has allocated this device */
  272 	u64		dma_mask;	/* Mask of the bits of bus address this
  273 					   device implements.  Normally this is
  274 					   0xffffffff.  You only need to change
  275 					   this if your device has broken DMA
  276 					   or supports 64-bit transfers.  */
  277 
  278 	struct device_dma_parameters dma_parms;
  279 
  280 	pci_power_t     current_state;  /* Current operating state. In ACPI-speak,
  281 					   this is D0-D3, D0 being fully functional,
  282 					   and D3 being off. */
  283 	u8		pm_cap;		/* PM capability offset */
  284 	unsigned int	pme_support:5;	/* Bitmask of states from which PME#
  285 					   can be generated */
  286 	unsigned int	pme_interrupt:1;
  287 	unsigned int	pme_poll:1;	/* Poll device's PME status bit */
  288 	unsigned int	d1_support:1;	/* Low power state D1 is supported */
  289 	unsigned int	d2_support:1;	/* Low power state D2 is supported */
  290 	unsigned int	no_d1d2:1;	/* D1 and D2 are forbidden */
  291 	unsigned int	no_d3cold:1;	/* D3cold is forbidden */
  292 	unsigned int	d3cold_allowed:1;	/* D3cold is allowed by user */
  293 	unsigned int	mmio_always_on:1;	/* disallow turning off io/mem
  294 						   decoding during bar sizing */
  295 	unsigned int	wakeup_prepared:1;
  296 	unsigned int	runtime_d3cold:1;	/* whether go through runtime
  297 						   D3cold, not set for devices
  298 						   powered on/off by the
  299 						   corresponding bridge */
  300 	unsigned int	d3_delay;	/* D3->D0 transition time in ms */
  301 	unsigned int	d3cold_delay;	/* D3cold->D0 transition time in ms */
  302 
  303 #ifdef CONFIG_PCIEASPM
  304 	struct pcie_link_state	*link_state;	/* ASPM link state */
  305 #endif
  306 
  307 	pci_channel_state_t error_state;	/* current connectivity state */
  308 	struct	device	dev;		/* Generic device interface */
  309 
  310 	int		cfg_size;	/* Size of configuration space */
  311 
  312 	/*
  313 	 * Instead of touching interrupt line and base address registers
  314 	 * directly, use the values stored here. They might be different!
  315 	 */
  316 	unsigned int	irq;
  317 	struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */
  318 
  319 	bool match_driver;		/* Skip attaching driver */
  320 	/* These fields are used by common fixups */
  321 	unsigned int	transparent:1;	/* Subtractive decode PCI bridge */
  322 	unsigned int	multifunction:1;/* Part of multi-function device */
  323 	/* keep track of device state */
  324 	unsigned int	is_added:1;
  325 	unsigned int	is_busmaster:1; /* device is busmaster */
  326 	unsigned int	no_msi:1;	/* device may not use msi */
  327 	unsigned int	block_cfg_access:1;	/* config space access is blocked */
  328 	unsigned int	broken_parity_status:1;	/* Device generates false positive parity */
  329 	unsigned int	irq_reroute_variant:2;	/* device needs IRQ rerouting variant */
  330 	unsigned int	msi_enabled:1;
  331 	unsigned int	msix_enabled:1;
  332 	unsigned int	ari_enabled:1;	/* ARI forwarding */
  333 	unsigned int	is_managed:1;
  334 	unsigned int    needs_freset:1; /* Dev requires fundamental reset */
  335 	unsigned int	state_saved:1;
  336 	unsigned int	is_physfn:1;
  337 	unsigned int	is_virtfn:1;
  338 	unsigned int	reset_fn:1;
  339 	unsigned int    is_hotplug_bridge:1;
  340 	unsigned int    __aer_firmware_first_valid:1;
  341 	unsigned int	__aer_firmware_first:1;
  342 	unsigned int	broken_intx_masking:1;
  343 	unsigned int	io_window_1k:1;	/* Intel P2P bridge 1K I/O windows */
  344 	pci_dev_flags_t dev_flags;
  345 	atomic_t	enable_cnt;	/* pci_enable_device has been called */
  346 
  347 	u32		saved_config_space[16]; /* config space saved at suspend time */
  348 	struct hlist_head saved_cap_space;
  349 	struct bin_attribute *rom_attr; /* attribute descriptor for sysfs ROM entry */
  350 	int rom_attr_enabled;		/* has display of the rom attribute been enabled? */
  351 	struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */
  352 	struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */
  353 #ifdef CONFIG_PCI_MSI
  354 	struct list_head msi_list;
  355 	const struct attribute_group **msi_irq_groups;
  356 #endif
  357 	struct pci_vpd *vpd;
  358 #ifdef CONFIG_PCI_ATS
  359 	union {
  360 		struct pci_sriov *sriov;	/* SR-IOV capability related */
  361 		struct pci_dev *physfn;	/* the PF this VF is associated with */
  362 	};
  363 	struct pci_ats	*ats;	/* Address Translation Service */
  364 #endif
  365 	phys_addr_t rom; /* Physical address of ROM if it's not from the BAR */
  366 	size_t romlen; /* Length of ROM if it's not from the BAR */
  367 };
  368 
  369 static inline struct pci_dev *pci_physfn(struct pci_dev *dev)
  370 {
  371 #ifdef CONFIG_PCI_IOV
  372 	if (dev->is_virtfn)
  373 		dev = dev->physfn;
  374 #endif
  375 	return dev;
  376 }
  377 
  378 struct pci_dev *pci_alloc_dev(struct pci_bus *bus);
  379 
  380 #define	to_pci_dev(n) container_of(n, struct pci_dev, dev)
  381 #define for_each_pci_dev(d) while ((d = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, d)) != NULL)
  382 
  383 static inline int pci_channel_offline(struct pci_dev *pdev)
  384 {
  385 	return (pdev->error_state != pci_channel_io_normal);
  386 }
  387 
  388 struct pci_host_bridge_window {
  389 	struct list_head list;
  390 	struct resource *res;		/* host bridge aperture (CPU address) */
  391 	resource_size_t offset;		/* bus address + offset = CPU address */
  392 };
  393 
  394 struct pci_host_bridge {
  395 	struct device dev;
  396 	struct pci_bus *bus;		/* root bus */
  397 	struct list_head windows;	/* pci_host_bridge_windows */
  398 	void (*release_fn)(struct pci_host_bridge *);
  399 	void *release_data;
  400 };
  401 
  402 #define	to_pci_host_bridge(n) container_of(n, struct pci_host_bridge, dev)
  403 void pci_set_host_bridge_release(struct pci_host_bridge *bridge,
  404 		     void (*release_fn)(struct pci_host_bridge *),
  405 		     void *release_data);
  406 
  407 int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge);
  408 
  409 /*
  410  * The first PCI_BRIDGE_RESOURCE_NUM PCI bus resources (those that correspond
  411  * to P2P or CardBus bridge windows) go in a table.  Additional ones (for
  412  * buses below host bridges or subtractive decode bridges) go in the list.
  413  * Use pci_bus_for_each_resource() to iterate through all the resources.
  414  */
  415 
  416 /*
  417  * PCI_SUBTRACTIVE_DECODE means the bridge forwards the window implicitly
  418  * and there's no way to program the bridge with the details of the window.
  419  * This does not apply to ACPI _CRS windows, even with the _DEC subtractive-
  420  * decode bit set, because they are explicit and can be programmed with _SRS.
  421  */
  422 #define PCI_SUBTRACTIVE_DECODE	0x1
  423 
  424 struct pci_bus_resource {
  425 	struct list_head list;
  426 	struct resource *res;
  427 	unsigned int flags;
  428 };
  429 
  430 #define PCI_REGION_FLAG_MASK	0x0fU	/* These bits of resource flags tell us the PCI region flags */
  431 
  432 struct pci_bus {
  433 	struct list_head node;		/* node in list of buses */
  434 	struct pci_bus	*parent;	/* parent bus this bridge is on */
  435 	struct list_head children;	/* list of child buses */
  436 	struct list_head devices;	/* list of devices on this bus */
  437 	struct pci_dev	*self;		/* bridge device as seen by parent */
  438 	struct list_head slots;		/* list of slots on this bus */
  439 	struct resource *resource[PCI_BRIDGE_RESOURCE_NUM];
  440 	struct list_head resources;	/* address space routed to this bus */
  441 	struct resource busn_res;	/* bus numbers routed to this bus */
  442 
  443 	struct pci_ops	*ops;		/* configuration access functions */
  444 	struct msi_chip	*msi;		/* MSI controller */
  445 	void		*sysdata;	/* hook for sys-specific extension */
  446 	struct proc_dir_entry *procdir;	/* directory entry in /proc/bus/pci */
  447 
  448 	unsigned char	number;		/* bus number */
  449 	unsigned char	primary;	/* number of primary bridge */
  450 	unsigned char	max_bus_speed;	/* enum pci_bus_speed */
  451 	unsigned char	cur_bus_speed;	/* enum pci_bus_speed */
  452 
  453 	char		name[48];
  454 
  455 	unsigned short  bridge_ctl;	/* manage NO_ISA/FBB/et al behaviors */
  456 	pci_bus_flags_t bus_flags;	/* inherited by child buses */
  457 	struct device		*bridge;
  458 	struct device		dev;
  459 	struct bin_attribute	*legacy_io; /* legacy I/O for this bus */
  460 	struct bin_attribute	*legacy_mem; /* legacy mem */
  461 	unsigned int		is_added:1;
  462 };
  463 
  464 #define pci_bus_b(n)	list_entry(n, struct pci_bus, node)
  465 #define to_pci_bus(n)	container_of(n, struct pci_bus, dev)
  466 
  467 /*
  468  * Returns true if the PCI bus is root (behind host-PCI bridge),
  469  * false otherwise
  470  *
  471  * Some code assumes that "bus->self == NULL" means that bus is a root bus.
  472  * This is incorrect because "virtual" buses added for SR-IOV (via
  473  * virtfn_add_bus()) have "bus->self == NULL" but are not root buses.
  474  */
  475 static inline bool pci_is_root_bus(struct pci_bus *pbus)
  476 {
  477 	return !(pbus->parent);
  478 }
  479 
  480 static inline struct pci_dev *pci_upstream_bridge(struct pci_dev *dev)
  481 {
  482 	dev = pci_physfn(dev);
  483 	if (pci_is_root_bus(dev->bus))
  484 		return NULL;
  485 
  486 	return dev->bus->self;
  487 }
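/* Editor's note: hedged sketch -- walking from a device up through its
 * bridges toward the root bus using the helper above; pci_name() is
 * declared further down in this header: */
static void example_walk_upstream(struct pci_dev *dev)
{
	struct pci_dev *bridge = pci_upstream_bridge(dev);

	while (bridge) {
		dev_info(&bridge->dev, "bridges toward %s\n", pci_name(dev));
		bridge = pci_upstream_bridge(bridge);
	}
}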
  488 
  489 #ifdef CONFIG_PCI_MSI
  490 static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev)
  491 {
  492 	return pci_dev->msi_enabled || pci_dev->msix_enabled;
  493 }
  494 #else
  495 static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) { return false; }
  496 #endif
  497 
  498 /*
  499  * Error values that may be returned by PCI functions.
  500  */
  501 #define PCIBIOS_SUCCESSFUL		0x00
  502 #define PCIBIOS_FUNC_NOT_SUPPORTED	0x81
  503 #define PCIBIOS_BAD_VENDOR_ID		0x83
  504 #define PCIBIOS_DEVICE_NOT_FOUND	0x86
  505 #define PCIBIOS_BAD_REGISTER_NUMBER	0x87
  506 #define PCIBIOS_SET_FAILED		0x88
  507 #define PCIBIOS_BUFFER_TOO_SMALL	0x89
  508 
  509 /*
  510  * Translate above to generic errno for passing back through non-PCI code.
  511  */
  512 static inline int pcibios_err_to_errno(int err)
  513 {
  514 	if (err <= PCIBIOS_SUCCESSFUL)
  515 		return err; /* Assume already errno */
  516 
  517 	switch (err) {
  518 	case PCIBIOS_FUNC_NOT_SUPPORTED:
  519 		return -ENOENT;
  520 	case PCIBIOS_BAD_VENDOR_ID:
  521 		return -EINVAL;
  522 	case PCIBIOS_DEVICE_NOT_FOUND:
  523 		return -ENODEV;
  524 	case PCIBIOS_BAD_REGISTER_NUMBER:
  525 		return -EFAULT;
  526 	case PCIBIOS_SET_FAILED:
  527 		return -EIO;
  528 	case PCIBIOS_BUFFER_TOO_SMALL:
  529 		return -ENOSPC;
  530 	}
  531 
  532 	return -ENOTTY;
  533 }
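/* Editor's note: illustrative wrapper -- translating a PCIBIOS_* result
 * into a plain errno before handing it to non-PCI code (the config
 * accessors used here are declared further down in this header): */
static int example_read_vendor(struct pci_dev *dev, u16 *vendor)
{
	int err = pci_read_config_word(dev, PCI_VENDOR_ID, vendor);

	return pcibios_err_to_errno(err);
}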
  534 
  535 /* Low-level architecture-dependent routines */
  536 
  537 struct pci_ops {
  538 	int (*read)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val);
  539 	int (*write)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val);
  540 };
  541 
  542 /*
  543  * ACPI needs to be able to access PCI config space before we've done a
  544  * PCI bus scan and created pci_bus structures.
  545  */
  546 int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
  547 		 int reg, int len, u32 *val);
  548 int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn,
  549 		  int reg, int len, u32 val);
  550 
  551 struct pci_bus_region {
  552 	dma_addr_t start;
  553 	dma_addr_t end;
  554 };
  555 
  556 struct pci_dynids {
  557 	spinlock_t lock;            /* protects list, index */
  558 	struct list_head list;      /* for IDs added at runtime */
  559 };
  560 
  561 
  562 /*
  563  * PCI Error Recovery System (PCI-ERS).  If a PCI device driver provides
  564  * a set of callbacks in struct pci_error_handlers, that device driver
  565  * will be notified of PCI bus errors, and will be driven to recovery
  566  * when an error occurs.
  567  */
  568 
  569 typedef unsigned int __bitwise pci_ers_result_t;
  570 
  571 enum pci_ers_result {
  572 	/* no result/none/not supported in device driver */
  573 	PCI_ERS_RESULT_NONE = (__force pci_ers_result_t) 1,
  574 
  575 	/* Device driver can recover without slot reset */
  576 	PCI_ERS_RESULT_CAN_RECOVER = (__force pci_ers_result_t) 2,
  577 
  578 	/* Device driver wants slot to be reset. */
  579 	PCI_ERS_RESULT_NEED_RESET = (__force pci_ers_result_t) 3,
  580 
  581 	/* Device has completely failed, is unrecoverable */
  582 	PCI_ERS_RESULT_DISCONNECT = (__force pci_ers_result_t) 4,
  583 
  584 	/* Device driver is fully recovered and operational */
  585 	PCI_ERS_RESULT_RECOVERED = (__force pci_ers_result_t) 5,
  586 
  587 	/* No AER capabilities registered for the driver */
  588 	PCI_ERS_RESULT_NO_AER_DRIVER = (__force pci_ers_result_t) 6,
  589 };
  590 
  591 /* PCI bus error event callbacks */
  592 struct pci_error_handlers {
  593 	/* PCI bus error detected on this device */
  594 	pci_ers_result_t (*error_detected)(struct pci_dev *dev,
  595 					   enum pci_channel_state error);
  596 
  597 	/* MMIO has been re-enabled, but not DMA */
  598 	pci_ers_result_t (*mmio_enabled)(struct pci_dev *dev);
  599 
  600 	/* PCI Express link has been reset */
  601 	pci_ers_result_t (*link_reset)(struct pci_dev *dev);
  602 
  603 	/* PCI slot has been reset */
  604 	pci_ers_result_t (*slot_reset)(struct pci_dev *dev);
  605 
  606 	/* Device driver may resume normal operations */
  607 	void (*resume)(struct pci_dev *dev);
  608 };
  609 
  610 
  611 struct module;
  612 struct pci_driver {
  613 	struct list_head node;
  614 	const char *name;
  615 	const struct pci_device_id *id_table;	/* must be non-NULL for probe to be called */
  616 	int  (*probe)  (struct pci_dev *dev, const struct pci_device_id *id);	/* New device inserted */
  617 	void (*remove) (struct pci_dev *dev);	/* Device removed (NULL if not a hot-plug capable driver) */
  618 	int  (*suspend) (struct pci_dev *dev, pm_message_t state);	/* Device suspended */
  619 	int  (*suspend_late) (struct pci_dev *dev, pm_message_t state);
  620 	int  (*resume_early) (struct pci_dev *dev);
  621 	int  (*resume) (struct pci_dev *dev);	                /* Device woken up */
  622 	void (*shutdown) (struct pci_dev *dev);
  623 	int (*sriov_configure) (struct pci_dev *dev, int num_vfs); /* PF pdev */
  624 	const struct pci_error_handlers *err_handler;
  625 	struct device_driver	driver;
  626 	struct pci_dynids dynids;
  627 };
  628 
  629 #define	to_pci_driver(drv) container_of(drv, struct pci_driver, driver)
  630 
  631 /**
  632  * DEFINE_PCI_DEVICE_TABLE - macro used to describe a pci device table
  633  * @_table: device table name
  634  *
  635  * This macro is deprecated and should not be used in new code.
  636  */
  637 #define DEFINE_PCI_DEVICE_TABLE(_table) \
  638 	const struct pci_device_id _table[]
  639 
  640 /**
  641  * PCI_DEVICE - macro used to describe a specific pci device
  642  * @vend: the 16 bit PCI Vendor ID
  643  * @dev: the 16 bit PCI Device ID
  644  *
  645  * This macro is used to create a struct pci_device_id that matches a
  646  * specific device.  The subvendor and subdevice fields will be set to
  647  * PCI_ANY_ID.
  648  */
  649 #define PCI_DEVICE(vend,dev) \
  650 	.vendor = (vend), .device = (dev), \
  651 	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
  652 
  653 /**
  654  * PCI_DEVICE_SUB - macro used to describe a specific pci device with subsystem
  655  * @vend: the 16 bit PCI Vendor ID
  656  * @dev: the 16 bit PCI Device ID
  657  * @subvend: the 16 bit PCI Subvendor ID
  658  * @subdev: the 16 bit PCI Subdevice ID
  659  *
  660  * This macro is used to create a struct pci_device_id that matches a
  661  * specific device with subsystem information.
  662  */
  663 #define PCI_DEVICE_SUB(vend, dev, subvend, subdev) \
  664 	.vendor = (vend), .device = (dev), \
  665 	.subvendor = (subvend), .subdevice = (subdev)
  666 
  667 /**
  668  * PCI_DEVICE_CLASS - macro used to describe a specific pci device class
  669  * @dev_class: the class, subclass, prog-if triple for this device
  670  * @dev_class_mask: the class mask for this device
  671  *
  672  * This macro is used to create a struct pci_device_id that matches a
  673  * specific PCI class.  The vendor, device, subvendor, and subdevice
  674  * fields will be set to PCI_ANY_ID.
  675  */
  676 #define PCI_DEVICE_CLASS(dev_class,dev_class_mask) \
  677 	.class = (dev_class), .class_mask = (dev_class_mask), \
  678 	.vendor = PCI_ANY_ID, .device = PCI_ANY_ID, \
  679 	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
  680 
  681 /**
  682  * PCI_VDEVICE - macro used to describe a specific pci device in short form
  683  * @vendor: the vendor name
  684  * @device: the 16 bit PCI Device ID
  685  *
  686  * This macro is used to create a struct pci_device_id that matches a
  687  * specific PCI device.  The subvendor, and subdevice fields will be set
  688  * to PCI_ANY_ID. The macro allows the next field to follow as the device
  689  * private data.
  690  */
  691 
  692 #define PCI_VDEVICE(vendor, device)		\
  693 	PCI_VENDOR_ID_##vendor, (device),	\
  694 	PCI_ANY_ID, PCI_ANY_ID, 0, 0
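/* Editor's note: hedged sketch of a match table built from the macros
 * above; the IDs are illustrative, and PCI_CLASS_STORAGE_EXPRESS comes
 * from linux/pci_ids.h: */
static const struct pci_device_id example_ids[] = {
	{ PCI_DEVICE(0x1af4, 0x1000) },		/* one exact vendor/device */
	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, example_ids);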
  695 
  696 /* these external functions are only available when PCI support is enabled */
  697 #ifdef CONFIG_PCI
  698 
  699 void pcie_bus_configure_settings(struct pci_bus *bus);
  700 
  701 enum pcie_bus_config_types {
  702 	PCIE_BUS_TUNE_OFF,
  703 	PCIE_BUS_SAFE,
  704 	PCIE_BUS_PERFORMANCE,
  705 	PCIE_BUS_PEER2PEER,
  706 };
  707 
  708 extern enum pcie_bus_config_types pcie_bus_config;
  709 
  710 extern struct bus_type pci_bus_type;
  711 
  712 /* Do NOT directly access this variable, unless you are arch-specific PCI
  713  * code, or PCI core code. */
  714 extern struct list_head pci_root_buses;	/* list of all known PCI buses */
  715 /* Some device drivers need to know if PCI is initialized */
  716 int no_pci_devices(void);
  717 
  718 void pcibios_resource_survey_bus(struct pci_bus *bus);
  719 void pcibios_add_bus(struct pci_bus *bus);
  720 void pcibios_remove_bus(struct pci_bus *bus);
  721 void pcibios_fixup_bus(struct pci_bus *);
  722 int __must_check pcibios_enable_device(struct pci_dev *, int mask);
  723 /* Architecture-specific versions may override this (weak) */
  724 char *pcibios_setup(char *str);
  725 
  726 /* Used only when drivers/pci/setup.c is used */
  727 resource_size_t pcibios_align_resource(void *, const struct resource *,
  728 				resource_size_t,
  729 				resource_size_t);
  730 void pcibios_update_irq(struct pci_dev *, int irq);
  731 
  732 /* Weak, but can be overridden by arch */
  733 void pci_fixup_cardbus(struct pci_bus *);
  734 
  735 /* Generic PCI functions used internally */
  736 
  737 void pcibios_resource_to_bus(struct pci_bus *bus, struct pci_bus_region *region,
  738 			     struct resource *res);
  739 void pcibios_bus_to_resource(struct pci_bus *bus, struct resource *res,
  740 			     struct pci_bus_region *region);
  741 void pcibios_scan_specific_bus(int busn);
  742 struct pci_bus *pci_find_bus(int domain, int busnr);
  743 void pci_bus_add_devices(const struct pci_bus *bus);
  744 struct pci_bus *pci_scan_bus_parented(struct device *parent, int bus,
  745 				      struct pci_ops *ops, void *sysdata);
  746 struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops, void *sysdata);
  747 struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
  748 				    struct pci_ops *ops, void *sysdata,
  749 				    struct list_head *resources);
  750 int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int busmax);
  751 int pci_bus_update_busn_res_end(struct pci_bus *b, int busmax);
  752 void pci_bus_release_busn_res(struct pci_bus *b);
  753 struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
  754 					     struct pci_ops *ops, void *sysdata,
  755 					     struct list_head *resources);
  756 struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
  757 				int busnr);
  758 void pcie_update_link_speed(struct pci_bus *bus, u16 link_status);
  759 struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr,
  760 				 const char *name,
  761 				 struct hotplug_slot *hotplug);
  762 void pci_destroy_slot(struct pci_slot *slot);
  763 int pci_scan_slot(struct pci_bus *bus, int devfn);
  764 struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn);
  765 void pci_device_add(struct pci_dev *dev, struct pci_bus *bus);
  766 unsigned int pci_scan_child_bus(struct pci_bus *bus);
  767 int __must_check pci_bus_add_device(struct pci_dev *dev);
  768 void pci_read_bridge_bases(struct pci_bus *child);
  769 struct resource *pci_find_parent_resource(const struct pci_dev *dev,
  770 					  struct resource *res);
  771 u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin);
  772 int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge);
  773 u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp);
  774 struct pci_dev *pci_dev_get(struct pci_dev *dev);
  775 void pci_dev_put(struct pci_dev *dev);
  776 void pci_remove_bus(struct pci_bus *b);
  777 void pci_stop_and_remove_bus_device(struct pci_dev *dev);
  778 void pci_stop_and_remove_bus_device_locked(struct pci_dev *dev);
  779 void pci_stop_root_bus(struct pci_bus *bus);
  780 void pci_remove_root_bus(struct pci_bus *bus);
  781 void pci_setup_cardbus(struct pci_bus *bus);
  782 void pci_sort_breadthfirst(void);
  783 #define dev_is_pci(d) ((d)->bus == &pci_bus_type)
  784 #define dev_is_pf(d) ((dev_is_pci(d) ? to_pci_dev(d)->is_physfn : false))
  785 #define dev_num_vf(d) ((dev_is_pci(d) ? pci_num_vf(to_pci_dev(d)) : 0))
  786 
  787 /* Generic PCI functions exported to card drivers */
  788 
  789 enum pci_lost_interrupt_reason {
  790 	PCI_LOST_IRQ_NO_INFORMATION = 0,
  791 	PCI_LOST_IRQ_DISABLE_MSI,
  792 	PCI_LOST_IRQ_DISABLE_MSIX,
  793 	PCI_LOST_IRQ_DISABLE_ACPI,
  794 };
  795 enum pci_lost_interrupt_reason pci_lost_interrupt(struct pci_dev *dev);
  796 int pci_find_capability(struct pci_dev *dev, int cap);
  797 int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap);
  798 int pci_find_ext_capability(struct pci_dev *dev, int cap);
  799 int pci_find_next_ext_capability(struct pci_dev *dev, int pos, int cap);
  800 int pci_find_ht_capability(struct pci_dev *dev, int ht_cap);
  801 int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap);
  802 struct pci_bus *pci_find_next_bus(const struct pci_bus *from);
  803 
  804 struct pci_dev *pci_get_device(unsigned int vendor, unsigned int device,
  805 				struct pci_dev *from);
  806 struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device,
  807 				unsigned int ss_vendor, unsigned int ss_device,
  808 				struct pci_dev *from);
  809 struct pci_dev *pci_get_slot(struct pci_bus *bus, unsigned int devfn);
  810 struct pci_dev *pci_get_domain_bus_and_slot(int domain, unsigned int bus,
  811 					    unsigned int devfn);
  812 static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus,
  813 						   unsigned int devfn)
  814 {
  815 	return pci_get_domain_bus_and_slot(0, bus, devfn);
  816 }
  817 struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from);
  818 int pci_dev_present(const struct pci_device_id *ids);
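/* Editor's note: hedged sketch -- the pci_get_* family returns a
 * referenced device and drops the reference on `from` as it advances,
 * so a full for_each_pci_dev() walk needs no manual pci_dev_put()
 * unless it breaks out early: */
static unsigned int example_count_devices(void)
{
	struct pci_dev *pdev = NULL;
	unsigned int count = 0;

	for_each_pci_dev(pdev)
		count++;
	return count;
}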
  819 
  820 int pci_bus_read_config_byte(struct pci_bus *bus, unsigned int devfn,
  821 			     int where, u8 *val);
  822 int pci_bus_read_config_word(struct pci_bus *bus, unsigned int devfn,
  823 			     int where, u16 *val);
  824 int pci_bus_read_config_dword(struct pci_bus *bus, unsigned int devfn,
  825 			      int where, u32 *val);
  826 int pci_bus_write_config_byte(struct pci_bus *bus, unsigned int devfn,
  827 			      int where, u8 val);
  828 int pci_bus_write_config_word(struct pci_bus *bus, unsigned int devfn,
  829 			      int where, u16 val);
  830 int pci_bus_write_config_dword(struct pci_bus *bus, unsigned int devfn,
  831 			       int where, u32 val);
  832 struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops);
  833 
  834 static inline int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val)
  835 {
  836 	return pci_bus_read_config_byte(dev->bus, dev->devfn, where, val);
  837 }
  838 static inline int pci_read_config_word(const struct pci_dev *dev, int where, u16 *val)
  839 {
  840 	return pci_bus_read_config_word(dev->bus, dev->devfn, where, val);
  841 }
  842 static inline int pci_read_config_dword(const struct pci_dev *dev, int where,
  843 					u32 *val)
  844 {
  845 	return pci_bus_read_config_dword(dev->bus, dev->devfn, where, val);
  846 }
  847 static inline int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val)
  848 {
  849 	return pci_bus_write_config_byte(dev->bus, dev->devfn, where, val);
  850 }
  851 static inline int pci_write_config_word(const struct pci_dev *dev, int where, u16 val)
  852 {
  853 	return pci_bus_write_config_word(dev->bus, dev->devfn, where, val);
  854 }
  855 static inline int pci_write_config_dword(const struct pci_dev *dev, int where,
  856 					 u32 val)
  857 {
  858 	return pci_bus_write_config_dword(dev->bus, dev->devfn, where, val);
  859 }
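/* Editor's note: illustrative read-modify-write on the command register
 * using the accessors above (real drivers should prefer pci_set_master(),
 * declared below): */
static void example_set_master_bit(struct pci_dev *dev)
{
	u16 cmd;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	if (!(cmd & PCI_COMMAND_MASTER))
		pci_write_config_word(dev, PCI_COMMAND,
				      cmd | PCI_COMMAND_MASTER);
}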
  860 
  861 int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val);
  862 int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val);
  863 int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val);
  864 int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val);
  865 int pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos,
  866 				       u16 clear, u16 set);
  867 int pcie_capability_clear_and_set_dword(struct pci_dev *dev, int pos,
  868 					u32 clear, u32 set);
  869 
  870 static inline int pcie_capability_set_word(struct pci_dev *dev, int pos,
  871 					   u16 set)
  872 {
  873 	return pcie_capability_clear_and_set_word(dev, pos, 0, set);
  874 }
  875 
  876 static inline int pcie_capability_set_dword(struct pci_dev *dev, int pos,
  877 					    u32 set)
  878 {
  879 	return pcie_capability_clear_and_set_dword(dev, pos, 0, set);
  880 }
  881 
  882 static inline int pcie_capability_clear_word(struct pci_dev *dev, int pos,
  883 					     u16 clear)
  884 {
  885 	return pcie_capability_clear_and_set_word(dev, pos, clear, 0);
  886 }
  887 
  888 static inline int pcie_capability_clear_dword(struct pci_dev *dev, int pos,
  889 					      u32 clear)
  890 {
  891 	return pcie_capability_clear_and_set_dword(dev, pos, clear, 0);
  892 }
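/* Editor's note: illustrative use of the RMW helper on the PCIe Link
 * Control register; the PCI_EXP_* constants come from
 * uapi/linux/pci_regs.h: */
static int example_enable_clkreq(struct pci_dev *dev)
{
	return pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
						  PCI_EXP_LNKCTL_ASPMC,
						  PCI_EXP_LNKCTL_CLKREQ_EN);
}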
  893 
  894 /* user-space driven config access */
  895 int pci_user_read_config_byte(struct pci_dev *dev, int where, u8 *val);
  896 int pci_user_read_config_word(struct pci_dev *dev, int where, u16 *val);
  897 int pci_user_read_config_dword(struct pci_dev *dev, int where, u32 *val);
  898 int pci_user_write_config_byte(struct pci_dev *dev, int where, u8 val);
  899 int pci_user_write_config_word(struct pci_dev *dev, int where, u16 val);
  900 int pci_user_write_config_dword(struct pci_dev *dev, int where, u32 val);
  901 
  902 int __must_check pci_enable_device(struct pci_dev *dev);
  903 int __must_check pci_enable_device_io(struct pci_dev *dev);
  904 int __must_check pci_enable_device_mem(struct pci_dev *dev);
  905 int __must_check pci_reenable_device(struct pci_dev *);
  906 int __must_check pcim_enable_device(struct pci_dev *pdev);
  907 void pcim_pin_device(struct pci_dev *pdev);
  908 
  909 static inline int pci_is_enabled(struct pci_dev *pdev)
  910 {
  911 	return (atomic_read(&pdev->enable_cnt) > 0);
  912 }
  913 
  914 static inline int pci_is_managed(struct pci_dev *pdev)
  915 {
  916 	return pdev->is_managed;
  917 }
  918 
  919 void pci_disable_device(struct pci_dev *dev);
  920 
  921 extern unsigned int pcibios_max_latency;
  922 void pci_set_master(struct pci_dev *dev);
  923 void pci_clear_master(struct pci_dev *dev);
  924 
  925 int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state);
  926 int pci_set_cacheline_size(struct pci_dev *dev);
  927 #define HAVE_PCI_SET_MWI
  928 int __must_check pci_set_mwi(struct pci_dev *dev);
  929 int pci_try_set_mwi(struct pci_dev *dev);
  930 void pci_clear_mwi(struct pci_dev *dev);
  931 void pci_intx(struct pci_dev *dev, int enable);
  932 bool pci_intx_mask_supported(struct pci_dev *dev);
  933 bool pci_check_and_mask_intx(struct pci_dev *dev);
  934 bool pci_check_and_unmask_intx(struct pci_dev *dev);
  935 void pci_msi_off(struct pci_dev *dev);
  936 int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size);
  937 int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask);
  938 int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask);
  939 int pci_wait_for_pending_transaction(struct pci_dev *dev);
  940 int pcix_get_max_mmrbc(struct pci_dev *dev);
  941 int pcix_get_mmrbc(struct pci_dev *dev);
  942 int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc);
  943 int pcie_get_readrq(struct pci_dev *dev);
  944 int pcie_set_readrq(struct pci_dev *dev, int rq);
  945 int pcie_get_mps(struct pci_dev *dev);
  946 int pcie_set_mps(struct pci_dev *dev, int mps);
  947 int pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed,
  948 			  enum pcie_link_width *width);
  949 int __pci_reset_function(struct pci_dev *dev);
  950 int __pci_reset_function_locked(struct pci_dev *dev);
  951 int pci_reset_function(struct pci_dev *dev);
  952 int pci_try_reset_function(struct pci_dev *dev);
  953 int pci_probe_reset_slot(struct pci_slot *slot);
  954 int pci_reset_slot(struct pci_slot *slot);
  955 int pci_try_reset_slot(struct pci_slot *slot);
  956 int pci_probe_reset_bus(struct pci_bus *bus);
  957 int pci_reset_bus(struct pci_bus *bus);
  958 int pci_try_reset_bus(struct pci_bus *bus);
  959 void pci_reset_bridge_secondary_bus(struct pci_dev *dev);
  960 void pci_update_resource(struct pci_dev *dev, int resno);
  961 int __must_check pci_assign_resource(struct pci_dev *dev, int i);
  962 int __must_check pci_reassign_resource(struct pci_dev *dev, int i, resource_size_t add_size, resource_size_t align);
  963 int pci_select_bars(struct pci_dev *dev, unsigned long flags);
  964 bool pci_device_is_present(struct pci_dev *pdev);
  965 
  966 /* ROM control related routines */
  967 int pci_enable_rom(struct pci_dev *pdev);
  968 void pci_disable_rom(struct pci_dev *pdev);
  969 void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size);
  970 void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom);
  971 size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size);
  972 void __iomem __must_check *pci_platform_rom(struct pci_dev *pdev, size_t *size);
  973 
  974 /* Power management related routines */
  975 int pci_save_state(struct pci_dev *dev);
  976 void pci_restore_state(struct pci_dev *dev);
  977 struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev);
  978 int pci_load_and_free_saved_state(struct pci_dev *dev,
  979 				  struct pci_saved_state **state);
  980 struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap);
  981 struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev,
  982 						   u16 cap);
  983 int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size);
  984 int pci_add_ext_cap_save_buffer(struct pci_dev *dev,
  985 				u16 cap, unsigned int size);
  986 int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state);
  987 int pci_set_power_state(struct pci_dev *dev, pci_power_t state);
  988 pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state);
  989 bool pci_pme_capable(struct pci_dev *dev, pci_power_t state);
  990 void pci_pme_active(struct pci_dev *dev, bool enable);
  991 int __pci_enable_wake(struct pci_dev *dev, pci_power_t state,
  992 		      bool runtime, bool enable);
  993 int pci_wake_from_d3(struct pci_dev *dev, bool enable);
  994 int pci_prepare_to_sleep(struct pci_dev *dev);
  995 int pci_back_from_sleep(struct pci_dev *dev);
  996 bool pci_dev_run_wake(struct pci_dev *dev);
  997 bool pci_check_pme_status(struct pci_dev *dev);
  998 void pci_pme_wakeup_bus(struct pci_bus *bus);
  999 
 1000 static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state,
 1001 				  bool enable)
 1002 {
 1003 	return __pci_enable_wake(dev, state, false, enable);
 1004 }
 1005 
 1006 /* PCI Virtual Channel */
 1007 int pci_save_vc_state(struct pci_dev *dev);
 1008 void pci_restore_vc_state(struct pci_dev *dev);
 1009 void pci_allocate_vc_save_buffers(struct pci_dev *dev);
 1010 
 1011 /* For use by arch with custom probe code */
 1012 void set_pcie_port_type(struct pci_dev *pdev);
 1013 void set_pcie_hotplug_bridge(struct pci_dev *pdev);
 1014 
 1015 /* Functions for PCI Hotplug drivers to use */
 1016 int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap);
 1017 unsigned int pci_rescan_bus_bridge_resize(struct pci_dev *bridge);
 1018 unsigned int pci_rescan_bus(struct pci_bus *bus);
 1019 void pci_lock_rescan_remove(void);
 1020 void pci_unlock_rescan_remove(void);
 1021 
 1022 /* Vital product data routines */
 1023 ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
 1024 ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
 1025 
 1026 /* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */
 1027 resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx);
 1028 void pci_bus_assign_resources(const struct pci_bus *bus);
 1029 void pci_bus_size_bridges(struct pci_bus *bus);
 1030 int pci_claim_resource(struct pci_dev *, int);
 1031 void pci_assign_unassigned_resources(void);
 1032 void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge);
 1033 void pci_assign_unassigned_bus_resources(struct pci_bus *bus);
 1034 void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus);
 1035 void pdev_enable_device(struct pci_dev *);
 1036 int pci_enable_resources(struct pci_dev *, int mask);
 1037 void pci_fixup_irqs(u8 (*)(struct pci_dev *, u8 *),
 1038 		    int (*)(const struct pci_dev *, u8, u8));
 1039 #define HAVE_PCI_REQ_REGIONS	2
 1040 int __must_check pci_request_regions(struct pci_dev *, const char *);
 1041 int __must_check pci_request_regions_exclusive(struct pci_dev *, const char *);
 1042 void pci_release_regions(struct pci_dev *);
 1043 int __must_check pci_request_region(struct pci_dev *, int, const char *);
 1044 int __must_check pci_request_region_exclusive(struct pci_dev *, int, const char *);
 1045 void pci_release_region(struct pci_dev *, int);
 1046 int pci_request_selected_regions(struct pci_dev *, int, const char *);
 1047 int pci_request_selected_regions_exclusive(struct pci_dev *, int, const char *);
 1048 void pci_release_selected_regions(struct pci_dev *, int);
 1049 
 1050 /* drivers/pci/bus.c */
 1051 struct pci_bus *pci_bus_get(struct pci_bus *bus);
 1052 void pci_bus_put(struct pci_bus *bus);
 1053 void pci_add_resource(struct list_head *resources, struct resource *res);
 1054 void pci_add_resource_offset(struct list_head *resources, struct resource *res,
 1055 			     resource_size_t offset);
 1056 void pci_free_resource_list(struct list_head *resources);
 1057 void pci_bus_add_resource(struct pci_bus *bus, struct resource *res, unsigned int flags);
 1058 struct resource *pci_bus_resource_n(const struct pci_bus *bus, int n);
 1059 void pci_bus_remove_resources(struct pci_bus *bus);
 1060 
 1061 #define pci_bus_for_each_resource(bus, res, i)				\
 1062 	for (i = 0;							\
 1063 	    (res = pci_bus_resource_n(bus, i)) || i < PCI_BRIDGE_RESOURCE_NUM; \
 1064 	     i++)
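/* Editor's note: illustrative iteration over a bus's windows with the
 * macro above; entries may be NULL and are simply skipped: */
static void example_dump_bus_windows(struct pci_bus *bus)
{
	struct resource *res;
	int i;

	pci_bus_for_each_resource(bus, res, i)
		if (res)
			dev_info(&bus->dev, "window %d: %pR\n", i, res);
}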
 1065 
 1066 int __must_check pci_bus_alloc_resource(struct pci_bus *bus,
 1067 			struct resource *res, resource_size_t size,
 1068 			resource_size_t align, resource_size_t min,
 1069 			unsigned int type_mask,
 1070 			resource_size_t (*alignf)(void *,
 1071 						  const struct resource *,
 1072 						  resource_size_t,
 1073 						  resource_size_t),
 1074 			void *alignf_data);
 1075 
 1076 static inline dma_addr_t pci_bus_address(struct pci_dev *pdev, int bar)
 1077 {
 1078 	struct pci_bus_region region;
 1079 
 1080 	pcibios_resource_to_bus(pdev->bus, &region, &pdev->resource[bar]);
 1081 	return region.start;
 1082 }
 1083 
 1084 /* Proper probing supporting hot-pluggable devices */
 1085 int __must_check __pci_register_driver(struct pci_driver *, struct module *,
 1086 				       const char *mod_name);
 1087 
 1088 /*
 1089  * pci_register_driver must be a macro so that KBUILD_MODNAME can be expanded
 1090  */
 1091 #define pci_register_driver(driver)		\
 1092 	__pci_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)
 1093 
 1094 void pci_unregister_driver(struct pci_driver *dev);
 1095 
 1096 /**
 1097  * module_pci_driver() - Helper macro for registering a PCI driver
 1098  * @__pci_driver: pci_driver struct
 1099  *
 1100  * Helper macro for PCI drivers which do not do anything special in module
 1101  * init/exit. This eliminates a lot of boilerplate. Each module may only
 1102  * use this macro once, and calling it replaces module_init() and module_exit().
 1103  */
 1104 #define module_pci_driver(__pci_driver) \
 1105 	module_driver(__pci_driver, pci_register_driver, \
 1106 		       pci_unregister_driver)
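/* Editor's note: hedged skeleton of a minimal driver built on the macro
 * above; every name is illustrative, and the managed pcim_enable_device()
 * makes an explicit remove callback unnecessary in this trivial case: */
static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	return pcim_enable_device(pdev);
}

static struct pci_driver example_pci_driver = {
	.name		= "example",
	.id_table	= example_ids,	/* a table as sketched earlier */
	.probe		= example_probe,
};
module_pci_driver(example_pci_driver);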
 1107 
 1108 struct pci_driver *pci_dev_driver(const struct pci_dev *dev);
 1109 int pci_add_dynid(struct pci_driver *drv,
 1110 		  unsigned int vendor, unsigned int device,
 1111 		  unsigned int subvendor, unsigned int subdevice,
 1112 		  unsigned int class, unsigned int class_mask,
 1113 		  unsigned long driver_data);
 1114 const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
 1115 					 struct pci_dev *dev);
 1116 int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
 1117 		    int pass);
 1118 
 1119 void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
 1120 		  void *userdata);
 1121 int pci_cfg_space_size(struct pci_dev *dev);
 1122 unsigned char pci_bus_max_busnr(struct pci_bus *bus);
 1123 void pci_setup_bridge(struct pci_bus *bus);
 1124 resource_size_t pcibios_window_alignment(struct pci_bus *bus,
 1125 					 unsigned long type);
 1126 
 1127 #define PCI_VGA_STATE_CHANGE_BRIDGE (1 << 0)
 1128 #define PCI_VGA_STATE_CHANGE_DECODES (1 << 1)
 1129 
 1130 int pci_set_vga_state(struct pci_dev *pdev, bool decode,
 1131 		      unsigned int command_bits, u32 flags);
 1132 /* kmem_cache style wrapper around pci_alloc_consistent() */
 1133 
 1134 #include <linux/pci-dma.h>
 1135 #include <linux/dmapool.h>
 1136 
 1137 #define	pci_pool dma_pool
 1138 #define pci_pool_create(name, pdev, size, align, allocation) \
 1139 		dma_pool_create(name, &pdev->dev, size, align, allocation)
 1140 #define	pci_pool_destroy(pool) dma_pool_destroy(pool)
 1141 #define	pci_pool_alloc(pool, flags, handle) dma_pool_alloc(pool, flags, handle)
 1142 #define	pci_pool_free(pool, vaddr, addr) dma_pool_free(pool, vaddr, addr)
 1143 
 1144 enum pci_dma_burst_strategy {
 1145 	PCI_DMA_BURST_INFINITY,	/* make bursts as large as possible,
 1146 				   strategy_parameter is N/A */
 1147 	PCI_DMA_BURST_BOUNDARY, /* disconnect at every strategy_parameter
 1148 				   byte boundaries */
 1149 	PCI_DMA_BURST_MULTIPLE, /* disconnect at some multiple of
 1150 				   strategy_parameter byte boundaries */
 1151 };
 1152 
 1153 struct msix_entry {
 1154 	u32	vector;	/* the kernel writes the allocated vector here */
 1155 	u16	entry;	/* the driver specifies which table entry it wants */
 1156 };
 1157 
 1158 
 1159 #ifdef CONFIG_PCI_MSI
 1160 int pci_msi_vec_count(struct pci_dev *dev);
 1161 int pci_enable_msi_block(struct pci_dev *dev, int nvec);
 1162 void pci_msi_shutdown(struct pci_dev *dev);
 1163 void pci_disable_msi(struct pci_dev *dev);
 1164 int pci_msix_vec_count(struct pci_dev *dev);
 1165 int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec);
 1166 void pci_msix_shutdown(struct pci_dev *dev);
 1167 void pci_disable_msix(struct pci_dev *dev);
 1168 void msi_remove_pci_irq_vectors(struct pci_dev *dev);
 1169 void pci_restore_msi_state(struct pci_dev *dev);
 1170 int pci_msi_enabled(void);
 1171 int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec);
 1172 static inline int pci_enable_msi_exact(struct pci_dev *dev, int nvec)
 1173 {
 1174 	int rc = pci_enable_msi_range(dev, nvec, nvec);
 1175 	if (rc < 0)
 1176 		return rc;
 1177 	return 0;
 1178 }
 1179 int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
 1180 			  int minvec, int maxvec);
 1181 static inline int pci_enable_msix_exact(struct pci_dev *dev,
 1182 					struct msix_entry *entries, int nvec)
 1183 {
 1184 	int rc = pci_enable_msix_range(dev, entries, nvec, nvec);
 1185 	if (rc < 0)
 1186 		return rc;
 1187 	return 0;
 1188 }
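/*
 * Allocation sketch (hypothetical driver code): request up to 8 MSI-X
 * vectors but accept as few as 1.  pci_enable_msix_range() returns the
 * number of vectors actually allocated, or a negative errno on failure,
 * in which case the driver would typically fall back to MSI or INTx.
 *
 *	struct msix_entry entries[8];
 *	int i, nvec;
 *
 *	for (i = 0; i < 8; i++)
 *		entries[i].entry = i;
 *	nvec = pci_enable_msix_range(pdev, entries, 1, 8);
 *	if (nvec < 0)
 *		return nvec;
 */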
 1189 #else
 1190 static inline int pci_msi_vec_count(struct pci_dev *dev) { return -ENOSYS; }
 1191 static inline int pci_enable_msi_block(struct pci_dev *dev, int nvec)
 1192 { return -ENOSYS; }
 1193 static inline void pci_msi_shutdown(struct pci_dev *dev) { }
 1194 static inline void pci_disable_msi(struct pci_dev *dev) { }
 1195 static inline int pci_msix_vec_count(struct pci_dev *dev) { return -ENOSYS; }
 1196 static inline int pci_enable_msix(struct pci_dev *dev,
 1197 				  struct msix_entry *entries, int nvec)
 1198 { return -ENOSYS; }
 1199 static inline void pci_msix_shutdown(struct pci_dev *dev) { }
 1200 static inline void pci_disable_msix(struct pci_dev *dev) { }
 1201 static inline void msi_remove_pci_irq_vectors(struct pci_dev *dev) { }
 1202 static inline void pci_restore_msi_state(struct pci_dev *dev) { }
 1203 static inline int pci_msi_enabled(void) { return 0; }
 1204 static inline int pci_enable_msi_range(struct pci_dev *dev, int minvec,
 1205 				       int maxvec)
 1206 { return -ENOSYS; }
 1207 static inline int pci_enable_msi_exact(struct pci_dev *dev, int nvec)
 1208 { return -ENOSYS; }
 1209 static inline int pci_enable_msix_range(struct pci_dev *dev,
 1210 		      struct msix_entry *entries, int minvec, int maxvec)
 1211 { return -ENOSYS; }
 1212 static inline int pci_enable_msix_exact(struct pci_dev *dev,
 1213 		      struct msix_entry *entries, int nvec)
 1214 { return -ENOSYS; }
 1215 #endif
 1216 
 1217 #ifdef CONFIG_PCIEPORTBUS
 1218 extern bool pcie_ports_disabled;
 1219 extern bool pcie_ports_auto;
 1220 #else
 1221 #define pcie_ports_disabled	true
 1222 #define pcie_ports_auto		false
 1223 #endif
 1224 
 1225 #ifdef CONFIG_PCIEASPM
 1226 bool pcie_aspm_support_enabled(void);
 1227 #else
 1228 static inline bool pcie_aspm_support_enabled(void) { return false; }
 1229 #endif
 1230 
 1231 #ifdef CONFIG_PCIEAER
 1232 void pci_no_aer(void);
 1233 bool pci_aer_available(void);
 1234 #else
 1235 static inline void pci_no_aer(void) { }
 1236 static inline bool pci_aer_available(void) { return false; }
 1237 #endif
 1238 
 1239 #ifdef CONFIG_PCIE_ECRC
 1240 void pcie_set_ecrc_checking(struct pci_dev *dev);
 1241 void pcie_ecrc_get_policy(char *str);
 1242 #else
 1243 static inline void pcie_set_ecrc_checking(struct pci_dev *dev) { }
 1244 static inline void pcie_ecrc_get_policy(char *str) { }
 1245 #endif
 1246 
 1247 #define pci_enable_msi(pdev)	pci_enable_msi_block(pdev, 1)
 1248 
 1249 #ifdef CONFIG_HT_IRQ
 1250 /* The functions a driver should call */
 1251 int  ht_create_irq(struct pci_dev *dev, int idx);
 1252 void ht_destroy_irq(unsigned int irq);
 1253 #endif /* CONFIG_HT_IRQ */
 1254 
 1255 void pci_cfg_access_lock(struct pci_dev *dev);
 1256 bool pci_cfg_access_trylock(struct pci_dev *dev);
 1257 void pci_cfg_access_unlock(struct pci_dev *dev);
 1258 
 1259 /*
 1260  * PCI domain support.  Sometimes called PCI segment (eg by ACPI),
 1261  * a PCI domain is defined to be a set of PCI buses which share
 1262  * configuration space.
 1263  */
 1264 #ifdef CONFIG_PCI_DOMAINS
 1265 extern int pci_domains_supported;
 1266 #else
 1267 enum { pci_domains_supported = 0 };
 1268 static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
 1269 static inline int pci_proc_domain(struct pci_bus *bus) { return 0; }
 1270 #endif /* CONFIG_PCI_DOMAINS */
 1271 
 1272 /* some architectures require additional setup to direct VGA traffic */
 1273 typedef int (*arch_set_vga_state_t)(struct pci_dev *pdev, bool decode,
 1274 		      unsigned int command_bits, u32 flags);
 1275 void pci_register_set_vga_state(arch_set_vga_state_t func);
 1276 
 1277 #else /* CONFIG_PCI is not enabled */
 1278 
 1279 /*
 1280  *  If the system does not have PCI, clearly these return errors.  Define
 1281  *  these as simple inline functions to avoid hair in drivers.
 1282  */
 1283 
 1284 #define _PCI_NOP(o, s, t) \
 1285 	static inline int pci_##o##_config_##s(struct pci_dev *dev, \
 1286 						int where, t val) \
 1287 		{ return PCIBIOS_FUNC_NOT_SUPPORTED; }
 1288 
 1289 #define _PCI_NOP_ALL(o, x)	_PCI_NOP(o, byte, u8 x) \
 1290 				_PCI_NOP(o, word, u16 x) \
 1291 				_PCI_NOP(o, dword, u32 x)
 1292 _PCI_NOP_ALL(read, *)
 1293 _PCI_NOP_ALL(write,)
 1294 
 1295 static inline struct pci_dev *pci_get_device(unsigned int vendor,
 1296 					     unsigned int device,
 1297 					     struct pci_dev *from)
 1298 { return NULL; }
 1299 
 1300 static inline struct pci_dev *pci_get_subsys(unsigned int vendor,
 1301 					     unsigned int device,
 1302 					     unsigned int ss_vendor,
 1303 					     unsigned int ss_device,
 1304 					     struct pci_dev *from)
 1305 { return NULL; }
 1306 
 1307 static inline struct pci_dev *pci_get_class(unsigned int class,
 1308 					    struct pci_dev *from)
 1309 { return NULL; }
 1310 
 1311 #define pci_dev_present(ids)	(0)
 1312 #define no_pci_devices()	(1)
 1313 #define pci_dev_put(dev)	do { } while (0)
 1314 
 1315 static inline void pci_set_master(struct pci_dev *dev) { }
 1316 static inline int pci_enable_device(struct pci_dev *dev) { return -EIO; }
 1317 static inline void pci_disable_device(struct pci_dev *dev) { }
 1318 static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
 1319 { return -EIO; }
 1320 static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
 1321 { return -EIO; }
 1322 static inline int pci_set_dma_max_seg_size(struct pci_dev *dev,
 1323 					unsigned int size)
 1324 { return -EIO; }
 1325 static inline int pci_set_dma_seg_boundary(struct pci_dev *dev,
 1326 					unsigned long mask)
 1327 { return -EIO; }
 1328 static inline int pci_assign_resource(struct pci_dev *dev, int i)
 1329 { return -EBUSY; }
 1330 static inline int __pci_register_driver(struct pci_driver *drv,
 1331 					struct module *owner)
 1332 { return 0; }
 1333 static inline int pci_register_driver(struct pci_driver *drv)
 1334 { return 0; }
 1335 static inline void pci_unregister_driver(struct pci_driver *drv) { }
 1336 static inline int pci_find_capability(struct pci_dev *dev, int cap)
 1337 { return 0; }
 1338 static inline int pci_find_next_capability(struct pci_dev *dev, u8 post,
 1339 					   int cap)
 1340 { return 0; }
 1341 static inline int pci_find_ext_capability(struct pci_dev *dev, int cap)
 1342 { return 0; }
 1343 
 1344 /* Power management related routines */
 1345 static inline int pci_save_state(struct pci_dev *dev) { return 0; }
 1346 static inline void pci_restore_state(struct pci_dev *dev) { }
 1347 static inline int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
 1348 { return 0; }
 1349 static inline int pci_wake_from_d3(struct pci_dev *dev, bool enable)
 1350 { return 0; }
 1351 static inline pci_power_t pci_choose_state(struct pci_dev *dev,
 1352 					   pm_message_t state)
 1353 { return PCI_D0; }
 1354 static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state,
 1355 				  int enable)
 1356 { return 0; }
 1357 
 1358 static inline int pci_request_regions(struct pci_dev *dev, const char *res_name)
 1359 { return -EIO; }
 1360 static inline void pci_release_regions(struct pci_dev *dev) { }
 1361 
 1362 #define pci_dma_burst_advice(pdev, strat, strategy_parameter) do { } while (0)
 1363 
 1364 static inline void pci_block_cfg_access(struct pci_dev *dev) { }
 1365 static inline int pci_block_cfg_access_in_atomic(struct pci_dev *dev)
 1366 { return 0; }
 1367 static inline void pci_unblock_cfg_access(struct pci_dev *dev) { }
 1368 
 1369 static inline struct pci_bus *pci_find_next_bus(const struct pci_bus *from)
 1370 { return NULL; }
 1371 static inline struct pci_dev *pci_get_slot(struct pci_bus *bus,
 1372 						unsigned int devfn)
 1373 { return NULL; }
 1374 static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus,
 1375 						unsigned int devfn)
 1376 { return NULL; }
 1377 
 1378 static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
 1379 static inline struct pci_dev *pci_dev_get(struct pci_dev *dev) { return NULL; }
 1380 
 1381 #define dev_is_pci(d) (false)
 1382 #define dev_is_pf(d) (false)
 1383 #define dev_num_vf(d) (0)
 1384 #endif /* CONFIG_PCI */
 1385 
 1386 /* Include architecture-dependent settings and functions */
 1387 
 1388 #include <asm/pci.h>
 1389 
 1390 /* these helpers provide future and backwards compatibility
 1391  * for accessing popular PCI BAR info */
 1392 #define pci_resource_start(dev, bar)	((dev)->resource[(bar)].start)
 1393 #define pci_resource_end(dev, bar)	((dev)->resource[(bar)].end)
 1394 #define pci_resource_flags(dev, bar)	((dev)->resource[(bar)].flags)
 1395 #define pci_resource_len(dev,bar) \
 1396 	((pci_resource_start((dev), (bar)) == 0 &&	\
 1397 	  pci_resource_end((dev), (bar)) ==		\
 1398 	  pci_resource_start((dev), (bar))) ? 0 :	\
 1399 							\
 1400 	 (pci_resource_end((dev), (bar)) -		\
 1401 	  pci_resource_start((dev), (bar)) + 1))
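/*
 * BAR access sketch (illustrative; assumes a memory BAR 0): the helpers
 * above are typically combined with ioremap() after the region has been
 * claimed with pci_request_regions():
 *
 *	void __iomem *regs;
 *
 *	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM))
 *		return -ENODEV;
 *	regs = ioremap(pci_resource_start(pdev, 0),
 *		       pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 */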
 1402 
 1403 /* Similar to the helpers above, these manipulate per-pci_dev
 1404  * driver-specific data.  They are really just a wrapper around
 1405  * the generic device structure functions of these calls.
 1406  */
 1407 static inline void *pci_get_drvdata(struct pci_dev *pdev)
 1408 {
 1409 	return dev_get_drvdata(&pdev->dev);
 1410 }
 1411 
 1412 static inline void pci_set_drvdata(struct pci_dev *pdev, void *data)
 1413 {
 1414 	dev_set_drvdata(&pdev->dev, data);
 1415 }
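/*
 * Typical pattern (sketch; "struct my_priv" and the callbacks are
 * hypothetical): allocate private state in probe(), stash it with
 * pci_set_drvdata(), and retrieve it again in remove():
 *
 *	static int my_probe(struct pci_dev *pdev,
 *			    const struct pci_device_id *id)
 *	{
 *		struct my_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 *
 *		if (!priv)
 *			return -ENOMEM;
 *		pci_set_drvdata(pdev, priv);
 *		return 0;
 *	}
 *
 *	static void my_remove(struct pci_dev *pdev)
 *	{
 *		kfree(pci_get_drvdata(pdev));
 *	}
 */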
 1416 
 1417 /* If you want to know what to call your pci_dev, ask this function.
 1418  * Again, it's a wrapper around the generic device.
 1419  */
 1420 static inline const char *pci_name(const struct pci_dev *pdev)
 1421 {
 1422 	return dev_name(&pdev->dev);
 1423 }
 1424 
 1425 
 1426 /* Some archs don't want to expose struct resource to userland as-is
 1427  * in sysfs and /proc
 1428  */
 1429 #ifndef HAVE_ARCH_PCI_RESOURCE_TO_USER
 1430 static inline void pci_resource_to_user(const struct pci_dev *dev, int bar,
 1431 		const struct resource *rsrc, resource_size_t *start,
 1432 		resource_size_t *end)
 1433 {
 1434 	*start = rsrc->start;
 1435 	*end = rsrc->end;
 1436 }
 1437 #endif /* HAVE_ARCH_PCI_RESOURCE_TO_USER */
 1438 
 1439 
 1440 /*
 1441  *  The world is not perfect and supplies us with broken PCI devices.
 1442  *  At least some of these bugs need a work-around, so both
 1443  *  generic (drivers/pci/quirks.c) and per-architecture code can define
 1444  *  fixup hooks to be called for particular buggy devices.
 1445  */
 1446 
 1447 struct pci_fixup {
 1448 	u16 vendor;		/* You can use PCI_ANY_ID here of course */
 1449 	u16 device;		/* You can use PCI_ANY_ID here of course */
 1450 	u32 class;		/* You can use PCI_ANY_ID here too */
 1451 	unsigned int class_shift;	/* should be 0, 8, 16 */
 1452 	void (*hook)(struct pci_dev *dev);
 1453 };
 1454 
 1455 enum pci_fixup_pass {
 1456 	pci_fixup_early,	/* Before probing BARs */
 1457 	pci_fixup_header,	/* After reading configuration header */
 1458 	pci_fixup_final,	/* Final phase of device fixups */
 1459 	pci_fixup_enable,	/* pci_enable_device() time */
 1460 	pci_fixup_resume,	/* pci_device_resume() */
 1461 	pci_fixup_suspend,	/* pci_device_suspend() */
 1462 	pci_fixup_resume_early, /* pci_device_resume_early() */
 1463 };
 1464 
 1465 /* Anonymous variables would be nice... */
 1466 #define DECLARE_PCI_FIXUP_SECTION(section, name, vendor, device, class,	\
 1467 				  class_shift, hook)			\
 1468 	static const struct pci_fixup __PASTE(__pci_fixup_##name,__LINE__) __used	\
 1469 	__attribute__((__section__(#section), aligned((sizeof(void *)))))    \
 1470 		= { vendor, device, class, class_shift, hook };
 1471 
 1472 #define DECLARE_PCI_FIXUP_CLASS_EARLY(vendor, device, class,		\
 1473 					 class_shift, hook)		\
 1474 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early,			\
 1475 		hook, vendor, device, class, class_shift, hook)
 1476 #define DECLARE_PCI_FIXUP_CLASS_HEADER(vendor, device, class,		\
 1477 					 class_shift, hook)		\
 1478 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header,			\
 1479 		hook, vendor, device, class, class_shift, hook)
 1480 #define DECLARE_PCI_FIXUP_CLASS_FINAL(vendor, device, class,		\
 1481 					 class_shift, hook)		\
 1482 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final,			\
 1483 		hook, vendor, device, class, class_shift, hook)
 1484 #define DECLARE_PCI_FIXUP_CLASS_ENABLE(vendor, device, class,		\
 1485 					 class_shift, hook)		\
 1486 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable,			\
 1487 		hook, vendor, device, class, class_shift, hook)
 1488 #define DECLARE_PCI_FIXUP_CLASS_RESUME(vendor, device, class,		\
 1489 					 class_shift, hook)		\
 1490 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume,			\
 1491 		resume##hook, vendor, device, class,	\
 1492 		class_shift, hook)
 1493 #define DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(vendor, device, class,	\
 1494 					 class_shift, hook)		\
 1495 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early,		\
 1496 		resume_early##hook, vendor, device,	\
 1497 		class, class_shift, hook)
 1498 #define DECLARE_PCI_FIXUP_CLASS_SUSPEND(vendor, device, class,		\
 1499 					 class_shift, hook)		\
 1500 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend,			\
 1501 		suspend##hook, vendor, device, class,	\
 1502 		class_shift, hook)
 1503 
 1504 #define DECLARE_PCI_FIXUP_EARLY(vendor, device, hook)			\
 1505 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early,			\
 1506 		hook, vendor, device, PCI_ANY_ID, 0, hook)
 1507 #define DECLARE_PCI_FIXUP_HEADER(vendor, device, hook)			\
 1508 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header,			\
 1509 		hook, vendor, device, PCI_ANY_ID, 0, hook)
 1510 #define DECLARE_PCI_FIXUP_FINAL(vendor, device, hook)			\
 1511 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final,			\
 1512 		hook, vendor, device, PCI_ANY_ID, 0, hook)
 1513 #define DECLARE_PCI_FIXUP_ENABLE(vendor, device, hook)			\
 1514 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable,			\
 1515 		hook, vendor, device, PCI_ANY_ID, 0, hook)
 1516 #define DECLARE_PCI_FIXUP_RESUME(vendor, device, hook)			\
 1517 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume,			\
 1518 		resume##hook, vendor, device,		\
 1519 		PCI_ANY_ID, 0, hook)
 1520 #define DECLARE_PCI_FIXUP_RESUME_EARLY(vendor, device, hook)		\
 1521 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early,		\
 1522 		resume_early##hook, vendor, device,	\
 1523 		PCI_ANY_ID, 0, hook)
 1524 #define DECLARE_PCI_FIXUP_SUSPEND(vendor, device, hook)			\
 1525 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend,			\
 1526 		suspend##hook, vendor, device,		\
 1527 		PCI_ANY_ID, 0, hook)
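/*
 * Quirk sketch (hypothetical vendor/device IDs and hook): a final-pass
 * fixup that runs once for every matching device after enumeration:
 *
 *	static void quirk_my_board(struct pci_dev *dev)
 *	{
 *		dev_info(&dev->dev, "applying my_board quirk\n");
 *	}
 *	DECLARE_PCI_FIXUP_FINAL(0x1234, 0x5678, quirk_my_board);
 */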
 1528 
 1529 #ifdef CONFIG_PCI_QUIRKS
 1530 void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev);
 1531 struct pci_dev *pci_get_dma_source(struct pci_dev *dev);
 1532 int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags);
 1533 #else
 1534 static inline void pci_fixup_device(enum pci_fixup_pass pass,
 1535 				    struct pci_dev *dev) { }
 1536 static inline struct pci_dev *pci_get_dma_source(struct pci_dev *dev)
 1537 {
 1538 	return pci_dev_get(dev);
 1539 }
 1540 static inline int pci_dev_specific_acs_enabled(struct pci_dev *dev,
 1541 					       u16 acs_flags)
 1542 {
 1543 	return -ENOTTY;
 1544 }
 1545 #endif
 1546 
 1547 void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen);
 1548 void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr);
 1549 void __iomem * const *pcim_iomap_table(struct pci_dev *pdev);
 1550 int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name);
 1551 int pcim_iomap_regions_request_all(struct pci_dev *pdev, int mask,
 1552 				   const char *name);
 1553 void pcim_iounmap_regions(struct pci_dev *pdev, int mask);
 1554 
 1555 extern int pci_pci_problems;
 1556 #define PCIPCI_FAIL		1	/* No PCI-to-PCI DMA */
 1557 #define PCIPCI_TRITON		2
 1558 #define PCIPCI_NATOMA		4
 1559 #define PCIPCI_VIAETBF		8
 1560 #define PCIPCI_VSFX		16
 1561 #define PCIPCI_ALIMAGIK		32	/* Need low latency setting */
 1562 #define PCIAGP_FAIL		64	/* No PCI to AGP DMA */
 1563 
 1564 extern unsigned long pci_cardbus_io_size;
 1565 extern unsigned long pci_cardbus_mem_size;
 1566 extern u8 pci_dfl_cache_line_size;
 1567 extern u8 pci_cache_line_size;
 1568 
 1569 extern unsigned long pci_hotplug_io_size;
 1570 extern unsigned long pci_hotplug_mem_size;
 1571 
 1572 /* Architecture-specific versions may override these (weak) */
 1573 int pcibios_add_platform_entries(struct pci_dev *dev);
 1574 void pcibios_disable_device(struct pci_dev *dev);
 1575 void pcibios_set_master(struct pci_dev *dev);
 1576 int pcibios_set_pcie_reset_state(struct pci_dev *dev,
 1577 				 enum pcie_reset_state state);
 1578 int pcibios_add_device(struct pci_dev *dev);
 1579 void pcibios_release_device(struct pci_dev *dev);
 1580 
 1581 #ifdef CONFIG_HIBERNATE_CALLBACKS
 1582 extern struct dev_pm_ops pcibios_pm_ops;
 1583 #endif
 1584 
 1585 #ifdef CONFIG_PCI_MMCONFIG
 1586 void __init pci_mmcfg_early_init(void);
 1587 void __init pci_mmcfg_late_init(void);
 1588 #else
 1589 static inline void pci_mmcfg_early_init(void) { }
 1590 static inline void pci_mmcfg_late_init(void) { }
 1591 #endif
 1592 
 1593 int pci_ext_cfg_avail(void);
 1594 
 1595 void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar);
 1596 
 1597 #ifdef CONFIG_PCI_IOV
 1598 int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn);
 1599 void pci_disable_sriov(struct pci_dev *dev);
 1600 irqreturn_t pci_sriov_migration(struct pci_dev *dev);
 1601 int pci_num_vf(struct pci_dev *dev);
 1602 int pci_vfs_assigned(struct pci_dev *dev);
 1603 int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs);
 1604 int pci_sriov_get_totalvfs(struct pci_dev *dev);
 1605 #else
 1606 static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
 1607 { return -ENODEV; }
 1608 static inline void pci_disable_sriov(struct pci_dev *dev) { }
 1609 static inline irqreturn_t pci_sriov_migration(struct pci_dev *dev)
 1610 { return IRQ_NONE; }
 1611 static inline int pci_num_vf(struct pci_dev *dev) { return 0; }
 1612 static inline int pci_vfs_assigned(struct pci_dev *dev)
 1613 { return 0; }
 1614 static inline int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs)
 1615 { return 0; }
 1616 static inline int pci_sriov_get_totalvfs(struct pci_dev *dev)
 1617 { return 0; }
 1618 #endif
 1619 
 1620 #if defined(CONFIG_HOTPLUG_PCI) || defined(CONFIG_HOTPLUG_PCI_MODULE)
 1621 void pci_hp_create_module_link(struct pci_slot *pci_slot);
 1622 void pci_hp_remove_module_link(struct pci_slot *pci_slot);
 1623 #endif
 1624 
 1625 /**
 1626  * pci_pcie_cap - get the saved PCIe capability offset
 1627  * @dev: PCI device
 1628  *
 1629  * The PCIe capability offset is calculated at PCI device initialization
 1630  * time and saved in the data structure. This function returns the saved
 1631  * offset. Using it instead of pci_find_capability() avoids an
 1632  * unnecessary search of the PCI configuration space. If you need to
 1633  * recalculate the PCIe capability offset from the raw device for some
 1634  * reason, use pci_find_capability() instead.
 1635  */
 1636 static inline int pci_pcie_cap(struct pci_dev *dev)
 1637 {
 1638 	return dev->pcie_cap;
 1639 }
 1640 
 1641 /**
 1642  * pci_is_pcie - check if the PCI device is PCI Express capable
 1643  * @dev: PCI device
 1644  *
 1645  * Returns: true if the PCI device is PCI Express capable, false otherwise.
 1646  */
 1647 static inline bool pci_is_pcie(struct pci_dev *dev)
 1648 {
 1649 	return pci_pcie_cap(dev);
 1650 }
 1651 
 1652 /**
 1653  * pcie_caps_reg - get the PCIe Capabilities Register
 1654  * @dev: PCI device
 1655  */
 1656 static inline u16 pcie_caps_reg(const struct pci_dev *dev)
 1657 {
 1658 	return dev->pcie_flags_reg;
 1659 }
 1660 
 1661 /**
 1662  * pci_pcie_type - get the PCIe device/port type
 1663  * @dev: PCI device
 1664  */
 1665 static inline int pci_pcie_type(const struct pci_dev *dev)
 1666 {
 1667 	return (pcie_caps_reg(dev) & PCI_EXP_FLAGS_TYPE) >> 4;
 1668 }
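/*
 * Sketch (configure_root_port() is hypothetical): combine pci_is_pcie()
 * with pci_pcie_type() to act only on a particular port type:
 *
 *	if (pci_is_pcie(dev) &&
 *	    pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
 *		configure_root_port(dev);
 */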
 1669 
 1670 void pci_request_acs(void);
 1671 bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags);
 1672 bool pci_acs_path_enabled(struct pci_dev *start,
 1673 			  struct pci_dev *end, u16 acs_flags);
 1674 
 1675 #define PCI_VPD_LRDT			0x80	/* Large Resource Data Type */
 1676 #define PCI_VPD_LRDT_ID(x)		(x | PCI_VPD_LRDT)
 1677 
 1678 /* Large Resource Data Type Tag Item Names */
 1679 #define PCI_VPD_LTIN_ID_STRING		0x02	/* Identifier String */
 1680 #define PCI_VPD_LTIN_RO_DATA		0x10	/* Read-Only Data */
 1681 #define PCI_VPD_LTIN_RW_DATA		0x11	/* Read-Write Data */
 1682 
 1683 #define PCI_VPD_LRDT_ID_STRING		PCI_VPD_LRDT_ID(PCI_VPD_LTIN_ID_STRING)
 1684 #define PCI_VPD_LRDT_RO_DATA		PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RO_DATA)
 1685 #define PCI_VPD_LRDT_RW_DATA		PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RW_DATA)
 1686 
 1687 /* Small Resource Data Type Tag Item Names */
 1688 #define PCI_VPD_STIN_END		0x78	/* End */
 1689 
 1690 #define PCI_VPD_SRDT_END		PCI_VPD_STIN_END
 1691 
 1692 #define PCI_VPD_SRDT_TIN_MASK		0x78
 1693 #define PCI_VPD_SRDT_LEN_MASK		0x07
 1694 
 1695 #define PCI_VPD_LRDT_TAG_SIZE		3
 1696 #define PCI_VPD_SRDT_TAG_SIZE		1
 1697 
 1698 #define PCI_VPD_INFO_FLD_HDR_SIZE	3
 1699 
 1700 #define PCI_VPD_RO_KEYWORD_PARTNO	"PN"
 1701 #define PCI_VPD_RO_KEYWORD_MFR_ID	"MN"
 1702 #define PCI_VPD_RO_KEYWORD_VENDOR0	"V0"
 1703 #define PCI_VPD_RO_KEYWORD_CHKSUM	"RV"
 1704 
 1705 /**
 1706  * pci_vpd_lrdt_size - Extracts the Large Resource Data Type length
 1707  * @lrdt: Pointer to the beginning of the Large Resource Data Type tag
 1708  *
 1709  * Returns the extracted Large Resource Data Type length.
 1710  */
 1711 static inline u16 pci_vpd_lrdt_size(const u8 *lrdt)
 1712 {
 1713 	return (u16)lrdt[1] + ((u16)lrdt[2] << 8);
 1714 }
 1715 
 1716 /**
 1717  * pci_vpd_srdt_size - Extracts the Small Resource Data Type length
 1718  * @srdt: Pointer to the beginning of the Small Resource Data Type tag
 1719  *
 1720  * Returns the extracted Small Resource Data Type length.
 1721  */
 1722 static inline u8 pci_vpd_srdt_size(const u8 *srdt)
 1723 {
 1724 	return (*srdt) & PCI_VPD_SRDT_LEN_MASK;
 1725 }
 1726 
 1727 /**
 1728  * pci_vpd_info_field_size - Extracts the information field length
 1729  * @info_field: Pointer to the beginning of an information field header
 1730  *
 1731  * Returns the extracted information field length.
 1732  */
 1733 static inline u8 pci_vpd_info_field_size(const u8 *info_field)
 1734 {
 1735 	return info_field[2];
 1736 }
 1737 
 1738 /**
 1739  * pci_vpd_find_tag - Locates the Resource Data Type tag provided
 1740  * @buf: Pointer to buffered vpd data
 1741  * @off: The offset into the buffer at which to begin the search
 1742  * @len: The length of the vpd buffer
 1743  * @rdt: The Resource Data Type to search for
 1744  *
 1745  * Returns the index where the Resource Data Type was found or
 1746  * -ENOENT otherwise.
 1747  */
 1748 int pci_vpd_find_tag(const u8 *buf, unsigned int off, unsigned int len, u8 rdt);
 1749 
 1750 /**
 1751  * pci_vpd_find_info_keyword - Locates an information field keyword in the VPD
 1752  * @buf: Pointer to buffered vpd data
 1753  * @off: The offset into the buffer at which to begin the search
 1754  * @len: The length of the buffer area, relative to off, in which to search
 1755  * @kw: The keyword to search for
 1756  *
 1757  * Returns the index where the information field keyword was found or
 1758  * -ENOENT otherwise.
 1759  */
 1760 int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off,
 1761 			      unsigned int len, const char *kw);
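/*
 * Parsing sketch (assumes "buf" and "len" describe VPD data already read
 * from the device): locate the read-only section, then look up the
 * part-number keyword inside it:
 *
 *	int ro, kw;
 *	unsigned int ro_len;
 *
 *	ro = pci_vpd_find_tag(buf, 0, len, PCI_VPD_LRDT_RO_DATA);
 *	if (ro < 0)
 *		return ro;
 *	ro_len = pci_vpd_lrdt_size(&buf[ro]);
 *	kw = pci_vpd_find_info_keyword(buf, ro + PCI_VPD_LRDT_TAG_SIZE,
 *				       ro_len, PCI_VPD_RO_KEYWORD_PARTNO);
 */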
 1762 
 1763 /* PCI <-> OF binding helpers */
 1764 #ifdef CONFIG_OF
 1765 struct device_node;
 1766 void pci_set_of_node(struct pci_dev *dev);
 1767 void pci_release_of_node(struct pci_dev *dev);
 1768 void pci_set_bus_of_node(struct pci_bus *bus);
 1769 void pci_release_bus_of_node(struct pci_bus *bus);
 1770 
 1771 /* Arch may override this (weak) */
 1772 struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus);
 1773 
 1774 static inline struct device_node *
 1775 pci_device_to_OF_node(const struct pci_dev *pdev)
 1776 {
 1777 	return pdev ? pdev->dev.of_node : NULL;
 1778 }
 1779 
 1780 static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus)
 1781 {
 1782 	return bus ? bus->dev.of_node : NULL;
 1783 }
 1784 
 1785 #else /* CONFIG_OF */
 1786 static inline void pci_set_of_node(struct pci_dev *dev) { }
 1787 static inline void pci_release_of_node(struct pci_dev *dev) { }
 1788 static inline void pci_set_bus_of_node(struct pci_bus *bus) { }
 1789 static inline void pci_release_bus_of_node(struct pci_bus *bus) { }
 1790 #endif  /* CONFIG_OF */
 1791 
 1792 #ifdef CONFIG_EEH
 1793 static inline struct eeh_dev *pci_dev_to_eeh_dev(struct pci_dev *pdev)
 1794 {
 1795 	return pdev->dev.archdata.edev;
 1796 }
 1797 #endif
 1798 
 1799 /**
 1800  * pci_find_upstream_pcie_bridge - find upstream PCIe-to-PCI bridge of a device
 1801  * @pdev: the PCI device
 1802  *
 1803  * If the device is PCIe, return NULL.
 1804  * If the device isn't connected to a PCIe bridge (that is, its parent is a
 1805  * legacy PCI bridge and the bridge is directly connected to bus 0), return
 1806  * its parent.
 1807  */
 1808 struct pci_dev *pci_find_upstream_pcie_bridge(struct pci_dev *pdev);
 1809 
 1810 #endif /* LINUX_PCI_H */
    1 /*
    2  * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
    3  *
    4  * (C) SGI 2006, Christoph Lameter
    5  * 	Cleaned up and restructured to ease the addition of alternative
    6  * 	implementations of SLAB allocators.
    7  * (C) Linux Foundation 2008-2013
    8  *      Unified interface for all slab allocators
    9  */
   10 
   11 #ifndef _LINUX_SLAB_H
   12 #define	_LINUX_SLAB_H
   13 
   14 #include <linux/gfp.h>
   15 #include <linux/types.h>
   16 #include <linux/workqueue.h>
   17 
   18 
   19 /*
   20  * Flags to pass to kmem_cache_create().
   21  * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
   22  */
   23 #define SLAB_DEBUG_FREE		0x00000100UL	/* DEBUG: Perform (expensive) checks on free */
   24 #define SLAB_RED_ZONE		0x00000400UL	/* DEBUG: Red zone objs in a cache */
   25 #define SLAB_POISON		0x00000800UL	/* DEBUG: Poison objects */
   26 #define SLAB_HWCACHE_ALIGN	0x00002000UL	/* Align objs on cache lines */
   27 #define SLAB_CACHE_DMA		0x00004000UL	/* Use GFP_DMA memory */
   28 #define SLAB_STORE_USER		0x00010000UL	/* DEBUG: Store the last owner for bug hunting */
   29 #define SLAB_PANIC		0x00040000UL	/* Panic if kmem_cache_create() fails */
   30 /*
   31  * SLAB_DESTROY_BY_RCU - **WARNING** READ THIS!
   32  *
   33  * This delays freeing the SLAB page by a grace period, it does _NOT_
   34  * delay object freeing. This means that if you do kmem_cache_free()
   35  * that memory location is free to be reused at any time. Thus it may
   36  * be possible to see another object there in the same RCU grace period.
   37  *
   38  * This feature only ensures the memory location backing the object
   39  * stays valid, the trick to using this is relying on an independent
   40  * object validation pass. Something like:
   41  *
   42  *  rcu_read_lock()
   43  * again:
   44  *  obj = lockless_lookup(key);
   45  *  if (obj) {
   46  *    if (!try_get_ref(obj)) // might fail for free objects
   47  *      goto again;
   48  *
   49  *    if (obj->key != key) { // not the object we expected
   50  *      put_ref(obj);
   51  *      goto again;
   52  *    }
   53  *  }
   54  *  rcu_read_unlock();
   55  *
   56  * This is useful if we need to approach a kernel structure obliquely,
   57  * from its address obtained without the usual locking. We can lock
   58  * the structure to stabilize it and check it's still at the given address,
   59  * only if we can be sure that the memory has not been meanwhile reused
   60  * for some other kind of object (which our subsystem's lock might corrupt).
   61  *
   62  * rcu_read_lock before reading the address, then rcu_read_unlock after
   63  * taking the spinlock within the structure expected at that address.
   64  */
   65 #define SLAB_DESTROY_BY_RCU	0x00080000UL	/* Defer freeing slabs to RCU */
   66 #define SLAB_MEM_SPREAD		0x00100000UL	/* Spread some memory over cpuset */
   67 #define SLAB_TRACE		0x00200000UL	/* Trace allocations and frees */
   68 
   69 /* Flag to prevent checks on free */
   70 #ifdef CONFIG_DEBUG_OBJECTS
   71 # define SLAB_DEBUG_OBJECTS	0x00400000UL
   72 #else
   73 # define SLAB_DEBUG_OBJECTS	0x00000000UL
   74 #endif
   75 
   76 #define SLAB_NOLEAKTRACE	0x00800000UL	/* Avoid kmemleak tracing */
   77 
   78 /* Don't track use of uninitialized memory */
   79 #ifdef CONFIG_KMEMCHECK
   80 # define SLAB_NOTRACK		0x01000000UL
   81 #else
   82 # define SLAB_NOTRACK		0x00000000UL
   83 #endif
   84 #ifdef CONFIG_FAILSLAB
   85 # define SLAB_FAILSLAB		0x02000000UL	/* Fault injection mark */
   86 #else
   87 # define SLAB_FAILSLAB		0x00000000UL
   88 #endif
   89 
   90 /* The following flags affect the page allocator grouping pages by mobility */
   91 #define SLAB_RECLAIM_ACCOUNT	0x00020000UL		/* Objects are reclaimable */
   92 #define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */
   93 /*
   94  * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
   95  *
   96  * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
   97  *
   98  * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
   99  * Both make kfree a no-op.
  100  */
  101 #define ZERO_SIZE_PTR ((void *)16)
  102 
  103 #define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
  104 				(unsigned long)ZERO_SIZE_PTR)
  105 
  106 #include <linux/kmemleak.h>
  107 
  108 struct mem_cgroup;
  109 /*
  110  * struct kmem_cache related prototypes
  111  */
  112 void __init kmem_cache_init(void);
  113 int slab_is_available(void);
  114 
  115 struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
  116 			unsigned long,
  117 			void (*)(void *));
  118 struct kmem_cache *
  119 kmem_cache_create_memcg(struct mem_cgroup *, const char *, size_t, size_t,
  120 			unsigned long, void (*)(void *), struct kmem_cache *);
  121 void kmem_cache_destroy(struct kmem_cache *);
  122 int kmem_cache_shrink(struct kmem_cache *);
  123 void kmem_cache_free(struct kmem_cache *, void *);
  124 
  125 /*
  126  * Please use this macro to create slab caches. Simply specify the
  127  * name of the structure and maybe some flags that are listed above.
  128  *
  129  * The alignment of the struct determines object alignment. If you
  130  * f.e. add ____cacheline_aligned_in_smp to the struct declaration
  131  * then the objects will be properly aligned in SMP configurations.
  132  */
  133 #define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\
  134 		sizeof(struct __struct), __alignof__(struct __struct),\
  135 		(__flags), NULL)
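/*
 * Sketch ("struct my_obj" is hypothetical): the macro derives both object
 * size and alignment from the struct definition itself:
 *
 *	static struct kmem_cache *my_cache;
 *
 *	my_cache = KMEM_CACHE(my_obj, SLAB_HWCACHE_ALIGN);
 *	if (!my_cache)
 *		return -ENOMEM;
 */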
  136 
  137 /*
  138  * Common kmalloc functions provided by all allocators
  139  */
  140 void * __must_check __krealloc(const void *, size_t, gfp_t);
  141 void * __must_check krealloc(const void *, size_t, gfp_t);
  142 void kfree(const void *);
  143 void kzfree(const void *);
  144 size_t ksize(const void *);
  145 
  146 /*
  147  * Some archs want to perform DMA into kmalloc caches and need a guaranteed
  148  * alignment larger than the alignment of a 64-bit integer.
  149  * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that.
  150  */
  151 #if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
  152 #define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
  153 #define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
  154 #define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
  155 #else
  156 #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
  157 #endif
  158 
  159 #ifdef CONFIG_SLOB
  160 /*
  161  * Common fields provided in kmem_cache by all slab allocators
  162  * This struct is either used directly by the allocator (SLOB)
  163  * or the allocator must include definitions for all fields
  164  * provided in kmem_cache_common in their definition of kmem_cache.
  165  *
   166  * Once we can do anonymous structs (C11 standard) we could put an
   167  * anonymous struct definition in these allocators so that the
   168  * separate allocations in the kmem_cache structure of SLAB and
   169  * SLUB are no longer needed.
  170  */
  171 struct kmem_cache {
  172 	unsigned int object_size;/* The original size of the object */
  173 	unsigned int size;	/* The aligned/padded/added on size  */
  174 	unsigned int align;	/* Alignment as calculated */
  175 	unsigned long flags;	/* Active flags on the slab */
  176 	const char *name;	/* Slab name for sysfs */
  177 	int refcount;		/* Use counter */
  178 	void (*ctor)(void *);	/* Called on object slot creation */
  179 	struct list_head list;	/* List of all slab caches on the system */
  180 };
  181 
  182 #endif /* CONFIG_SLOB */
  183 
  184 /*
  185  * Kmalloc array related definitions
  186  */
  187 
  188 #ifdef CONFIG_SLAB
  189 /*
  190  * The largest kmalloc size supported by the SLAB allocators is
   191  * 32 megabytes (2^25), or the maximum allocatable page order if that is
   192  * less than 32 MB.
   193  *
   194  * WARNING: It's not easy to increase this value since the allocators have
  195  * to do various tricks to work around compiler limitations in order to
  196  * ensure proper constant folding.
  197  */
  198 #define KMALLOC_SHIFT_HIGH	((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
  199 				(MAX_ORDER + PAGE_SHIFT - 1) : 25)
  200 #define KMALLOC_SHIFT_MAX	KMALLOC_SHIFT_HIGH
  201 #ifndef KMALLOC_SHIFT_LOW
  202 #define KMALLOC_SHIFT_LOW	5
  203 #endif
  204 #endif
  205 
  206 #ifdef CONFIG_SLUB
  207 /*
   208  * SLUB directly allocates requests fitting into an order-1 page
  209  * (PAGE_SIZE*2).  Larger requests are passed to the page allocator.
  210  */
  211 #define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
  212 #define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT)
  213 #ifndef KMALLOC_SHIFT_LOW
  214 #define KMALLOC_SHIFT_LOW	3
  215 #endif
  216 #endif
  217 
  218 #ifdef CONFIG_SLOB
  219 /*
  220  * SLOB passes all requests larger than one page to the page allocator.
  221  * No kmalloc array is necessary since objects of different sizes can
  222  * be allocated from the same page.
  223  */
  224 #define KMALLOC_SHIFT_HIGH	PAGE_SHIFT
  225 #define KMALLOC_SHIFT_MAX	30
  226 #ifndef KMALLOC_SHIFT_LOW
  227 #define KMALLOC_SHIFT_LOW	3
  228 #endif
  229 #endif
  230 
  231 /* Maximum allocatable size */
  232 #define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_MAX)
  233 /* Maximum size for which we actually use a slab cache */
  234 #define KMALLOC_MAX_CACHE_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
   235 /* Maximum order allocatable via the slab allocator */
  236 #define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_MAX - PAGE_SHIFT)
  237 
  238 /*
  239  * Kmalloc subsystem.
  240  */
  241 #ifndef KMALLOC_MIN_SIZE
  242 #define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
  243 #endif
  244 
  245 #ifndef CONFIG_SLOB
  246 extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
  247 #ifdef CONFIG_ZONE_DMA
  248 extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
  249 #endif
  250 
  251 /*
  252  * Figure out which kmalloc slab an allocation of a certain size
  253  * belongs to.
  254  * 0 = zero alloc
  255  * 1 =  65 .. 96 bytes
   256  * 2 = 129 .. 192 bytes
  257  * n = 2^(n-1) .. 2^n -1
  258  */
  259 static __always_inline int kmalloc_index(size_t size)
  260 {
  261 	if (!size)
  262 		return 0;
  263 
  264 	if (size <= KMALLOC_MIN_SIZE)
  265 		return KMALLOC_SHIFT_LOW;
  266 
  267 	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
  268 		return 1;
  269 	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
  270 		return 2;
  271 	if (size <=          8) return 3;
  272 	if (size <=         16) return 4;
  273 	if (size <=         32) return 5;
  274 	if (size <=         64) return 6;
  275 	if (size <=        128) return 7;
  276 	if (size <=        256) return 8;
  277 	if (size <=        512) return 9;
  278 	if (size <=       1024) return 10;
  279 	if (size <=   2 * 1024) return 11;
  280 	if (size <=   4 * 1024) return 12;
  281 	if (size <=   8 * 1024) return 13;
  282 	if (size <=  16 * 1024) return 14;
  283 	if (size <=  32 * 1024) return 15;
  284 	if (size <=  64 * 1024) return 16;
  285 	if (size <= 128 * 1024) return 17;
  286 	if (size <= 256 * 1024) return 18;
  287 	if (size <= 512 * 1024) return 19;
  288 	if (size <= 1024 * 1024) return 20;
  289 	if (size <=  2 * 1024 * 1024) return 21;
  290 	if (size <=  4 * 1024 * 1024) return 22;
  291 	if (size <=  8 * 1024 * 1024) return 23;
  292 	if (size <=  16 * 1024 * 1024) return 24;
  293 	if (size <=  32 * 1024 * 1024) return 25;
  294 	if (size <=  64 * 1024 * 1024) return 26;
  295 	BUG();
  296 
  297 	/* Will never be reached. Needed because the compiler may complain */
  298 	return -1;
  299 }
  300 #endif /* !CONFIG_SLOB */
  301 
  302 void *__kmalloc(size_t size, gfp_t flags);
  303 void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
  304 
  305 #ifdef CONFIG_NUMA
  306 void *__kmalloc_node(size_t size, gfp_t flags, int node);
  307 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
  308 #else
  309 static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
  310 {
  311 	return __kmalloc(size, flags);
  312 }
  313 
  314 static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
  315 {
  316 	return kmem_cache_alloc(s, flags);
  317 }
  318 #endif
  319 
  320 #ifdef CONFIG_TRACING
  321 extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
  322 
  323 #ifdef CONFIG_NUMA
  324 extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
  325 					   gfp_t gfpflags,
  326 					   int node, size_t size);
  327 #else
  328 static __always_inline void *
  329 kmem_cache_alloc_node_trace(struct kmem_cache *s,
  330 			      gfp_t gfpflags,
  331 			      int node, size_t size)
  332 {
  333 	return kmem_cache_alloc_trace(s, gfpflags, size);
  334 }
  335 #endif /* CONFIG_NUMA */
  336 
  337 #else /* CONFIG_TRACING */
  338 static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
  339 		gfp_t flags, size_t size)
  340 {
  341 	return kmem_cache_alloc(s, flags);
  342 }
  343 
  344 static __always_inline void *
  345 kmem_cache_alloc_node_trace(struct kmem_cache *s,
  346 			      gfp_t gfpflags,
  347 			      int node, size_t size)
  348 {
  349 	return kmem_cache_alloc_node(s, gfpflags, node);
  350 }
  351 #endif /* CONFIG_TRACING */
  352 
  353 #ifdef CONFIG_SLAB
  354 #include <linux/slab_def.h>
  355 #endif
  356 
  357 #ifdef CONFIG_SLUB
  358 #include <linux/slub_def.h>
  359 #endif
  360 
  361 static __always_inline void *
  362 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
  363 {
  364 	void *ret;
  365 
  366 	flags |= (__GFP_COMP | __GFP_KMEMCG);
  367 	ret = (void *) __get_free_pages(flags, order);
  368 	kmemleak_alloc(ret, size, 1, flags);
  369 	return ret;
  370 }
  371 
  372 #ifdef CONFIG_TRACING
  373 extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
  374 #else
  375 static __always_inline void *
  376 kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
  377 {
  378 	return kmalloc_order(size, flags, order);
  379 }
  380 #endif
  381 
  382 static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
  383 {
  384 	unsigned int order = get_order(size);
  385 	return kmalloc_order_trace(size, flags, order);
  386 }
  387 
  388 /**
  389  * kmalloc - allocate memory
  390  * @size: how many bytes of memory are required.
  391  * @flags: the type of memory to allocate.
  392  *
  393  * kmalloc is the normal method of allocating memory
  394  * for objects smaller than page size in the kernel.
  395  *
  396  * The @flags argument may be one of:
  397  *
  398  * %GFP_USER - Allocate memory on behalf of user.  May sleep.
  399  *
  400  * %GFP_KERNEL - Allocate normal kernel ram.  May sleep.
  401  *
  402  * %GFP_ATOMIC - Allocation will not sleep.  May use emergency pools.
  403  *   For example, use this inside interrupt handlers.
  404  *
  405  * %GFP_HIGHUSER - Allocate pages from high memory.
  406  *
  407  * %GFP_NOIO - Do not do any I/O at all while trying to get memory.
  408  *
  409  * %GFP_NOFS - Do not make any fs calls while trying to get memory.
  410  *
  411  * %GFP_NOWAIT - Allocation will not sleep.
  412  *
  413  * %__GFP_THISNODE - Allocate node-local memory only.
  414  *
  415  * %GFP_DMA - Allocation suitable for DMA.
  416  *   Should only be used for kmalloc() caches. Otherwise, use a
   417  *   slab created with SLAB_CACHE_DMA.
  418  *
  419  * Also it is possible to set different flags by OR'ing
  420  * in one or more of the following additional @flags:
  421  *
  422  * %__GFP_COLD - Request cache-cold pages instead of
  423  *   trying to return cache-warm pages.
  424  *
  425  * %__GFP_HIGH - This allocation has high priority and may use emergency pools.
  426  *
  427  * %__GFP_NOFAIL - Indicate that this allocation is in no way allowed to fail
  428  *   (think twice before using).
  429  *
  430  * %__GFP_NORETRY - If memory is not immediately available,
  431  *   then give up at once.
  432  *
  433  * %__GFP_NOWARN - If allocation fails, don't issue any warnings.
  434  *
  435  * %__GFP_REPEAT - If allocation fails initially, try once more before failing.
  436  *
  437  * There are other flags available as well, but these are not intended
  438  * for general use, and so are not documented here. For a full list of
  439  * potential flags, always refer to linux/gfp.h.
  440  */
  441 static __always_inline void *kmalloc(size_t size, gfp_t flags)
  442 {
  443 	if (__builtin_constant_p(size)) {
  444 		if (size > KMALLOC_MAX_CACHE_SIZE)
  445 			return kmalloc_large(size, flags);
  446 #ifndef CONFIG_SLOB
  447 		if (!(flags & GFP_DMA)) {
  448 			int index = kmalloc_index(size);
  449 
  450 			if (!index)
  451 				return ZERO_SIZE_PTR;
  452 
  453 			return kmem_cache_alloc_trace(kmalloc_caches[index],
  454 					flags, size);
  455 		}
  456 #endif
  457 	}
  458 	return __kmalloc(size, flags);
  459 }
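/*
 * Sketch ("struct my_hdr" is hypothetical): for a constant, non-DMA size
 * like the one below, the __builtin_constant_p() path above resolves the
 * allocation at compile time to a direct call on the matching kmalloc
 * cache:
 *
 *	struct my_hdr *hdr = kmalloc(sizeof(*hdr), GFP_KERNEL);
 *
 *	if (!hdr)
 *		return -ENOMEM;
 */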
  460 
  461 /*
  462  * Determine size used for the nth kmalloc cache.
   463  * Return the size, or 0 if a kmalloc cache for that
   464  * size does not exist.
  465  */
  466 static __always_inline int kmalloc_size(int n)
  467 {
  468 #ifndef CONFIG_SLOB
  469 	if (n > 2)
  470 		return 1 << n;
  471 
  472 	if (n == 1 && KMALLOC_MIN_SIZE <= 32)
  473 		return 96;
  474 
  475 	if (n == 2 && KMALLOC_MIN_SIZE <= 64)
  476 		return 192;
  477 #endif
  478 	return 0;
  479 }
  480 
  481 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
  482 {
  483 #ifndef CONFIG_SLOB
  484 	if (__builtin_constant_p(size) &&
  485 		size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) {
  486 		int i = kmalloc_index(size);
  487 
  488 		if (!i)
  489 			return ZERO_SIZE_PTR;
  490 
  491 		return kmem_cache_alloc_node_trace(kmalloc_caches[i],
  492 						flags, node, size);
  493 	}
  494 #endif
  495 	return __kmalloc_node(size, flags, node);
  496 }
  497 
  498 /*
  499  * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
  500  * Intended for arches that get misalignment faults even for 64 bit integer
  501  * aligned buffers.
  502  */
  503 #ifndef ARCH_SLAB_MINALIGN
  504 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
  505 #endif
  506 /*
  507  * This is the main placeholder for memcg-related information in kmem caches.
  508  * struct kmem_cache will hold a pointer to it, so the memory cost while
   509  * disabled is 1 pointer. The runtime cost while enabled is bigger than it
   510  * would otherwise be if this were bundled in kmem_cache: we need an
   511  * extra pointer chase. But the trade-off clearly lies in favor of not
  512  * penalizing non-users.
  513  *
  514  * Both the root cache and the child caches will have it. For the root cache,
  515  * this will hold a dynamically allocated array large enough to hold
  516  * information about the currently limited memcgs in the system. To allow the
  517  * array to be accessed without taking any locks, on relocation we free the old
  518  * version only after a grace period.
  519  *
   520  * Child caches will hold extra metadata needed for their operation. Fields are:
  521  *
  522  * @memcg: pointer to the memcg this cache belongs to
  523  * @list: list_head for the list of all caches in this memcg
  524  * @root_cache: pointer to the global, root cache, this cache was derived from
  525  * @dead: set to true after the memcg dies; the cache may still be around.
   526  * @nr_pages: number of pages that belong to this cache.
  527  * @destroy: worker to be called whenever we are ready, or believe we may be
  528  *           ready, to destroy this cache.
  529  */
  530 struct memcg_cache_params {
  531 	bool is_root_cache;
  532 	union {
  533 		struct {
  534 			struct rcu_head rcu_head;
  535 			struct kmem_cache *memcg_caches[0];
  536 		};
  537 		struct {
  538 			struct mem_cgroup *memcg;
  539 			struct list_head list;
  540 			struct kmem_cache *root_cache;
  541 			bool dead;
  542 			atomic_t nr_pages;
  543 			struct work_struct destroy;
  544 		};
  545 	};
  546 };
  547 
  548 int memcg_update_all_caches(int num_memcgs);
  549 
  550 struct seq_file;
  551 int cache_show(struct kmem_cache *s, struct seq_file *m);
  552 void print_slabinfo_header(struct seq_file *m);
  553 
  554 /**
  555  * kmalloc_array - allocate memory for an array.
  556  * @n: number of elements.
  557  * @size: element size.
  558  * @flags: the type of memory to allocate (see kmalloc).
  559  */
  560 static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
  561 {
  562 	if (size != 0 && n > SIZE_MAX / size)
  563 		return NULL;
  564 	return __kmalloc(n * size, flags);
  565 }
  566 
  567 /**
  568  * kcalloc - allocate memory for an array. The memory is set to zero.
  569  * @n: number of elements.
  570  * @size: element size.
  571  * @flags: the type of memory to allocate (see kmalloc).
  572  */
  573 static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
  574 {
  575 	return kmalloc_array(n, size, flags | __GFP_ZERO);
  576 }
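/*
 * Sketch ("nents" is hypothetical, possibly untrusted input): the overflow
 * check in kmalloc_array() makes an oversized element count fail cleanly
 * with NULL instead of wrapping around and under-allocating:
 *
 *	u32 *table = kcalloc(nents, sizeof(*table), GFP_KERNEL);
 *
 *	if (!table)
 *		return -ENOMEM;
 */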
  577 
  578 /*
  579  * kmalloc_track_caller is a special version of kmalloc that records the
  580  * calling function of the routine calling it for slab leak tracking instead
  581  * of just the calling function (confusing, eh?).
  582  * It's useful when the call to kmalloc comes from a widely-used standard
  583  * allocator where we care about the real place the memory allocation
  584  * request comes from.
  585  */
  586 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
  587 	(defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
  588 	(defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
  589 extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
  590 #define kmalloc_track_caller(size, flags) \
  591 	__kmalloc_track_caller(size, flags, _RET_IP_)
  592 #else
  593 #define kmalloc_track_caller(size, flags) \
  594 	__kmalloc(size, flags)
  595 #endif /* DEBUG_SLAB */
  596 
  597 #ifdef CONFIG_NUMA
  598 /*
  599  * kmalloc_node_track_caller is a special version of kmalloc_node that
  600  * records the calling function of the routine calling it for slab leak
  601  * tracking instead of just the calling function (confusing, eh?).
  602  * It's useful when the call to kmalloc_node comes from a widely-used
  603  * standard allocator where we care about the real place the memory
  604  * allocation request comes from.
  605  */
  606 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
  607 	(defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
  608 	(defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
  609 extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
  610 #define kmalloc_node_track_caller(size, flags, node) \
  611 	__kmalloc_node_track_caller(size, flags, node, \
  612 			_RET_IP_)
  613 #else
  614 #define kmalloc_node_track_caller(size, flags, node) \
  615 	__kmalloc_node(size, flags, node)
  616 #endif
  617 
  618 #else /* CONFIG_NUMA */
  619 
  620 #define kmalloc_node_track_caller(size, flags, node) \
  621 	kmalloc_track_caller(size, flags)
  622 
  623 #endif /* CONFIG_NUMA */
  624 
  625 /*
  626  * Shortcuts
  627  */
  628 static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
  629 {
  630 	return kmem_cache_alloc(k, flags | __GFP_ZERO);
  631 }
  632 
  633 /**
  634  * kzalloc - allocate memory. The memory is set to zero.
  635  * @size: how many bytes of memory are required.
  636  * @flags: the type of memory to allocate (see kmalloc).
  637  */
  638 static inline void *kzalloc(size_t size, gfp_t flags)
  639 {
  640 	return kmalloc(size, flags | __GFP_ZERO);
  641 }
  642 
  643 /**
  644  * kzalloc_node - allocate zeroed memory from a particular memory node.
  645  * @size: how many bytes of memory are required.
  646  * @flags: the type of memory to allocate (see kmalloc).
  647  * @node: memory node from which to allocate
  648  */
  649 static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
  650 {
  651 	return kmalloc_node(size, flags | __GFP_ZERO, node);
  652 }
  653 
  654 /*
  655  * Determine the size of a slab object
  656  */
  657 static inline unsigned int kmem_cache_size(struct kmem_cache *s)
  658 {
  659 	return s->object_size;
  660 }
  661 
  662 void __init kmem_cache_init_late(void);
  663 
   663 #endif	/* _LINUX_SLAB_H */
    1 #ifndef __LINUX_SPINLOCK_H
    2 #define __LINUX_SPINLOCK_H
    3 
    4 /*
    5  * include/linux/spinlock.h - generic spinlock/rwlock declarations
    6  *
    7  * here's the role of the various spinlock/rwlock related include files:
    8  *
    9  * on SMP builds:
   10  *
   11  *  asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
   12  *                        initializers
   13  *
   14  *  linux/spinlock_types.h:
   15  *                        defines the generic type and initializers
   16  *
   17  *  asm/spinlock.h:       contains the arch_spin_*()/etc. lowlevel
   18  *                        implementations, mostly inline assembly code
   19  *
   20  *   (also included on UP-debug builds:)
   21  *
   22  *  linux/spinlock_api_smp.h:
   23  *                        contains the prototypes for the _spin_*() APIs.
   24  *
   25  *  linux/spinlock.h:     builds the final spin_*() APIs.
   26  *
   27  * on UP builds:
   28  *
   29  *  linux/spinlock_type_up.h:
   30  *                        contains the generic, simplified UP spinlock type.
   31  *                        (which is an empty structure on non-debug builds)
   32  *
   33  *  linux/spinlock_types.h:
   34  *                        defines the generic type and initializers
   35  *
   36  *  linux/spinlock_up.h:
   37  *                        contains the arch_spin_*()/etc. version of UP
   38  *                        builds. (which are NOPs on non-debug, non-preempt
   39  *                        builds)
   40  *
   41  *   (included on UP-non-debug builds:)
   42  *
   43  *  linux/spinlock_api_up.h:
   44  *                        builds the _spin_*() APIs.
   45  *
   46  *  linux/spinlock.h:     builds the final spin_*() APIs.
   47  */
   48 
   49 #include <linux/typecheck.h>
   50 #include <linux/preempt.h>
   51 #include <linux/linkage.h>
   52 #include <linux/compiler.h>
   53 #include <linux/irqflags.h>
   54 #include <linux/thread_info.h>
   55 #include <linux/kernel.h>
   56 #include <linux/stringify.h>
   57 #include <linux/bottom_half.h>
   58 #include <asm/barrier.h>
   59 
   60 
   61 /*
   62  * Must define these before including other files, inline functions need them
   63  */
   64 #define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME
   65 
   66 #define LOCK_SECTION_START(extra)               \
   67         ".subsection 1\n\t"                     \
   68         extra                                   \
   69         ".ifndef " LOCK_SECTION_NAME "\n\t"     \
   70         LOCK_SECTION_NAME ":\n\t"               \
   71         ".endif\n"
   72 
   73 #define LOCK_SECTION_END                        \
   74         ".previous\n\t"
   75 
   76 #define __lockfunc __attribute__((section(".spinlock.text")))
   77 
   78 /*
   79  * Pull the arch_spinlock_t and arch_rwlock_t definitions:
   80  */
   81 #include <linux/spinlock_types.h>
   82 
   83 /*
   84  * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
   85  */
   86 #ifdef CONFIG_SMP
   87 # include <asm/spinlock.h>
   88 #else
   89 # include <linux/spinlock_up.h>
   90 #endif
   91 
   92 #ifdef CONFIG_DEBUG_SPINLOCK
   93   extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
   94 				   struct lock_class_key *key);
   95 # define raw_spin_lock_init(lock)				\
   96 do {								\
   97 	static struct lock_class_key __key;			\
   98 								\
   99 	__raw_spin_lock_init((lock), #lock, &__key);		\
  100 } while (0)
  101 
  102 #else
  103 # define raw_spin_lock_init(lock)				\
  104 	do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
  105 #endif
  106 
  107 #define raw_spin_is_locked(lock)	arch_spin_is_locked(&(lock)->raw_lock)
  108 
  109 #ifdef CONFIG_GENERIC_LOCKBREAK
  110 #define raw_spin_is_contended(lock) ((lock)->break_lock)
  111 #else
  112 
  113 #ifdef arch_spin_is_contended
  114 #define raw_spin_is_contended(lock)	arch_spin_is_contended(&(lock)->raw_lock)
  115 #else
  116 #define raw_spin_is_contended(lock)	(((void)(lock), 0))
  117 #endif /*arch_spin_is_contended*/
  118 #endif
  119 
  120 /*
   121  * Despite its name, it doesn't necessarily have to be a full barrier.
   122  * It should only guarantee that a STORE before the critical section
   123  * cannot be reordered with a LOAD inside this section.
   124  * spin_lock() is a one-way barrier: this LOAD cannot escape out
   125  * of the region. So the default implementation simply ensures that
   126  * a STORE cannot move into the critical section; smp_wmb() should
   127  * serialize it with another STORE done by spin_lock().
  128  */
  129 #ifndef smp_mb__before_spinlock
  130 #define smp_mb__before_spinlock()	smp_wmb()
  131 #endif
  132 
  133 /*
  134  * Place this after a lock-acquisition primitive to guarantee that
  135  * an UNLOCK+LOCK pair act as a full barrier.  This guarantee applies
  136  * if the UNLOCK and LOCK are executed by the same CPU or if the
  137  * UNLOCK and LOCK operate on the same lock variable.
  138  */
  139 #ifndef smp_mb__after_unlock_lock
  140 #define smp_mb__after_unlock_lock()	do { } while (0)
  141 #endif
  142 
  143 /**
  144  * raw_spin_unlock_wait - wait until the spinlock gets unlocked
  145  * @lock: the spinlock in question.
  146  */
  147 #define raw_spin_unlock_wait(lock)	arch_spin_unlock_wait(&(lock)->raw_lock)
  148 
  149 #ifdef CONFIG_DEBUG_SPINLOCK
  150  extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
  151 #define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
  152  extern int do_raw_spin_trylock(raw_spinlock_t *lock);
  153  extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
  154 #else
  155 static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
  156 {
  157 	__acquire(lock);
  158 	arch_spin_lock(&lock->raw_lock);
  159 }
  160 
  161 static inline void
  162 do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
  163 {
  164 	__acquire(lock);
  165 	arch_spin_lock_flags(&lock->raw_lock, *flags);
  166 }
  167 
  168 static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
  169 {
  170 	return arch_spin_trylock(&(lock)->raw_lock);
  171 }
  172 
  173 static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
  174 {
  175 	arch_spin_unlock(&lock->raw_lock);
  176 	__release(lock);
  177 }
  178 #endif
  179 
  180 /*
  181  * Define the various spin_lock methods.  Note we define these
   182  * regardless of whether CONFIG_SMP or CONFIG_PREEMPT is set; the
   183  * various methods are defined as nops when they are not
   184  * required.
  185  */
  186 #define raw_spin_trylock(lock)	__cond_lock(lock, _raw_spin_trylock(lock))
  187 
  188 #define raw_spin_lock(lock)	_raw_spin_lock(lock)
  189 
  190 #ifdef CONFIG_DEBUG_LOCK_ALLOC
  191 # define raw_spin_lock_nested(lock, subclass) \
  192 	_raw_spin_lock_nested(lock, subclass)
  193 
  194 # define raw_spin_lock_nest_lock(lock, nest_lock)			\
  195 	 do {								\
  196 		 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
  197 		 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map);	\
  198 	 } while (0)
  199 #else
  200 # define raw_spin_lock_nested(lock, subclass)		_raw_spin_lock(lock)
  201 # define raw_spin_lock_nest_lock(lock, nest_lock)	_raw_spin_lock(lock)
  202 #endif
  203 
  204 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
  205 
  206 #define raw_spin_lock_irqsave(lock, flags)			\
  207 	do {						\
  208 		typecheck(unsigned long, flags);	\
  209 		flags = _raw_spin_lock_irqsave(lock);	\
  210 	} while (0)
  211 
  212 #ifdef CONFIG_DEBUG_LOCK_ALLOC
  213 #define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
  214 	do {								\
  215 		typecheck(unsigned long, flags);			\
  216 		flags = _raw_spin_lock_irqsave_nested(lock, subclass);	\
  217 	} while (0)
  218 #else
  219 #define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
  220 	do {								\
  221 		typecheck(unsigned long, flags);			\
  222 		flags = _raw_spin_lock_irqsave(lock);			\
  223 	} while (0)
  224 #endif
  225 
  226 #else
  227 
  228 #define raw_spin_lock_irqsave(lock, flags)		\
  229 	do {						\
  230 		typecheck(unsigned long, flags);	\
  231 		_raw_spin_lock_irqsave(lock, flags);	\
  232 	} while (0)
  233 
  234 #define raw_spin_lock_irqsave_nested(lock, flags, subclass)	\
  235 	raw_spin_lock_irqsave(lock, flags)
  236 
  237 #endif
  238 
  239 #define raw_spin_lock_irq(lock)		_raw_spin_lock_irq(lock)
  240 #define raw_spin_lock_bh(lock)		_raw_spin_lock_bh(lock)
  241 #define raw_spin_unlock(lock)		_raw_spin_unlock(lock)
  242 #define raw_spin_unlock_irq(lock)	_raw_spin_unlock_irq(lock)
  243 
  244 #define raw_spin_unlock_irqrestore(lock, flags)		\
  245 	do {							\
  246 		typecheck(unsigned long, flags);		\
  247 		_raw_spin_unlock_irqrestore(lock, flags);	\
  248 	} while (0)
  249 #define raw_spin_unlock_bh(lock)	_raw_spin_unlock_bh(lock)
  250 
  251 #define raw_spin_trylock_bh(lock) \
  252 	__cond_lock(lock, _raw_spin_trylock_bh(lock))
  253 
  254 #define raw_spin_trylock_irq(lock) \
  255 ({ \
  256 	local_irq_disable(); \
  257 	raw_spin_trylock(lock) ? \
  258 	1 : ({ local_irq_enable(); 0;  }); \
  259 })
  260 
  261 #define raw_spin_trylock_irqsave(lock, flags) \
  262 ({ \
  263 	local_irq_save(flags); \
  264 	raw_spin_trylock(lock) ? \
  265 	1 : ({ local_irq_restore(flags); 0; }); \
  266 })
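/*
 * Illustrative sketch, not part of this header: typical trylock usage on a
 * hypothetical lock. On failure the macro itself has already restored the
 * interrupt state.
 */
static DEFINE_RAW_SPINLOCK(demo_try_lock);

static inline int demo_try_path(void)
{
	unsigned long flags;

	if (!raw_spin_trylock_irqsave(&demo_try_lock, flags))
		return 0;		/* lock busy; IRQ state already restored */
	/* critical section with interrupts disabled */
	raw_spin_unlock_irqrestore(&demo_try_lock, flags);
	return 1;
}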
  267 
  268 /**
  269  * raw_spin_can_lock - would raw_spin_trylock() succeed?
  270  * @lock: the spinlock in question.
  271  */
  272 #define raw_spin_can_lock(lock)	(!raw_spin_is_locked(lock))
  273 
  274 /* Include rwlock functions */
  275 #include <linux/rwlock.h>
  276 
  277 /*
  278  * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
  279  */
  280 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
  281 # include <linux/spinlock_api_smp.h>
  282 #else
  283 # include <linux/spinlock_api_up.h>
  284 #endif
  285 
  286 /*
  287  * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
  288  */
  289 
  290 static inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
  291 {
  292 	return &lock->rlock;
  293 }
  294 
  295 #define spin_lock_init(_lock)				\
  296 do {							\
  297 	spinlock_check(_lock);				\
  298 	raw_spin_lock_init(&(_lock)->rlock);		\
  299 } while (0)
  300 
  301 static inline void spin_lock(spinlock_t *lock)
  302 {
  303 	raw_spin_lock(&lock->rlock);
  304 }
  305 
  306 static inline void spin_lock_bh(spinlock_t *lock)
  307 {
  308 	raw_spin_lock_bh(&lock->rlock);
  309 }
  310 
  311 static inline int spin_trylock(spinlock_t *lock)
  312 {
  313 	return raw_spin_trylock(&lock->rlock);
  314 }
  315 
  316 #define spin_lock_nested(lock, subclass)			\
  317 do {								\
  318 	raw_spin_lock_nested(spinlock_check(lock), subclass);	\
  319 } while (0)
  320 
  321 #define spin_lock_nest_lock(lock, nest_lock)				\
  322 do {									\
  323 	raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);	\
  324 } while (0)
  325 
  326 static inline void spin_lock_irq(spinlock_t *lock)
  327 {
  328 	raw_spin_lock_irq(&lock->rlock);
  329 }
  330 
  331 #define spin_lock_irqsave(lock, flags)				\
  332 do {								\
  333 	raw_spin_lock_irqsave(spinlock_check(lock), flags);	\
  334 } while (0)
  335 
  336 #define spin_lock_irqsave_nested(lock, flags, subclass)			\
  337 do {									\
  338 	raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
  339 } while (0)
  340 
  341 static inline void spin_unlock(spinlock_t *lock)
  342 {
  343 	raw_spin_unlock(&lock->rlock);
  344 }
  345 
  346 static inline void spin_unlock_bh(spinlock_t *lock)
  347 {
  348 	raw_spin_unlock_bh(&lock->rlock);
  349 }
  350 
  351 static inline void spin_unlock_irq(spinlock_t *lock)
  352 {
  353 	raw_spin_unlock_irq(&lock->rlock);
  354 }
  355 
  356 static inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
  357 {
  358 	raw_spin_unlock_irqrestore(&lock->rlock, flags);
  359 }
  360 
  361 static inline int spin_trylock_bh(spinlock_t *lock)
  362 {
  363 	return raw_spin_trylock_bh(&lock->rlock);
  364 }
  365 
  366 static inline int spin_trylock_irq(spinlock_t *lock)
  367 {
  368 	return raw_spin_trylock_irq(&lock->rlock);
  369 }
  370 
  371 #define spin_trylock_irqsave(lock, flags)			\
  372 ({								\
  373 	raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
  374 })
  375 
  376 static inline void spin_unlock_wait(spinlock_t *lock)
  377 {
  378 	raw_spin_unlock_wait(&lock->rlock);
  379 }
  380 
  381 static inline int spin_is_locked(spinlock_t *lock)
  382 {
  383 	return raw_spin_is_locked(&lock->rlock);
  384 }
  385 
  386 static inline int spin_is_contended(spinlock_t *lock)
  387 {
  388 	return raw_spin_is_contended(&lock->rlock);
  389 }
  390 
  391 static inline int spin_can_lock(spinlock_t *lock)
  392 {
  393 	return raw_spin_can_lock(&lock->rlock);
  394 }
  395 
  396 #define assert_spin_locked(lock)	assert_raw_spin_locked(&(lock)->rlock)
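/*
 * Illustrative sketch, not part of this header: the basic spin_lock API on a
 * hypothetical lock, including the IRQ-safe variant.
 */
static DEFINE_SPINLOCK(demo_spin_lock);

static inline void demo_spin_usage(void)
{
	unsigned long flags;

	spin_lock(&demo_spin_lock);	/* fine when no IRQ context uses this lock */
	spin_unlock(&demo_spin_lock);

	spin_lock_irqsave(&demo_spin_lock, flags);	/* lock shared with IRQ context */
	spin_unlock_irqrestore(&demo_spin_lock, flags);
}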
  397 
  398 /*
  399  * Pull the atomic_t declaration:
  400  * (asm-mips/atomic.h needs above definitions)
  401  */
  402 #include <linux/atomic.h>
  403 /**
  404  * atomic_dec_and_lock - lock on reaching reference count zero
  405  * @atomic: the atomic counter
  406  * @lock: the spinlock in question
  407  *
  408  * Decrements @atomic by 1.  If the result is 0, returns true and locks
  409  * @lock.  Returns false for all other cases.
  410  */
  411 extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
  412 #define atomic_dec_and_lock(atomic, lock) \
  413 		__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
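/*
 * Illustrative sketch, not part of this header: the usual reference-release
 * pattern. struct demo_obj and demo_obj_lock are hypothetical; list_del()
 * and kfree() need <linux/list.h> and <linux/slab.h>.
 */
static DEFINE_SPINLOCK(demo_obj_lock);

struct demo_obj {
	atomic_t refcount;
	struct list_head node;
};

static inline void demo_obj_put(struct demo_obj *obj)
{
	if (atomic_dec_and_lock(&obj->refcount, &demo_obj_lock)) {
		list_del(&obj->node);	/* last reference: unlink under the lock */
		spin_unlock(&demo_obj_lock);
		kfree(obj);
	}
}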
  414 
   415 #endif /* __LINUX_SPINLOCK_H */

     1 #ifndef _UAPI_LINUX_SWAB_H
    2 #define _UAPI_LINUX_SWAB_H
    3 
    4 #include <linux/types.h>
    5 #include <linux/compiler.h>
    6 #include <asm/swab.h>
    7 
    8 /*
     9  * Casts are necessary for constants, because we never know for sure
    10  * how U/UL/ULL map to __u16, __u32, __u64. At least not in a portable way.
   11  */
   12 #define ___constant_swab16(x) ((__u16)(				\
   13 	(((__u16)(x) & (__u16)0x00ffU) << 8) |			\
   14 	(((__u16)(x) & (__u16)0xff00U) >> 8)))
   15 
   16 #define ___constant_swab32(x) ((__u32)(				\
   17 	(((__u32)(x) & (__u32)0x000000ffUL) << 24) |		\
   18 	(((__u32)(x) & (__u32)0x0000ff00UL) <<  8) |		\
   19 	(((__u32)(x) & (__u32)0x00ff0000UL) >>  8) |		\
   20 	(((__u32)(x) & (__u32)0xff000000UL) >> 24)))
   21 
   22 #define ___constant_swab64(x) ((__u64)(				\
   23 	(((__u64)(x) & (__u64)0x00000000000000ffULL) << 56) |	\
   24 	(((__u64)(x) & (__u64)0x000000000000ff00ULL) << 40) |	\
   25 	(((__u64)(x) & (__u64)0x0000000000ff0000ULL) << 24) |	\
   26 	(((__u64)(x) & (__u64)0x00000000ff000000ULL) <<  8) |	\
   27 	(((__u64)(x) & (__u64)0x000000ff00000000ULL) >>  8) |	\
   28 	(((__u64)(x) & (__u64)0x0000ff0000000000ULL) >> 24) |	\
   29 	(((__u64)(x) & (__u64)0x00ff000000000000ULL) >> 40) |	\
   30 	(((__u64)(x) & (__u64)0xff00000000000000ULL) >> 56)))
   31 
   32 #define ___constant_swahw32(x) ((__u32)(			\
   33 	(((__u32)(x) & (__u32)0x0000ffffUL) << 16) |		\
   34 	(((__u32)(x) & (__u32)0xffff0000UL) >> 16)))
   35 
   36 #define ___constant_swahb32(x) ((__u32)(			\
   37 	(((__u32)(x) & (__u32)0x00ff00ffUL) << 8) |		\
   38 	(((__u32)(x) & (__u32)0xff00ff00UL) >> 8)))
   39 
   40 /*
   41  * Implement the following as inlines, but define the interface using
   42  * macros to allow constant folding when possible:
   43  * ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32
   44  */
   45 
   46 static inline __attribute_const__ __u16 __fswab16(__u16 val)
   47 {
   48 #ifdef __HAVE_BUILTIN_BSWAP16__
   49 	return __builtin_bswap16(val);
   50 #elif defined (__arch_swab16)
   51 	return __arch_swab16(val);
   52 #else
   53 	return ___constant_swab16(val);
   54 #endif
   55 }
   56 
   57 static inline __attribute_const__ __u32 __fswab32(__u32 val)
   58 {
   59 #ifdef __HAVE_BUILTIN_BSWAP32__
   60 	return __builtin_bswap32(val);
   61 #elif defined(__arch_swab32)
   62 	return __arch_swab32(val);
   63 #else
   64 	return ___constant_swab32(val);
   65 #endif
   66 }
   67 
   68 static inline __attribute_const__ __u64 __fswab64(__u64 val)
   69 {
   70 #ifdef __HAVE_BUILTIN_BSWAP64__
   71 	return __builtin_bswap64(val);
   72 #elif defined (__arch_swab64)
   73 	return __arch_swab64(val);
   74 #elif defined(__SWAB_64_THRU_32__)
   75 	__u32 h = val >> 32;
   76 	__u32 l = val & ((1ULL << 32) - 1);
   77 	return (((__u64)__fswab32(l)) << 32) | ((__u64)(__fswab32(h)));
   78 #else
   79 	return ___constant_swab64(val);
   80 #endif
   81 }
   82 
   83 static inline __attribute_const__ __u32 __fswahw32(__u32 val)
   84 {
   85 #ifdef __arch_swahw32
   86 	return __arch_swahw32(val);
   87 #else
   88 	return ___constant_swahw32(val);
   89 #endif
   90 }
   91 
   92 static inline __attribute_const__ __u32 __fswahb32(__u32 val)
   93 {
   94 #ifdef __arch_swahb32
   95 	return __arch_swahb32(val);
   96 #else
   97 	return ___constant_swahb32(val);
   98 #endif
   99 }
  100 
  101 /**
  102  * __swab16 - return a byteswapped 16-bit value
  103  * @x: value to byteswap
  104  */
  105 #define __swab16(x)				\
  106 	(__builtin_constant_p((__u16)(x)) ?	\
  107 	___constant_swab16(x) :			\
  108 	__fswab16(x))
  109 
  110 /**
  111  * __swab32 - return a byteswapped 32-bit value
  112  * @x: value to byteswap
  113  */
  114 #define __swab32(x)				\
  115 	(__builtin_constant_p((__u32)(x)) ?	\
  116 	___constant_swab32(x) :			\
  117 	__fswab32(x))
  118 
  119 /**
  120  * __swab64 - return a byteswapped 64-bit value
  121  * @x: value to byteswap
  122  */
  123 #define __swab64(x)				\
  124 	(__builtin_constant_p((__u64)(x)) ?	\
  125 	___constant_swab64(x) :			\
  126 	__fswab64(x))
  127 
  128 /**
  129  * __swahw32 - return a word-swapped 32-bit value
  130  * @x: value to wordswap
  131  *
  132  * __swahw32(0x12340000) is 0x00001234
  133  */
  134 #define __swahw32(x)				\
  135 	(__builtin_constant_p((__u32)(x)) ?	\
  136 	___constant_swahw32(x) :		\
  137 	__fswahw32(x))
  138 
  139 /**
  140  * __swahb32 - return a high and low byte-swapped 32-bit value
  141  * @x: value to byteswap
  142  *
  143  * __swahb32(0x12345678) is 0x34127856
  144  */
  145 #define __swahb32(x)				\
  146 	(__builtin_constant_p((__u32)(x)) ?	\
  147 	___constant_swahb32(x) :		\
  148 	__fswahb32(x))
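/*
 * Illustrative values, not part of this header; for constant arguments the
 * ___constant_* form is selected, so the result folds at compile time:
 *
 *	__swab16(0x1234)	is 0x3412
 *	__swab32(0x12345678)	is 0x78563412
 *	__swahw32(0x12340000)	is 0x00001234 (swaps the 16-bit halves)
 *	__swahb32(0x12345678)	is 0x34127856 (swaps bytes within each half)
 */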
  149 
  150 /**
  151  * __swab16p - return a byteswapped 16-bit value from a pointer
  152  * @p: pointer to a naturally-aligned 16-bit value
  153  */
  154 static inline __u16 __swab16p(const __u16 *p)
  155 {
  156 #ifdef __arch_swab16p
  157 	return __arch_swab16p(p);
  158 #else
  159 	return __swab16(*p);
  160 #endif
  161 }
  162 
  163 /**
  164  * __swab32p - return a byteswapped 32-bit value from a pointer
  165  * @p: pointer to a naturally-aligned 32-bit value
  166  */
  167 static inline __u32 __swab32p(const __u32 *p)
  168 {
  169 #ifdef __arch_swab32p
  170 	return __arch_swab32p(p);
  171 #else
  172 	return __swab32(*p);
  173 #endif
  174 }
  175 
  176 /**
  177  * __swab64p - return a byteswapped 64-bit value from a pointer
  178  * @p: pointer to a naturally-aligned 64-bit value
  179  */
  180 static inline __u64 __swab64p(const __u64 *p)
  181 {
  182 #ifdef __arch_swab64p
  183 	return __arch_swab64p(p);
  184 #else
  185 	return __swab64(*p);
  186 #endif
  187 }
  188 
  189 /**
  190  * __swahw32p - return a wordswapped 32-bit value from a pointer
  191  * @p: pointer to a naturally-aligned 32-bit value
  192  *
  193  * See __swahw32() for details of wordswapping.
  194  */
  195 static inline __u32 __swahw32p(const __u32 *p)
  196 {
  197 #ifdef __arch_swahw32p
  198 	return __arch_swahw32p(p);
  199 #else
  200 	return __swahw32(*p);
  201 #endif
  202 }
  203 
  204 /**
  205  * __swahb32p - return a high and low byteswapped 32-bit value from a pointer
  206  * @p: pointer to a naturally-aligned 32-bit value
  207  *
  208  * See __swahb32() for details of high/low byteswapping.
  209  */
  210 static inline __u32 __swahb32p(const __u32 *p)
  211 {
  212 #ifdef __arch_swahb32p
  213 	return __arch_swahb32p(p);
  214 #else
  215 	return __swahb32(*p);
  216 #endif
  217 }
  218 
  219 /**
  220  * __swab16s - byteswap a 16-bit value in-place
  221  * @p: pointer to a naturally-aligned 16-bit value
  222  */
  223 static inline void __swab16s(__u16 *p)
  224 {
  225 #ifdef __arch_swab16s
  226 	__arch_swab16s(p);
  227 #else
  228 	*p = __swab16p(p);
  229 #endif
  230 }
  231 /**
  232  * __swab32s - byteswap a 32-bit value in-place
  233  * @p: pointer to a naturally-aligned 32-bit value
  234  */
  235 static inline void __swab32s(__u32 *p)
  236 {
  237 #ifdef __arch_swab32s
  238 	__arch_swab32s(p);
  239 #else
  240 	*p = __swab32p(p);
  241 #endif
  242 }
  243 
  244 /**
  245  * __swab64s - byteswap a 64-bit value in-place
  246  * @p: pointer to a naturally-aligned 64-bit value
  247  */
  248 static inline void __swab64s(__u64 *p)
  249 {
  250 #ifdef __arch_swab64s
  251 	__arch_swab64s(p);
  252 #else
  253 	*p = __swab64p(p);
  254 #endif
  255 }
  256 
  257 /**
  258  * __swahw32s - wordswap a 32-bit value in-place
  259  * @p: pointer to a naturally-aligned 32-bit value
  260  *
  261  * See __swahw32() for details of wordswapping
  262  */
  263 static inline void __swahw32s(__u32 *p)
  264 {
  265 #ifdef __arch_swahw32s
  266 	__arch_swahw32s(p);
  267 #else
  268 	*p = __swahw32p(p);
  269 #endif
  270 }
  271 
  272 /**
  273  * __swahb32s - high and low byteswap a 32-bit value in-place
  274  * @p: pointer to a naturally-aligned 32-bit value
  275  *
  276  * See __swahb32() for details of high and low byte swapping
  277  */
  278 static inline void __swahb32s(__u32 *p)
  279 {
  280 #ifdef __arch_swahb32s
  281 	__arch_swahb32s(p);
  282 #else
  283 	*p = __swahb32p(p);
  284 #endif
  285 }
  286 
  287 
   288 #endif /* _UAPI_LINUX_SWAB_H */

     1 #ifndef _LDV_RCV_H_
    2 #define _LDV_RCV_H_
    3 
    4 /* If expr evaluates to zero, ldv_assert() causes the program to reach the
    5    error label, like the standard assert(). */
    6 #define ldv_assert(expr) ((expr) ? 0 : ldv_error())
    7 
    8 /* The error label wrapper. It is used because some static verifiers (like
    9    BLAST) don't accept multiple error labels in a program. */
   10 static inline void ldv_error(void)
   11 {
   12   LDV_ERROR: goto LDV_ERROR;
   13 }
   14 
   15 /* If expr evaluates to zero, ldv_assume() enters an infinite loop, which
   16    verifiers do not explore. */
   17 #define ldv_assume(expr) ((expr) ? 0 : ldv_stop())
   18 
   19 /* Infinite loop that causes verifiers to skip such paths. */
   20 static inline void ldv_stop(void) {
   21   LDV_STOP: goto LDV_STOP;
   22 }
   23 
   24 /* Special nondeterministic functions. */
   25 int ldv_undef_int(void);
   26 void *ldv_undef_ptr(void);
   27 unsigned long ldv_undef_ulong(void);
   28 long ldv_undef_long(void);
   29 /* Return a nondeterministic negative integer. */
   30 static inline int ldv_undef_int_negative(void)
   31 {
   32   int ret = ldv_undef_int();
   33 
   34   ldv_assume(ret < 0);
   35 
   36   return ret;
   37 }
   38 /* Return a nondeterministic nonpositive integer. */
   39 static inline int ldv_undef_int_nonpositive(void)
   40 {
   41   int ret = ldv_undef_int();
   42 
   43   ldv_assume(ret <= 0);
   44 
   45   return ret;
   46 }
   47 
   48 /* An explicit model for the __builtin_expect GCC built-in. Without the model
   49    its return value would be treated as nondeterministic by verifiers. */
   50 static inline long __builtin_expect(long exp, long c)
   51 {
   52   return exp;
   53 }
   54 
   55 /* This function causes the program to exit abnormally. GCC implements this
   56    function by using a target-dependent mechanism (such as intentionally
   57    executing an illegal instruction) or by calling abort. The mechanism used
   58    may vary from release to release, so you should not rely on any particular
   59    implementation. http://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html */
   60 static inline void __builtin_trap(void)
   61 {
   62   ldv_assert(0);
   63 }
   64 
   65 /* This constant is used to simulate an error return of ldv_undef_ptr(). */
   66 #define LDV_PTR_MAX 2012
   67 
   68 #endif /* _LDV_RCV_H_ */
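A hypothetical fragment of a rule model built on these primitives (the names and the locking rule itself are illustrative, not taken from this page): the model keeps a state variable and uses ldv_assert() to drive the verifier to the error label whenever the rule is violated.

static int ldv_lock_state = 0;

/* Model of a lock acquisition: acquiring the lock twice violates the rule. */
void ldv_model_lock(void)
{
  ldv_assert(ldv_lock_state == 0);
  ldv_lock_state = 1;
}

/* Model of a lock release: releasing an unheld lock violates the rule. */
void ldv_model_unlock(void)
{
  ldv_assert(ldv_lock_state == 1);
  ldv_lock_state = 0;
}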
Here is an explanation of a rule violation found while checking your driver against the corresponding kernel.
Note that it may be a false positive, i.e. there may be no real error at all. Please analyze the given error trace and the related source code to understand whether there is an error in your driver.
The Error trace column contains the path on which the given rule is violated. You can expand or collapse entity classes by clicking the corresponding checkboxes in the main menu or in the advanced Others menu, and expand or collapse each particular entity by clicking on +/-. Hovering over an entity shows a tip. The error trace is also linked to the related source code: line numbers may be shown as links on the left, and clicking one opens the corresponding line in the source code.
The Source code column contains the content of the files related to the error trace: the source code of your driver (note that there are some LDV modifications at the end), kernel headers, and the rule model. Tabs show the currently opened file and the other available files; hovering over a tab shows its full file name, and clicking it displays that file's content.
| Kernel | Module | Rule | Verifier | Verdict | Status | Timestamp | Bug report | 
| linux-3.14.1.tar.xz | drivers/net/wireless/prism54/prism54.ko | 331_1a | CPAchecker | Bug | Fixed | 2015-12-26 01:47:26 | L0215 | 
Comment
Reported: 26 Dec 2015