Error Trace
[Home]
Bug # 161
Show/hide error trace Error trace
{ 19 typedef signed char __s8; 20 typedef unsigned char __u8; 22 typedef short __s16; 23 typedef unsigned short __u16; 25 typedef int __s32; 26 typedef unsigned int __u32; 29 typedef long long __s64; 30 typedef unsigned long long __u64; 15 typedef signed char s8; 16 typedef unsigned char u8; 18 typedef short s16; 19 typedef unsigned short u16; 21 typedef int s32; 22 typedef unsigned int u32; 24 typedef long long s64; 25 typedef unsigned long long u64; 14 typedef long __kernel_long_t; 15 typedef unsigned long __kernel_ulong_t; 27 typedef int __kernel_pid_t; 48 typedef unsigned int __kernel_uid32_t; 49 typedef unsigned int __kernel_gid32_t; 71 typedef __kernel_ulong_t __kernel_size_t; 72 typedef __kernel_long_t __kernel_ssize_t; 87 typedef long long __kernel_loff_t; 88 typedef __kernel_long_t __kernel_time_t; 89 typedef __kernel_long_t __kernel_clock_t; 90 typedef int __kernel_timer_t; 91 typedef int __kernel_clockid_t; 32 typedef __u16 __le16; 33 typedef __u16 __be16; 34 typedef __u32 __le32; 35 typedef __u32 __be32; 37 typedef __u64 __be64; 40 typedef __u32 __wsum; 280 struct kernel_symbol { unsigned long value; const char *name; } ; 34 struct module ; 12 typedef __u32 __kernel_dev_t; 15 typedef __kernel_dev_t dev_t; 18 typedef unsigned short umode_t; 21 typedef __kernel_pid_t pid_t; 26 typedef __kernel_clockid_t clockid_t; 29 typedef _Bool bool; 31 typedef __kernel_uid32_t uid_t; 32 typedef __kernel_gid32_t gid_t; 45 typedef __kernel_loff_t loff_t; 54 typedef __kernel_size_t size_t; 59 typedef __kernel_ssize_t ssize_t; 69 typedef __kernel_time_t time_t; 102 typedef __s32 int32_t; 106 typedef __u8 uint8_t; 108 typedef __u32 uint32_t; 111 typedef __u64 uint64_t; 133 typedef unsigned long sector_t; 134 typedef unsigned long blkcnt_t; 152 typedef u64 dma_addr_t; 157 typedef unsigned int gfp_t; 158 typedef unsigned int fmode_t; 161 typedef u64 phys_addr_t; 166 typedef phys_addr_t resource_size_t; 176 struct __anonstruct_atomic_t_6 { int counter; } ; 176 typedef struct 
__anonstruct_atomic_t_6 atomic_t; 181 struct __anonstruct_atomic64_t_7 { long counter; } ; 181 typedef struct __anonstruct_atomic64_t_7 atomic64_t; 182 struct list_head { struct list_head *next; struct list_head *prev; } ; 187 struct hlist_node ; 187 struct hlist_head { struct hlist_node *first; } ; 191 struct hlist_node { struct hlist_node *next; struct hlist_node **pprev; } ; 202 struct callback_head { struct callback_head *next; void (*func)(struct callback_head *); } ; 115 typedef void (*ctor_fn_t)(); 68 struct ctl_table ; 58 struct device ; 64 struct net_device ; 450 struct file_operations ; 462 struct completion ; 463 struct pt_regs ; 557 struct bug_entry { int bug_addr_disp; int file_disp; unsigned short line; unsigned short flags; } ; 114 struct timespec ; 115 struct compat_timespec ; 116 struct thread_info { unsigned long flags; } ; 20 struct __anonstruct_futex_25 { u32 *uaddr; u32 val; u32 flags; u32 bitset; u64 time; u32 *uaddr2; } ; 20 struct __anonstruct_nanosleep_26 { clockid_t clockid; struct timespec *rmtp; struct compat_timespec *compat_rmtp; u64 expires; } ; 20 struct pollfd ; 20 struct __anonstruct_poll_27 { struct pollfd *ufds; int nfds; int has_timeout; unsigned long tv_sec; unsigned long tv_nsec; } ; 20 union __anonunion____missing_field_name_24 { struct __anonstruct_futex_25 futex; struct __anonstruct_nanosleep_26 nanosleep; struct __anonstruct_poll_27 poll; } ; 20 struct restart_block { long int (*fn)(struct restart_block *); union __anonunion____missing_field_name_24 __annonCompField4; } ; 39 struct page ; 26 struct task_struct ; 27 struct mm_struct ; 288 struct pt_regs { unsigned long r15; unsigned long r14; unsigned long r13; unsigned long r12; unsigned long bp; unsigned long bx; unsigned long r11; unsigned long r10; unsigned long r9; unsigned long r8; unsigned long ax; unsigned long cx; unsigned long dx; unsigned long si; unsigned long di; unsigned long orig_ax; unsigned long ip; unsigned long cs; unsigned long flags; unsigned long sp; 
unsigned long ss; } ; 66 struct __anonstruct____missing_field_name_30 { unsigned int a; unsigned int b; } ; 66 struct __anonstruct____missing_field_name_31 { u16 limit0; u16 base0; unsigned char base1; unsigned char type; unsigned char s; unsigned char dpl; unsigned char p; unsigned char limit; unsigned char avl; unsigned char l; unsigned char d; unsigned char g; unsigned char base2; } ; 66 union __anonunion____missing_field_name_29 { struct __anonstruct____missing_field_name_30 __annonCompField5; struct __anonstruct____missing_field_name_31 __annonCompField6; } ; 66 struct desc_struct { union __anonunion____missing_field_name_29 __annonCompField7; } ; 13 typedef unsigned long pteval_t; 14 typedef unsigned long pmdval_t; 16 typedef unsigned long pgdval_t; 17 typedef unsigned long pgprotval_t; 19 struct __anonstruct_pte_t_32 { pteval_t pte; } ; 19 typedef struct __anonstruct_pte_t_32 pte_t; 21 struct pgprot { pgprotval_t pgprot; } ; 256 typedef struct pgprot pgprot_t; 258 struct __anonstruct_pgd_t_33 { pgdval_t pgd; } ; 258 typedef struct __anonstruct_pgd_t_33 pgd_t; 297 struct __anonstruct_pmd_t_35 { pmdval_t pmd; } ; 297 typedef struct __anonstruct_pmd_t_35 pmd_t; 423 typedef struct page *pgtable_t; 434 struct file ; 445 struct seq_file ; 481 struct thread_struct ; 483 struct cpumask ; 20 struct qspinlock { atomic_t val; } ; 33 typedef struct qspinlock arch_spinlock_t; 34 struct qrwlock { atomic_t cnts; arch_spinlock_t wait_lock; } ; 14 typedef struct qrwlock arch_rwlock_t; 247 struct math_emu_info { long ___orig_eip; struct pt_regs *regs; } ; 83 struct static_key { atomic_t enabled; } ; 23 typedef atomic64_t atomic_long_t; 359 struct cpumask { unsigned long bits[128U]; } ; 15 typedef struct cpumask cpumask_t; 654 typedef struct cpumask *cpumask_var_t; 22 struct tracepoint_func { void *func; void *data; int prio; } ; 28 struct tracepoint { const char *name; struct static_key key; void (*regfunc)(); void (*unregfunc)(); struct tracepoint_func *funcs; } ; 246 struct 
fregs_state { u32 cwd; u32 swd; u32 twd; u32 fip; u32 fcs; u32 foo; u32 fos; u32 st_space[20U]; u32 status; } ; 26 struct __anonstruct____missing_field_name_59 { u64 rip; u64 rdp; } ; 26 struct __anonstruct____missing_field_name_60 { u32 fip; u32 fcs; u32 foo; u32 fos; } ; 26 union __anonunion____missing_field_name_58 { struct __anonstruct____missing_field_name_59 __annonCompField13; struct __anonstruct____missing_field_name_60 __annonCompField14; } ; 26 union __anonunion____missing_field_name_61 { u32 padding1[12U]; u32 sw_reserved[12U]; } ; 26 struct fxregs_state { u16 cwd; u16 swd; u16 twd; u16 fop; union __anonunion____missing_field_name_58 __annonCompField15; u32 mxcsr; u32 mxcsr_mask; u32 st_space[32U]; u32 xmm_space[64U]; u32 padding[12U]; union __anonunion____missing_field_name_61 __annonCompField16; } ; 66 struct swregs_state { u32 cwd; u32 swd; u32 twd; u32 fip; u32 fcs; u32 foo; u32 fos; u32 st_space[20U]; u8 ftop; u8 changed; u8 lookahead; u8 no_update; u8 rm; u8 alimit; struct math_emu_info *info; u32 entry_eip; } ; 227 struct xstate_header { u64 xfeatures; u64 xcomp_bv; u64 reserved[6U]; } ; 233 struct xregs_state { struct fxregs_state i387; struct xstate_header header; u8 extended_state_area[0U]; } ; 254 union fpregs_state { struct fregs_state fsave; struct fxregs_state fxsave; struct swregs_state soft; struct xregs_state xsave; u8 __padding[4096U]; } ; 271 struct fpu { unsigned int last_cpu; unsigned char fpstate_active; unsigned char fpregs_active; unsigned char counter; union fpregs_state state; } ; 169 struct seq_operations ; 372 struct perf_event ; 377 struct __anonstruct_mm_segment_t_73 { unsigned long seg; } ; 377 typedef struct __anonstruct_mm_segment_t_73 mm_segment_t; 378 struct thread_struct { struct desc_struct tls_array[3U]; unsigned long sp0; unsigned long sp; unsigned short es; unsigned short ds; unsigned short fsindex; unsigned short gsindex; u32 status; unsigned long fsbase; unsigned long gsbase; struct perf_event *ptrace_bps[4U]; 
unsigned long debugreg6; unsigned long ptrace_dr7; unsigned long cr2; unsigned long trap_nr; unsigned long error_code; unsigned long *io_bitmap_ptr; unsigned long iopl; unsigned int io_bitmap_max; mm_segment_t addr_limit; unsigned char sig_on_uaccess_err; unsigned char uaccess_err; struct fpu fpu; } ; 298 enum irqreturn { IRQ_NONE = 0, IRQ_HANDLED = 1, IRQ_WAKE_THREAD = 2 } ; 16 typedef enum irqreturn irqreturn_t; 10 struct lockdep_map ; 55 struct stack_trace { unsigned int nr_entries; unsigned int max_entries; unsigned long *entries; int skip; } ; 28 struct lockdep_subclass_key { char __one_byte; } ; 53 struct lock_class_key { struct lockdep_subclass_key subkeys[8U]; } ; 59 struct lock_class { struct hlist_node hash_entry; struct list_head lock_entry; struct lockdep_subclass_key *key; unsigned int subclass; unsigned int dep_gen_id; unsigned long usage_mask; struct stack_trace usage_traces[13U]; struct list_head locks_after; struct list_head locks_before; unsigned int version; unsigned long ops; const char *name; int name_version; unsigned long contention_point[4U]; unsigned long contending_point[4U]; } ; 144 struct lockdep_map { struct lock_class_key *key; struct lock_class *class_cache[2U]; const char *name; int cpu; unsigned long ip; } ; 207 struct held_lock { u64 prev_chain_key; unsigned long acquire_ip; struct lockdep_map *instance; struct lockdep_map *nest_lock; u64 waittime_stamp; u64 holdtime_stamp; unsigned short class_idx; unsigned char irq_context; unsigned char trylock; unsigned char read; unsigned char check; unsigned char hardirqs_off; unsigned short references; unsigned int pin_count; } ; 34 struct vm_area_struct ; 15 struct llist_node ; 64 struct llist_node { struct llist_node *next; } ; 66 struct resource { resource_size_t start; resource_size_t end; const char *name; unsigned long flags; unsigned long desc; struct resource *parent; struct resource *sibling; struct resource *child; } ; 220 struct pci_dev ; 33 struct raw_spinlock { arch_spinlock_t 
raw_lock; unsigned int magic; unsigned int owner_cpu; void *owner; struct lockdep_map dep_map; } ; 32 typedef struct raw_spinlock raw_spinlock_t; 33 struct __anonstruct____missing_field_name_139 { u8 __padding[24U]; struct lockdep_map dep_map; } ; 33 union __anonunion____missing_field_name_138 { struct raw_spinlock rlock; struct __anonstruct____missing_field_name_139 __annonCompField19; } ; 33 struct spinlock { union __anonunion____missing_field_name_138 __annonCompField20; } ; 76 typedef struct spinlock spinlock_t; 23 struct __anonstruct_rwlock_t_140 { arch_rwlock_t raw_lock; unsigned int magic; unsigned int owner_cpu; void *owner; struct lockdep_map dep_map; } ; 23 typedef struct __anonstruct_rwlock_t_140 rwlock_t; 416 struct seqcount { unsigned int sequence; struct lockdep_map dep_map; } ; 52 typedef struct seqcount seqcount_t; 407 struct __anonstruct_seqlock_t_155 { struct seqcount seqcount; spinlock_t lock; } ; 407 typedef struct __anonstruct_seqlock_t_155 seqlock_t; 601 struct timespec { __kernel_time_t tv_sec; long tv_nsec; } ; 7 typedef __s64 time64_t; 450 union ktime { s64 tv64; } ; 41 typedef union ktime ktime_t; 109 struct timer_list { struct hlist_node entry; unsigned long expires; void (*function)(unsigned long); unsigned long data; u32 flags; int start_pid; void *start_site; char start_comm[16U]; struct lockdep_map lockdep_map; } ; 254 struct hrtimer ; 255 enum hrtimer_restart ; 38 struct __wait_queue_head { spinlock_t lock; struct list_head task_list; } ; 43 typedef struct __wait_queue_head wait_queue_head_t; 1225 struct completion { unsigned int done; wait_queue_head_t wait; } ; 1144 struct rb_node { unsigned long __rb_parent_color; struct rb_node *rb_right; struct rb_node *rb_left; } ; 41 struct rb_root { struct rb_node *rb_node; } ; 83 struct user_namespace ; 22 struct __anonstruct_kuid_t_160 { uid_t val; } ; 22 typedef struct __anonstruct_kuid_t_160 kuid_t; 27 struct __anonstruct_kgid_t_161 { gid_t val; } ; 27 typedef struct 
__anonstruct_kgid_t_161 kgid_t; 835 struct nsproxy ; 836 struct ctl_table_root ; 837 struct ctl_table_header ; 838 struct ctl_dir ; 39 typedef int proc_handler(struct ctl_table *, int, void *, size_t *, loff_t *); 61 struct ctl_table_poll { atomic_t event; wait_queue_head_t wait; } ; 100 struct ctl_table { const char *procname; void *data; int maxlen; umode_t mode; struct ctl_table *child; proc_handler *proc_handler; struct ctl_table_poll *poll; void *extra1; void *extra2; } ; 121 struct ctl_node { struct rb_node node; struct ctl_table_header *header; } ; 126 struct __anonstruct____missing_field_name_163 { struct ctl_table *ctl_table; int used; int count; int nreg; } ; 126 union __anonunion____missing_field_name_162 { struct __anonstruct____missing_field_name_163 __annonCompField21; struct callback_head rcu; } ; 126 struct ctl_table_set ; 126 struct ctl_table_header { union __anonunion____missing_field_name_162 __annonCompField22; struct completion *unregistering; struct ctl_table *ctl_table_arg; struct ctl_table_root *root; struct ctl_table_set *set; struct ctl_dir *parent; struct ctl_node *node; } ; 147 struct ctl_dir { struct ctl_table_header header; struct rb_root root; } ; 153 struct ctl_table_set { int (*is_seen)(struct ctl_table_set *); struct ctl_dir dir; } ; 158 struct ctl_table_root { struct ctl_table_set default_set; struct ctl_table_set * (*lookup)(struct ctl_table_root *); void (*set_ownership)(struct ctl_table_header *, struct ctl_table *, kuid_t *, kgid_t *); int (*permissions)(struct ctl_table_header *, struct ctl_table *); } ; 278 struct workqueue_struct ; 279 struct work_struct ; 54 struct work_struct { atomic_long_t data; struct list_head entry; void (*func)(struct work_struct *); struct lockdep_map lockdep_map; } ; 107 struct delayed_work { struct work_struct work; struct timer_list timer; struct workqueue_struct *wq; int cpu; } ; 58 struct pm_message { int event; } ; 64 typedef struct pm_message pm_message_t; 65 struct dev_pm_ops { int 
(*prepare)(struct device *); void (*complete)(struct device *); int (*suspend)(struct device *); int (*resume)(struct device *); int (*freeze)(struct device *); int (*thaw)(struct device *); int (*poweroff)(struct device *); int (*restore)(struct device *); int (*suspend_late)(struct device *); int (*resume_early)(struct device *); int (*freeze_late)(struct device *); int (*thaw_early)(struct device *); int (*poweroff_late)(struct device *); int (*restore_early)(struct device *); int (*suspend_noirq)(struct device *); int (*resume_noirq)(struct device *); int (*freeze_noirq)(struct device *); int (*thaw_noirq)(struct device *); int (*poweroff_noirq)(struct device *); int (*restore_noirq)(struct device *); int (*runtime_suspend)(struct device *); int (*runtime_resume)(struct device *); int (*runtime_idle)(struct device *); } ; 320 enum rpm_status { RPM_ACTIVE = 0, RPM_RESUMING = 1, RPM_SUSPENDED = 2, RPM_SUSPENDING = 3 } ; 327 enum rpm_request { RPM_REQ_NONE = 0, RPM_REQ_IDLE = 1, RPM_REQ_SUSPEND = 2, RPM_REQ_AUTOSUSPEND = 3, RPM_REQ_RESUME = 4 } ; 335 struct wakeup_source ; 336 struct wake_irq ; 337 struct pm_domain_data ; 338 struct pm_subsys_data { spinlock_t lock; unsigned int refcount; struct list_head clock_list; struct pm_domain_data *domain_data; } ; 556 struct dev_pm_qos ; 556 struct dev_pm_info { pm_message_t power_state; unsigned char can_wakeup; unsigned char async_suspend; bool is_prepared; bool is_suspended; bool is_noirq_suspended; bool is_late_suspended; bool early_init; bool direct_complete; spinlock_t lock; struct list_head entry; struct completion completion; struct wakeup_source *wakeup; bool wakeup_path; bool syscore; bool no_pm_callbacks; struct timer_list suspend_timer; unsigned long timer_expires; struct work_struct work; wait_queue_head_t wait_queue; struct wake_irq *wakeirq; atomic_t usage_count; atomic_t child_count; unsigned char disable_depth; unsigned char idle_notification; unsigned char request_pending; unsigned char deferred_resume; 
unsigned char run_wake; unsigned char runtime_auto; bool ignore_children; unsigned char no_callbacks; unsigned char irq_safe; unsigned char use_autosuspend; unsigned char timer_autosuspends; unsigned char memalloc_noio; enum rpm_request request; enum rpm_status runtime_status; int runtime_error; int autosuspend_delay; unsigned long last_busy; unsigned long active_jiffies; unsigned long suspended_jiffies; unsigned long accounting_timestamp; struct pm_subsys_data *subsys_data; void (*set_latency_tolerance)(struct device *, s32 ); struct dev_pm_qos *qos; } ; 616 struct dev_pm_domain { struct dev_pm_ops ops; void (*detach)(struct device *, bool ); int (*activate)(struct device *); void (*sync)(struct device *); void (*dismiss)(struct device *); } ; 97 struct __anonstruct_nodemask_t_164 { unsigned long bits[16U]; } ; 97 typedef struct __anonstruct_nodemask_t_164 nodemask_t; 144 struct pci_bus ; 13 struct optimistic_spin_queue { atomic_t tail; } ; 39 struct mutex { atomic_t count; spinlock_t wait_lock; struct list_head wait_list; struct task_struct *owner; void *magic; struct lockdep_map dep_map; } ; 67 struct mutex_waiter { struct list_head list; struct task_struct *task; void *magic; } ; 34 struct ldt_struct ; 34 struct vdso_image ; 34 struct __anonstruct_mm_context_t_165 { struct ldt_struct *ldt; unsigned short ia32_compat; struct mutex lock; void *vdso; const struct vdso_image *vdso_image; atomic_t perf_rdpmc_allowed; u16 pkey_allocation_map; s16 execute_only_pkey; } ; 34 typedef struct __anonstruct_mm_context_t_165 mm_context_t; 22 struct bio_vec ; 162 struct notifier_block ; 82 struct free_area { struct list_head free_list[6U]; unsigned long nr_free; } ; 94 struct pglist_data ; 95 struct zone_padding { char x[0U]; } ; 210 struct zone_reclaim_stat { unsigned long recent_rotated[2U]; unsigned long recent_scanned[2U]; } ; 223 struct lruvec { struct list_head lists[5U]; struct zone_reclaim_stat reclaim_stat; atomic_long_t inactive_age; struct pglist_data *pgdat; } ; 
249 typedef unsigned int isolate_mode_t; 257 struct per_cpu_pages { int count; int high; int batch; struct list_head lists[3U]; } ; 270 struct per_cpu_pageset { struct per_cpu_pages pcp; s8 expire; s8 stat_threshold; s8 vm_stat_diff[21U]; } ; 280 struct per_cpu_nodestat { s8 stat_threshold; s8 vm_node_stat_diff[26U]; } ; 286 enum zone_type { ZONE_DMA = 0, ZONE_DMA32 = 1, ZONE_NORMAL = 2, ZONE_MOVABLE = 3, __MAX_NR_ZONES = 4 } ; 294 struct zone { unsigned long watermark[3U]; unsigned long nr_reserved_highatomic; long lowmem_reserve[4U]; int node; struct pglist_data *zone_pgdat; struct per_cpu_pageset *pageset; unsigned long zone_start_pfn; unsigned long managed_pages; unsigned long spanned_pages; unsigned long present_pages; const char *name; unsigned long nr_isolate_pageblock; wait_queue_head_t *wait_table; unsigned long wait_table_hash_nr_entries; unsigned long wait_table_bits; struct zone_padding _pad1_; struct free_area free_area[11U]; unsigned long flags; spinlock_t lock; struct zone_padding _pad2_; unsigned long percpu_drift_mark; unsigned long compact_cached_free_pfn; unsigned long compact_cached_migrate_pfn[2U]; unsigned int compact_considered; unsigned int compact_defer_shift; int compact_order_failed; bool compact_blockskip_flush; bool contiguous; struct zone_padding _pad3_; atomic_long_t vm_stat[21U]; } ; 562 struct zoneref { struct zone *zone; int zone_idx; } ; 587 struct zonelist { struct zoneref _zonerefs[4097U]; } ; 610 struct pglist_data { struct zone node_zones[4U]; struct zonelist node_zonelists[2U]; int nr_zones; unsigned long node_start_pfn; unsigned long node_present_pages; unsigned long node_spanned_pages; int node_id; wait_queue_head_t kswapd_wait; wait_queue_head_t pfmemalloc_wait; struct task_struct *kswapd; int kswapd_order; enum zone_type kswapd_classzone_idx; int kcompactd_max_order; enum zone_type kcompactd_classzone_idx; wait_queue_head_t kcompactd_wait; struct task_struct *kcompactd; spinlock_t numabalancing_migrate_lock; unsigned long 
numabalancing_migrate_next_window; unsigned long numabalancing_migrate_nr_pages; unsigned long totalreserve_pages; unsigned long min_unmapped_pages; unsigned long min_slab_pages; struct zone_padding _pad1_; spinlock_t lru_lock; spinlock_t split_queue_lock; struct list_head split_queue; unsigned long split_queue_len; struct lruvec lruvec; unsigned int inactive_ratio; unsigned long flags; struct zone_padding _pad2_; struct per_cpu_nodestat *per_cpu_nodestats; atomic_long_t vm_stat[26U]; } ; 775 struct rw_semaphore ; 776 struct rw_semaphore { atomic_long_t count; struct list_head wait_list; raw_spinlock_t wait_lock; struct optimistic_spin_queue osq; struct task_struct *owner; struct lockdep_map dep_map; } ; 53 struct notifier_block { int (*notifier_call)(struct notifier_block *, unsigned long, void *); struct notifier_block *next; int priority; } ; 64 struct irq_domain ; 422 union __anonunion____missing_field_name_205 { unsigned long bitmap[4U]; struct callback_head callback_head; } ; 422 struct idr_layer { int prefix; int layer; struct idr_layer *ary[256U]; int count; union __anonunion____missing_field_name_205 __annonCompField33; } ; 41 struct idr { struct idr_layer *hint; struct idr_layer *top; int layers; int cur; spinlock_t lock; int id_free_cnt; struct idr_layer *id_free; } ; 124 struct ida_bitmap { long nr_busy; unsigned long bitmap[15U]; } ; 167 struct ida { struct idr idr; struct ida_bitmap *free_bitmap; } ; 199 struct dentry ; 200 struct iattr ; 201 struct super_block ; 202 struct file_system_type ; 203 struct kernfs_open_node ; 204 struct kernfs_iattrs ; 227 struct kernfs_root ; 227 struct kernfs_elem_dir { unsigned long subdirs; struct rb_root children; struct kernfs_root *root; } ; 85 struct kernfs_node ; 85 struct kernfs_elem_symlink { struct kernfs_node *target_kn; } ; 89 struct kernfs_ops ; 89 struct kernfs_elem_attr { const struct kernfs_ops *ops; struct kernfs_open_node *open; loff_t size; struct kernfs_node *notify_next; } ; 96 union 
__anonunion____missing_field_name_210 { struct kernfs_elem_dir dir; struct kernfs_elem_symlink symlink; struct kernfs_elem_attr attr; } ; 96 struct kernfs_node { atomic_t count; atomic_t active; struct lockdep_map dep_map; struct kernfs_node *parent; const char *name; struct rb_node rb; const void *ns; unsigned int hash; union __anonunion____missing_field_name_210 __annonCompField34; void *priv; unsigned short flags; umode_t mode; unsigned int ino; struct kernfs_iattrs *iattr; } ; 138 struct kernfs_syscall_ops { int (*remount_fs)(struct kernfs_root *, int *, char *); int (*show_options)(struct seq_file *, struct kernfs_root *); int (*mkdir)(struct kernfs_node *, const char *, umode_t ); int (*rmdir)(struct kernfs_node *); int (*rename)(struct kernfs_node *, struct kernfs_node *, const char *); int (*show_path)(struct seq_file *, struct kernfs_node *, struct kernfs_root *); } ; 157 struct kernfs_root { struct kernfs_node *kn; unsigned int flags; struct ida ino_ida; struct kernfs_syscall_ops *syscall_ops; struct list_head supers; wait_queue_head_t deactivate_waitq; } ; 173 struct vm_operations_struct ; 173 struct kernfs_open_file { struct kernfs_node *kn; struct file *file; void *priv; struct mutex mutex; struct mutex prealloc_mutex; int event; struct list_head list; char *prealloc_buf; size_t atomic_write_len; bool mmapped; const struct vm_operations_struct *vm_ops; } ; 191 struct kernfs_ops { int (*seq_show)(struct seq_file *, void *); void * (*seq_start)(struct seq_file *, loff_t *); void * (*seq_next)(struct seq_file *, void *, loff_t *); void (*seq_stop)(struct seq_file *, void *); ssize_t (*read)(struct kernfs_open_file *, char *, size_t , loff_t ); size_t atomic_write_len; bool prealloc; ssize_t (*write)(struct kernfs_open_file *, char *, size_t , loff_t ); int (*mmap)(struct kernfs_open_file *, struct vm_area_struct *); struct lock_class_key lockdep_key; } ; 286 struct inode ; 511 struct sock ; 512 struct kobject ; 513 enum kobj_ns_type { KOBJ_NS_TYPE_NONE = 
0, KOBJ_NS_TYPE_NET = 1, KOBJ_NS_TYPES = 2 } ; 519 struct kobj_ns_type_operations { enum kobj_ns_type type; bool (*current_may_mount)(); void * (*grab_current_ns)(); const void * (*netlink_ns)(struct sock *); const void * (*initial_ns)(); void (*drop_ns)(void *); } ; 135 struct kstat { u64 ino; dev_t dev; umode_t mode; unsigned int nlink; kuid_t uid; kgid_t gid; dev_t rdev; loff_t size; struct timespec atime; struct timespec mtime; struct timespec ctime; unsigned long blksize; unsigned long long blocks; } ; 36 struct bin_attribute ; 37 struct attribute { const char *name; umode_t mode; bool ignore_lockdep; struct lock_class_key *key; struct lock_class_key skey; } ; 37 struct attribute_group { const char *name; umode_t (*is_visible)(struct kobject *, struct attribute *, int); umode_t (*is_bin_visible)(struct kobject *, struct bin_attribute *, int); struct attribute **attrs; struct bin_attribute **bin_attrs; } ; 92 struct bin_attribute { struct attribute attr; size_t size; void *private; ssize_t (*read)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t , size_t ); ssize_t (*write)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t , size_t ); int (*mmap)(struct file *, struct kobject *, struct bin_attribute *, struct vm_area_struct *); } ; 165 struct sysfs_ops { ssize_t (*show)(struct kobject *, struct attribute *, char *); ssize_t (*store)(struct kobject *, struct attribute *, const char *, size_t ); } ; 530 struct kref { atomic_t refcount; } ; 52 struct kset ; 52 struct kobj_type ; 52 struct kobject { const char *name; struct list_head entry; struct kobject *parent; struct kset *kset; struct kobj_type *ktype; struct kernfs_node *sd; struct kref kref; struct delayed_work release; unsigned char state_initialized; unsigned char state_in_sysfs; unsigned char state_add_uevent_sent; unsigned char state_remove_uevent_sent; unsigned char uevent_suppress; } ; 115 struct kobj_type { void (*release)(struct kobject *); const struct 
sysfs_ops *sysfs_ops; struct attribute **default_attrs; const struct kobj_ns_type_operations * (*child_ns_type)(struct kobject *); const void * (*namespace)(struct kobject *); } ; 123 struct kobj_uevent_env { char *argv[3U]; char *envp[32U]; int envp_idx; char buf[2048U]; int buflen; } ; 131 struct kset_uevent_ops { const int (*filter)(struct kset *, struct kobject *); const const char * (*name)(struct kset *, struct kobject *); const int (*uevent)(struct kset *, struct kobject *, struct kobj_uevent_env *); } ; 148 struct kset { struct list_head list; spinlock_t list_lock; struct kobject kobj; const struct kset_uevent_ops *uevent_ops; } ; 224 struct proc_dir_entry ; 130 struct exception_table_entry { int insn; int fixup; int handler; } ; 61 struct timerqueue_node { struct rb_node node; ktime_t expires; } ; 12 struct timerqueue_head { struct rb_root head; struct timerqueue_node *next; } ; 50 struct hrtimer_clock_base ; 51 struct hrtimer_cpu_base ; 60 enum hrtimer_restart { HRTIMER_NORESTART = 0, HRTIMER_RESTART = 1 } ; 65 struct hrtimer { struct timerqueue_node node; ktime_t _softexpires; enum hrtimer_restart (*function)(struct hrtimer *); struct hrtimer_clock_base *base; u8 state; u8 is_rel; int start_pid; void *start_site; char start_comm[16U]; } ; 125 struct hrtimer_clock_base { struct hrtimer_cpu_base *cpu_base; int index; clockid_t clockid; struct timerqueue_head active; ktime_t (*get_time)(); ktime_t offset; } ; 158 struct hrtimer_cpu_base { raw_spinlock_t lock; seqcount_t seq; struct hrtimer *running; unsigned int cpu; unsigned int active_bases; unsigned int clock_was_set_seq; bool migration_enabled; bool nohz_active; unsigned char in_hrtirq; unsigned char hres_active; unsigned char hang_detected; ktime_t expires_next; struct hrtimer *next_timer; unsigned int nr_events; unsigned int nr_retries; unsigned int nr_hangs; unsigned int max_hang_time; struct hrtimer_clock_base clock_base[4U]; } ; 494 struct tasklet_struct { struct tasklet_struct *next; unsigned long 
state; atomic_t count; void (*func)(unsigned long); unsigned long data; } ; 716 struct iovec { void *iov_base; __kernel_size_t iov_len; } ; 21 struct pipe_inode_info ; 22 struct kvec { void *iov_base; size_t iov_len; } ; 29 union __anonunion____missing_field_name_222 { const struct iovec *iov; const struct kvec *kvec; const struct bio_vec *bvec; struct pipe_inode_info *pipe; } ; 29 union __anonunion____missing_field_name_223 { unsigned long nr_segs; int idx; } ; 29 struct iov_iter { int type; size_t iov_offset; size_t count; union __anonunion____missing_field_name_222 __annonCompField43; union __anonunion____missing_field_name_223 __annonCompField44; } ; 11 typedef unsigned short __kernel_sa_family_t; 18 struct pid ; 19 struct cred ; 23 typedef __kernel_sa_family_t sa_family_t; 24 struct sockaddr { sa_family_t sa_family; char sa_data[14U]; } ; 38 struct kiocb ; 38 struct msghdr { void *msg_name; int msg_namelen; struct iov_iter msg_iter; void *msg_control; __kernel_size_t msg_controllen; unsigned int msg_flags; struct kiocb *msg_iocb; } ; 43 struct __anonstruct_sync_serial_settings_224 { unsigned int clock_rate; unsigned int clock_type; unsigned short loopback; } ; 43 typedef struct __anonstruct_sync_serial_settings_224 sync_serial_settings; 50 struct __anonstruct_te1_settings_225 { unsigned int clock_rate; unsigned int clock_type; unsigned short loopback; unsigned int slot_map; } ; 50 typedef struct __anonstruct_te1_settings_225 te1_settings; 55 struct __anonstruct_raw_hdlc_proto_226 { unsigned short encoding; unsigned short parity; } ; 55 typedef struct __anonstruct_raw_hdlc_proto_226 raw_hdlc_proto; 65 struct __anonstruct_fr_proto_227 { unsigned int t391; unsigned int t392; unsigned int n391; unsigned int n392; unsigned int n393; unsigned short lmi; unsigned short dce; } ; 65 typedef struct __anonstruct_fr_proto_227 fr_proto; 69 struct __anonstruct_fr_proto_pvc_228 { unsigned int dlci; } ; 69 typedef struct __anonstruct_fr_proto_pvc_228 fr_proto_pvc; 74 struct 
__anonstruct_fr_proto_pvc_info_229 { unsigned int dlci; char master[16U]; } ; 74 typedef struct __anonstruct_fr_proto_pvc_info_229 fr_proto_pvc_info; 79 struct __anonstruct_cisco_proto_230 { unsigned int interval; unsigned int timeout; } ; 79 typedef struct __anonstruct_cisco_proto_230 cisco_proto; 117 struct ifmap { unsigned long mem_start; unsigned long mem_end; unsigned short base_addr; unsigned char irq; unsigned char dma; unsigned char port; } ; 197 union __anonunion_ifs_ifsu_231 { raw_hdlc_proto *raw_hdlc; cisco_proto *cisco; fr_proto *fr; fr_proto_pvc *fr_pvc; fr_proto_pvc_info *fr_pvc_info; sync_serial_settings *sync; te1_settings *te1; } ; 197 struct if_settings { unsigned int type; unsigned int size; union __anonunion_ifs_ifsu_231 ifs_ifsu; } ; 216 union __anonunion_ifr_ifrn_232 { char ifrn_name[16U]; } ; 216 union __anonunion_ifr_ifru_233 { struct sockaddr ifru_addr; struct sockaddr ifru_dstaddr; struct sockaddr ifru_broadaddr; struct sockaddr ifru_netmask; struct sockaddr ifru_hwaddr; short ifru_flags; int ifru_ivalue; int ifru_mtu; struct ifmap ifru_map; char ifru_slave[16U]; char ifru_newname[16U]; void *ifru_data; struct if_settings ifru_settings; } ; 216 struct ifreq { union __anonunion_ifr_ifrn_232 ifr_ifrn; union __anonunion_ifr_ifru_233 ifr_ifru; } ; 58 struct arch_uprobe_task { unsigned long saved_scratch_register; unsigned int saved_trap_nr; unsigned int saved_tf; } ; 66 enum uprobe_task_state { UTASK_RUNNING = 0, UTASK_SSTEP = 1, UTASK_SSTEP_ACK = 2, UTASK_SSTEP_TRAPPED = 3 } ; 73 struct __anonstruct____missing_field_name_240 { struct arch_uprobe_task autask; unsigned long vaddr; } ; 73 struct __anonstruct____missing_field_name_241 { struct callback_head dup_xol_work; unsigned long dup_xol_addr; } ; 73 union __anonunion____missing_field_name_239 { struct __anonstruct____missing_field_name_240 __annonCompField47; struct __anonstruct____missing_field_name_241 __annonCompField48; } ; 73 struct uprobe ; 73 struct return_instance ; 73 struct 
uprobe_task { enum uprobe_task_state state; union __anonunion____missing_field_name_239 __annonCompField49; struct uprobe *active_uprobe; unsigned long xol_vaddr; struct return_instance *return_instances; unsigned int depth; } ; 94 struct return_instance { struct uprobe *uprobe; unsigned long func; unsigned long stack; unsigned long orig_ret_vaddr; bool chained; struct return_instance *next; } ; 110 struct xol_area ; 111 struct uprobes_state { struct xol_area *xol_area; } ; 150 struct address_space ; 151 struct mem_cgroup ; 152 union __anonunion____missing_field_name_242 { struct address_space *mapping; void *s_mem; atomic_t compound_mapcount; } ; 152 union __anonunion____missing_field_name_243 { unsigned long index; void *freelist; } ; 152 struct __anonstruct____missing_field_name_247 { unsigned short inuse; unsigned short objects; unsigned char frozen; } ; 152 union __anonunion____missing_field_name_246 { atomic_t _mapcount; unsigned int active; struct __anonstruct____missing_field_name_247 __annonCompField52; int units; } ; 152 struct __anonstruct____missing_field_name_245 { union __anonunion____missing_field_name_246 __annonCompField53; atomic_t _refcount; } ; 152 union __anonunion____missing_field_name_244 { unsigned long counters; struct __anonstruct____missing_field_name_245 __annonCompField54; } ; 152 struct dev_pagemap ; 152 struct __anonstruct____missing_field_name_249 { struct page *next; int pages; int pobjects; } ; 152 struct __anonstruct____missing_field_name_250 { unsigned long compound_head; unsigned int compound_dtor; unsigned int compound_order; } ; 152 struct __anonstruct____missing_field_name_251 { unsigned long __pad; pgtable_t pmd_huge_pte; } ; 152 union __anonunion____missing_field_name_248 { struct list_head lru; struct dev_pagemap *pgmap; struct __anonstruct____missing_field_name_249 __annonCompField56; struct callback_head callback_head; struct __anonstruct____missing_field_name_250 __annonCompField57; struct 
__anonstruct____missing_field_name_251 __annonCompField58; } ; 152 struct kmem_cache ; 152 union __anonunion____missing_field_name_252 { unsigned long private; spinlock_t *ptl; struct kmem_cache *slab_cache; } ; 152 struct page { unsigned long flags; union __anonunion____missing_field_name_242 __annonCompField50; union __anonunion____missing_field_name_243 __annonCompField51; union __anonunion____missing_field_name_244 __annonCompField55; union __anonunion____missing_field_name_248 __annonCompField59; union __anonunion____missing_field_name_252 __annonCompField60; struct mem_cgroup *mem_cgroup; } ; 197 struct page_frag { struct page *page; __u32 offset; __u32 size; } ; 282 struct userfaultfd_ctx ; 282 struct vm_userfaultfd_ctx { struct userfaultfd_ctx *ctx; } ; 289 struct __anonstruct_shared_253 { struct rb_node rb; unsigned long rb_subtree_last; } ; 289 struct anon_vma ; 289 struct mempolicy ; 289 struct vm_area_struct { unsigned long vm_start; unsigned long vm_end; struct vm_area_struct *vm_next; struct vm_area_struct *vm_prev; struct rb_node vm_rb; unsigned long rb_subtree_gap; struct mm_struct *vm_mm; pgprot_t vm_page_prot; unsigned long vm_flags; struct __anonstruct_shared_253 shared; struct list_head anon_vma_chain; struct anon_vma *anon_vma; const struct vm_operations_struct *vm_ops; unsigned long vm_pgoff; struct file *vm_file; void *vm_private_data; struct mempolicy *vm_policy; struct vm_userfaultfd_ctx vm_userfaultfd_ctx; } ; 362 struct core_thread { struct task_struct *task; struct core_thread *next; } ; 367 struct core_state { atomic_t nr_threads; struct core_thread dumper; struct completion startup; } ; 381 struct task_rss_stat { int events; int count[4U]; } ; 389 struct mm_rss_stat { atomic_long_t count[4U]; } ; 394 struct kioctx_table ; 395 struct linux_binfmt ; 395 struct mmu_notifier_mm ; 395 struct mm_struct { struct vm_area_struct *mmap; struct rb_root mm_rb; u32 vmacache_seqnum; unsigned long int (*get_unmapped_area)(struct file *, unsigned 
long, unsigned long, unsigned long, unsigned long); unsigned long mmap_base; unsigned long mmap_legacy_base; unsigned long task_size; unsigned long highest_vm_end; pgd_t *pgd; atomic_t mm_users; atomic_t mm_count; atomic_long_t nr_ptes; atomic_long_t nr_pmds; int map_count; spinlock_t page_table_lock; struct rw_semaphore mmap_sem; struct list_head mmlist; unsigned long hiwater_rss; unsigned long hiwater_vm; unsigned long total_vm; unsigned long locked_vm; unsigned long pinned_vm; unsigned long data_vm; unsigned long exec_vm; unsigned long stack_vm; unsigned long def_flags; unsigned long start_code; unsigned long end_code; unsigned long start_data; unsigned long end_data; unsigned long start_brk; unsigned long brk; unsigned long start_stack; unsigned long arg_start; unsigned long arg_end; unsigned long env_start; unsigned long env_end; unsigned long saved_auxv[46U]; struct mm_rss_stat rss_stat; struct linux_binfmt *binfmt; cpumask_var_t cpu_vm_mask_var; mm_context_t context; unsigned long flags; struct core_state *core_state; spinlock_t ioctx_lock; struct kioctx_table *ioctx_table; struct task_struct *owner; struct file *exe_file; struct mmu_notifier_mm *mmu_notifier_mm; struct cpumask cpumask_allocation; unsigned long numa_next_scan; unsigned long numa_scan_offset; int numa_scan_seq; bool tlb_flush_pending; struct uprobes_state uprobes_state; void *bd_addr; atomic_long_t hugetlb_usage; struct work_struct async_put_work; } ; 563 struct vm_fault ; 93 struct hlist_bl_node ; 93 struct hlist_bl_head { struct hlist_bl_node *first; } ; 36 struct hlist_bl_node { struct hlist_bl_node *next; struct hlist_bl_node **pprev; } ; 114 struct __anonstruct____missing_field_name_306 { spinlock_t lock; int count; } ; 114 union __anonunion____missing_field_name_305 { struct __anonstruct____missing_field_name_306 __annonCompField61; } ; 114 struct lockref { union __anonunion____missing_field_name_305 __annonCompField62; } ; 77 struct path ; 78 struct vfsmount ; 79 struct 
__anonstruct____missing_field_name_308 { u32 hash; u32 len; } ; 79 union __anonunion____missing_field_name_307 { struct __anonstruct____missing_field_name_308 __annonCompField63; u64 hash_len; } ; 79 struct qstr { union __anonunion____missing_field_name_307 __annonCompField64; const unsigned char *name; } ; 65 struct dentry_operations ; 65 union __anonunion____missing_field_name_309 { struct list_head d_lru; wait_queue_head_t *d_wait; } ; 65 union __anonunion_d_u_310 { struct hlist_node d_alias; struct hlist_bl_node d_in_lookup_hash; struct callback_head d_rcu; } ; 65 struct dentry { unsigned int d_flags; seqcount_t d_seq; struct hlist_bl_node d_hash; struct dentry *d_parent; struct qstr d_name; struct inode *d_inode; unsigned char d_iname[32U]; struct lockref d_lockref; const struct dentry_operations *d_op; struct super_block *d_sb; unsigned long d_time; void *d_fsdata; union __anonunion____missing_field_name_309 __annonCompField65; struct list_head d_child; struct list_head d_subdirs; union __anonunion_d_u_310 d_u; } ; 121 struct dentry_operations { int (*d_revalidate)(struct dentry *, unsigned int); int (*d_weak_revalidate)(struct dentry *, unsigned int); int (*d_hash)(const struct dentry *, struct qstr *); int (*d_compare)(const struct dentry *, unsigned int, const char *, const struct qstr *); int (*d_delete)(const struct dentry *); int (*d_init)(struct dentry *); void (*d_release)(struct dentry *); void (*d_prune)(struct dentry *); void (*d_iput)(struct dentry *, struct inode *); char * (*d_dname)(struct dentry *, char *, int); struct vfsmount * (*d_automount)(struct path *); int (*d_manage)(struct dentry *, bool ); struct dentry * (*d_real)(struct dentry *, const struct inode *, unsigned int); } ; 592 struct path { struct vfsmount *mnt; struct dentry *dentry; } ; 19 struct shrink_control { gfp_t gfp_mask; unsigned long nr_to_scan; int nid; struct mem_cgroup *memcg; } ; 27 struct shrinker { unsigned long int (*count_objects)(struct shrinker *, struct 
shrink_control *); unsigned long int (*scan_objects)(struct shrinker *, struct shrink_control *); int seeks; long batch; unsigned long flags; struct list_head list; atomic_long_t *nr_deferred; } ; 80 struct list_lru_one { struct list_head list; long nr_items; } ; 32 struct list_lru_memcg { struct list_lru_one *lru[0U]; } ; 37 struct list_lru_node { spinlock_t lock; struct list_lru_one lru; struct list_lru_memcg *memcg_lrus; } ; 47 struct list_lru { struct list_lru_node *node; struct list_head list; } ; 63 struct __anonstruct____missing_field_name_312 { struct radix_tree_node *parent; void *private_data; } ; 63 union __anonunion____missing_field_name_311 { struct __anonstruct____missing_field_name_312 __annonCompField66; struct callback_head callback_head; } ; 63 struct radix_tree_node { unsigned char shift; unsigned char offset; unsigned int count; union __anonunion____missing_field_name_311 __annonCompField67; struct list_head private_list; void *slots[64U]; unsigned long tags[3U][1U]; } ; 106 struct radix_tree_root { gfp_t gfp_mask; struct radix_tree_node *rnode; } ; 531 enum pid_type { PIDTYPE_PID = 0, PIDTYPE_PGID = 1, PIDTYPE_SID = 2, PIDTYPE_MAX = 3 } ; 538 struct pid_namespace ; 538 struct upid { int nr; struct pid_namespace *ns; struct hlist_node pid_chain; } ; 56 struct pid { atomic_t count; unsigned int level; struct hlist_head tasks[3U]; struct callback_head rcu; struct upid numbers[1U]; } ; 68 struct pid_link { struct hlist_node node; struct pid *pid; } ; 22 struct kernel_cap_struct { __u32 cap[2U]; } ; 25 typedef struct kernel_cap_struct kernel_cap_t; 45 struct fiemap_extent { __u64 fe_logical; __u64 fe_physical; __u64 fe_length; __u64 fe_reserved64[2U]; __u32 fe_flags; __u32 fe_reserved[3U]; } ; 38 enum migrate_mode { MIGRATE_ASYNC = 0, MIGRATE_SYNC_LIGHT = 1, MIGRATE_SYNC = 2 } ; 44 enum rcu_sync_type { RCU_SYNC = 0, RCU_SCHED_SYNC = 1, RCU_BH_SYNC = 2 } ; 50 struct rcu_sync { int gp_state; int gp_count; wait_queue_head_t gp_wait; int cb_state; 
struct callback_head cb_head; enum rcu_sync_type gp_type; } ; 66 struct percpu_rw_semaphore { struct rcu_sync rss; unsigned int *read_count; struct rw_semaphore rw_sem; wait_queue_head_t writer; int readers_block; } ; 144 struct bio_vec { struct page *bv_page; unsigned int bv_len; unsigned int bv_offset; } ; 34 struct bvec_iter { sector_t bi_sector; unsigned int bi_size; unsigned int bi_idx; unsigned int bi_bvec_done; } ; 84 struct bio_set ; 85 struct bio ; 86 struct bio_integrity_payload ; 87 struct block_device ; 88 struct io_context ; 89 struct cgroup_subsys_state ; 18 typedef void bio_end_io_t(struct bio *); 19 union __anonunion____missing_field_name_320 { struct bio_integrity_payload *bi_integrity; } ; 19 struct bio { struct bio *bi_next; struct block_device *bi_bdev; int bi_error; unsigned int bi_opf; unsigned short bi_flags; unsigned short bi_ioprio; struct bvec_iter bi_iter; unsigned int bi_phys_segments; unsigned int bi_seg_front_size; unsigned int bi_seg_back_size; atomic_t __bi_remaining; bio_end_io_t *bi_end_io; void *bi_private; struct io_context *bi_ioc; struct cgroup_subsys_state *bi_css; union __anonunion____missing_field_name_320 __annonCompField68; unsigned short bi_vcnt; unsigned short bi_max_vecs; atomic_t __bi_cnt; struct bio_vec *bi_io_vec; struct bio_set *bi_pool; struct bio_vec bi_inline_vecs[0U]; } ; 273 struct delayed_call { void (*fn)(void *); void *arg; } ; 264 struct backing_dev_info ; 265 struct bdi_writeback ; 266 struct export_operations ; 268 struct poll_table_struct ; 269 struct kstatfs ; 270 struct swap_info_struct ; 271 struct fscrypt_info ; 272 struct fscrypt_operations ; 76 struct iattr { unsigned int ia_valid; umode_t ia_mode; kuid_t ia_uid; kgid_t ia_gid; loff_t ia_size; struct timespec ia_atime; struct timespec ia_mtime; struct timespec ia_ctime; struct file *ia_file; } ; 262 struct percpu_counter { raw_spinlock_t lock; s64 count; struct list_head list; s32 *counters; } ; 213 struct dquot ; 214 struct kqid ; 19 typedef 
__kernel_uid32_t projid_t; 23 struct __anonstruct_kprojid_t_321 { projid_t val; } ; 23 typedef struct __anonstruct_kprojid_t_321 kprojid_t; 181 enum quota_type { USRQUOTA = 0, GRPQUOTA = 1, PRJQUOTA = 2 } ; 66 typedef long long qsize_t; 67 union __anonunion____missing_field_name_322 { kuid_t uid; kgid_t gid; kprojid_t projid; } ; 67 struct kqid { union __anonunion____missing_field_name_322 __annonCompField69; enum quota_type type; } ; 194 struct mem_dqblk { qsize_t dqb_bhardlimit; qsize_t dqb_bsoftlimit; qsize_t dqb_curspace; qsize_t dqb_rsvspace; qsize_t dqb_ihardlimit; qsize_t dqb_isoftlimit; qsize_t dqb_curinodes; time64_t dqb_btime; time64_t dqb_itime; } ; 216 struct quota_format_type ; 217 struct mem_dqinfo { struct quota_format_type *dqi_format; int dqi_fmt_id; struct list_head dqi_dirty_list; unsigned long dqi_flags; unsigned int dqi_bgrace; unsigned int dqi_igrace; qsize_t dqi_max_spc_limit; qsize_t dqi_max_ino_limit; void *dqi_priv; } ; 282 struct dquot { struct hlist_node dq_hash; struct list_head dq_inuse; struct list_head dq_free; struct list_head dq_dirty; struct mutex dq_lock; atomic_t dq_count; wait_queue_head_t dq_wait_unused; struct super_block *dq_sb; struct kqid dq_id; loff_t dq_off; unsigned long dq_flags; struct mem_dqblk dq_dqb; } ; 309 struct quota_format_ops { int (*check_quota_file)(struct super_block *, int); int (*read_file_info)(struct super_block *, int); int (*write_file_info)(struct super_block *, int); int (*free_file_info)(struct super_block *, int); int (*read_dqblk)(struct dquot *); int (*commit_dqblk)(struct dquot *); int (*release_dqblk)(struct dquot *); int (*get_next_id)(struct super_block *, struct kqid *); } ; 321 struct dquot_operations { int (*write_dquot)(struct dquot *); struct dquot * (*alloc_dquot)(struct super_block *, int); void (*destroy_dquot)(struct dquot *); int (*acquire_dquot)(struct dquot *); int (*release_dquot)(struct dquot *); int (*mark_dirty)(struct dquot *); int (*write_info)(struct super_block *, int); 
qsize_t * (*get_reserved_space)(struct inode *); int (*get_projid)(struct inode *, kprojid_t *); int (*get_next_id)(struct super_block *, struct kqid *); } ; 338 struct qc_dqblk { int d_fieldmask; u64 d_spc_hardlimit; u64 d_spc_softlimit; u64 d_ino_hardlimit; u64 d_ino_softlimit; u64 d_space; u64 d_ino_count; s64 d_ino_timer; s64 d_spc_timer; int d_ino_warns; int d_spc_warns; u64 d_rt_spc_hardlimit; u64 d_rt_spc_softlimit; u64 d_rt_space; s64 d_rt_spc_timer; int d_rt_spc_warns; } ; 361 struct qc_type_state { unsigned int flags; unsigned int spc_timelimit; unsigned int ino_timelimit; unsigned int rt_spc_timelimit; unsigned int spc_warnlimit; unsigned int ino_warnlimit; unsigned int rt_spc_warnlimit; unsigned long long ino; blkcnt_t blocks; blkcnt_t nextents; } ; 407 struct qc_state { unsigned int s_incoredqs; struct qc_type_state s_state[3U]; } ; 418 struct qc_info { int i_fieldmask; unsigned int i_flags; unsigned int i_spc_timelimit; unsigned int i_ino_timelimit; unsigned int i_rt_spc_timelimit; unsigned int i_spc_warnlimit; unsigned int i_ino_warnlimit; unsigned int i_rt_spc_warnlimit; } ; 431 struct quotactl_ops { int (*quota_on)(struct super_block *, int, int, struct path *); int (*quota_off)(struct super_block *, int); int (*quota_enable)(struct super_block *, unsigned int); int (*quota_disable)(struct super_block *, unsigned int); int (*quota_sync)(struct super_block *, int); int (*set_info)(struct super_block *, int, struct qc_info *); int (*get_dqblk)(struct super_block *, struct kqid , struct qc_dqblk *); int (*get_nextdqblk)(struct super_block *, struct kqid *, struct qc_dqblk *); int (*set_dqblk)(struct super_block *, struct kqid , struct qc_dqblk *); int (*get_state)(struct super_block *, struct qc_state *); int (*rm_xquota)(struct super_block *, unsigned int); } ; 447 struct quota_format_type { int qf_fmt_id; const struct quota_format_ops *qf_ops; struct module *qf_owner; struct quota_format_type *qf_next; } ; 511 struct quota_info { unsigned int flags; 
struct mutex dqio_mutex; struct mutex dqonoff_mutex; struct inode *files[3U]; struct mem_dqinfo info[3U]; const struct quota_format_ops *ops[3U]; } ; 541 struct writeback_control ; 542 struct kiocb { struct file *ki_filp; loff_t ki_pos; void (*ki_complete)(struct kiocb *, long, long); void *private; int ki_flags; } ; 365 union __anonunion_arg_324 { char *buf; void *data; } ; 365 struct __anonstruct_read_descriptor_t_323 { size_t written; size_t count; union __anonunion_arg_324 arg; int error; } ; 365 typedef struct __anonstruct_read_descriptor_t_323 read_descriptor_t; 368 struct address_space_operations { int (*writepage)(struct page *, struct writeback_control *); int (*readpage)(struct file *, struct page *); int (*writepages)(struct address_space *, struct writeback_control *); int (*set_page_dirty)(struct page *); int (*readpages)(struct file *, struct address_space *, struct list_head *, unsigned int); int (*write_begin)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page **, void **); int (*write_end)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page *, void *); sector_t (*bmap)(struct address_space *, sector_t ); void (*invalidatepage)(struct page *, unsigned int, unsigned int); int (*releasepage)(struct page *, gfp_t ); void (*freepage)(struct page *); ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *); int (*migratepage)(struct address_space *, struct page *, struct page *, enum migrate_mode ); bool (*isolate_page)(struct page *, isolate_mode_t ); void (*putback_page)(struct page *); int (*launder_page)(struct page *); int (*is_partially_uptodate)(struct page *, unsigned long, unsigned long); void (*is_dirty_writeback)(struct page *, bool *, bool *); int (*error_remove_page)(struct address_space *, struct page *); int (*swap_activate)(struct swap_info_struct *, struct file *, sector_t *); void (*swap_deactivate)(struct file *); } ; 427 struct address_space { struct inode *host; 
struct radix_tree_root page_tree; spinlock_t tree_lock; atomic_t i_mmap_writable; struct rb_root i_mmap; struct rw_semaphore i_mmap_rwsem; unsigned long nrpages; unsigned long nrexceptional; unsigned long writeback_index; const struct address_space_operations *a_ops; unsigned long flags; spinlock_t private_lock; gfp_t gfp_mask; struct list_head private_list; void *private_data; } ; 449 struct request_queue ; 450 struct hd_struct ; 450 struct gendisk ; 450 struct block_device { dev_t bd_dev; int bd_openers; struct inode *bd_inode; struct super_block *bd_super; struct mutex bd_mutex; void *bd_claiming; void *bd_holder; int bd_holders; bool bd_write_holder; struct list_head bd_holder_disks; struct block_device *bd_contains; unsigned int bd_block_size; struct hd_struct *bd_part; unsigned int bd_part_count; int bd_invalidated; struct gendisk *bd_disk; struct request_queue *bd_queue; struct list_head bd_list; unsigned long bd_private; int bd_fsfreeze_count; struct mutex bd_fsfreeze_mutex; } ; 565 struct posix_acl ; 592 struct inode_operations ; 592 union __anonunion____missing_field_name_327 { const unsigned int i_nlink; unsigned int __i_nlink; } ; 592 union __anonunion____missing_field_name_328 { struct hlist_head i_dentry; struct callback_head i_rcu; } ; 592 struct file_lock_context ; 592 struct cdev ; 592 union __anonunion____missing_field_name_329 { struct pipe_inode_info *i_pipe; struct block_device *i_bdev; struct cdev *i_cdev; char *i_link; unsigned int i_dir_seq; } ; 592 struct inode { umode_t i_mode; unsigned short i_opflags; kuid_t i_uid; kgid_t i_gid; unsigned int i_flags; struct posix_acl *i_acl; struct posix_acl *i_default_acl; const struct inode_operations *i_op; struct super_block *i_sb; struct address_space *i_mapping; void *i_security; unsigned long i_ino; union __anonunion____missing_field_name_327 __annonCompField70; dev_t i_rdev; loff_t i_size; struct timespec i_atime; struct timespec i_mtime; struct timespec i_ctime; spinlock_t i_lock; unsigned short 
i_bytes; unsigned int i_blkbits; blkcnt_t i_blocks; unsigned long i_state; struct rw_semaphore i_rwsem; unsigned long dirtied_when; unsigned long dirtied_time_when; struct hlist_node i_hash; struct list_head i_io_list; struct bdi_writeback *i_wb; int i_wb_frn_winner; u16 i_wb_frn_avg_time; u16 i_wb_frn_history; struct list_head i_lru; struct list_head i_sb_list; struct list_head i_wb_list; union __anonunion____missing_field_name_328 __annonCompField71; u64 i_version; atomic_t i_count; atomic_t i_dio_count; atomic_t i_writecount; atomic_t i_readcount; const struct file_operations *i_fop; struct file_lock_context *i_flctx; struct address_space i_data; struct list_head i_devices; union __anonunion____missing_field_name_329 __annonCompField72; __u32 i_generation; __u32 i_fsnotify_mask; struct hlist_head i_fsnotify_marks; struct fscrypt_info *i_crypt_info; void *i_private; } ; 847 struct fown_struct { rwlock_t lock; struct pid *pid; enum pid_type pid_type; kuid_t uid; kuid_t euid; int signum; } ; 855 struct file_ra_state { unsigned long start; unsigned int size; unsigned int async_size; unsigned int ra_pages; unsigned int mmap_miss; loff_t prev_pos; } ; 878 union __anonunion_f_u_330 { struct llist_node fu_llist; struct callback_head fu_rcuhead; } ; 878 struct file { union __anonunion_f_u_330 f_u; struct path f_path; struct inode *f_inode; const struct file_operations *f_op; spinlock_t f_lock; atomic_long_t f_count; unsigned int f_flags; fmode_t f_mode; struct mutex f_pos_lock; loff_t f_pos; struct fown_struct f_owner; const struct cred *f_cred; struct file_ra_state f_ra; u64 f_version; void *f_security; void *private_data; struct list_head f_ep_links; struct list_head f_tfile_llink; struct address_space *f_mapping; } ; 963 typedef void *fl_owner_t; 964 struct file_lock ; 965 struct file_lock_operations { void (*fl_copy_lock)(struct file_lock *, struct file_lock *); void (*fl_release_private)(struct file_lock *); } ; 971 struct lock_manager_operations { int 
(*lm_compare_owner)(struct file_lock *, struct file_lock *); unsigned long int (*lm_owner_key)(struct file_lock *); fl_owner_t (*lm_get_owner)(fl_owner_t ); void (*lm_put_owner)(fl_owner_t ); void (*lm_notify)(struct file_lock *); int (*lm_grant)(struct file_lock *, int); bool (*lm_break)(struct file_lock *); int (*lm_change)(struct file_lock *, int, struct list_head *); void (*lm_setup)(struct file_lock *, void **); } ; 992 struct net ; 998 struct nlm_lockowner ; 999 struct nfs_lock_info { u32 state; struct nlm_lockowner *owner; struct list_head list; } ; 14 struct nfs4_lock_state ; 15 struct nfs4_lock_info { struct nfs4_lock_state *owner; } ; 19 struct fasync_struct ; 19 struct __anonstruct_afs_332 { struct list_head link; int state; } ; 19 union __anonunion_fl_u_331 { struct nfs_lock_info nfs_fl; struct nfs4_lock_info nfs4_fl; struct __anonstruct_afs_332 afs; } ; 19 struct file_lock { struct file_lock *fl_next; struct list_head fl_list; struct hlist_node fl_link; struct list_head fl_block; fl_owner_t fl_owner; unsigned int fl_flags; unsigned char fl_type; unsigned int fl_pid; int fl_link_cpu; struct pid *fl_nspid; wait_queue_head_t fl_wait; struct file *fl_file; loff_t fl_start; loff_t fl_end; struct fasync_struct *fl_fasync; unsigned long fl_break_time; unsigned long fl_downgrade_time; const struct file_lock_operations *fl_ops; const struct lock_manager_operations *fl_lmops; union __anonunion_fl_u_331 fl_u; } ; 1051 struct file_lock_context { spinlock_t flc_lock; struct list_head flc_flock; struct list_head flc_posix; struct list_head flc_lease; } ; 1118 struct files_struct ; 1271 struct fasync_struct { spinlock_t fa_lock; int magic; int fa_fd; struct fasync_struct *fa_next; struct file *fa_file; struct callback_head fa_rcu; } ; 1306 struct sb_writers { int frozen; wait_queue_head_t wait_unfrozen; struct percpu_rw_semaphore rw_sem[3U]; } ; 1336 struct super_operations ; 1336 struct xattr_handler ; 1336 struct mtd_info ; 1336 struct super_block { struct 
list_head s_list; dev_t s_dev; unsigned char s_blocksize_bits; unsigned long s_blocksize; loff_t s_maxbytes; struct file_system_type *s_type; const struct super_operations *s_op; const struct dquot_operations *dq_op; const struct quotactl_ops *s_qcop; const struct export_operations *s_export_op; unsigned long s_flags; unsigned long s_iflags; unsigned long s_magic; struct dentry *s_root; struct rw_semaphore s_umount; int s_count; atomic_t s_active; void *s_security; const struct xattr_handler **s_xattr; const struct fscrypt_operations *s_cop; struct hlist_bl_head s_anon; struct list_head s_mounts; struct block_device *s_bdev; struct backing_dev_info *s_bdi; struct mtd_info *s_mtd; struct hlist_node s_instances; unsigned int s_quota_types; struct quota_info s_dquot; struct sb_writers s_writers; char s_id[32U]; u8 s_uuid[16U]; void *s_fs_info; unsigned int s_max_links; fmode_t s_mode; u32 s_time_gran; struct mutex s_vfs_rename_mutex; char *s_subtype; char *s_options; const struct dentry_operations *s_d_op; int cleancache_poolid; struct shrinker s_shrink; atomic_long_t s_remove_count; int s_readonly_remount; struct workqueue_struct *s_dio_done_wq; struct hlist_head s_pins; struct user_namespace *s_user_ns; struct list_lru s_dentry_lru; struct list_lru s_inode_lru; struct callback_head rcu; struct work_struct destroy_work; struct mutex s_sync_lock; int s_stack_depth; spinlock_t s_inode_list_lock; struct list_head s_inodes; spinlock_t s_inode_wblist_lock; struct list_head s_inodes_wb; } ; 1620 struct fiemap_extent_info { unsigned int fi_flags; unsigned int fi_extents_mapped; unsigned int fi_extents_max; struct fiemap_extent *fi_extents_start; } ; 1633 struct dir_context ; 1658 struct dir_context { int (*actor)(struct dir_context *, const char *, int, loff_t , u64 , unsigned int); loff_t pos; } ; 1665 struct file_operations { struct module *owner; loff_t (*llseek)(struct file *, loff_t , int); ssize_t (*read)(struct file *, char *, size_t , loff_t *); ssize_t 
(*write)(struct file *, const char *, size_t , loff_t *); ssize_t (*read_iter)(struct kiocb *, struct iov_iter *); ssize_t (*write_iter)(struct kiocb *, struct iov_iter *); int (*iterate)(struct file *, struct dir_context *); int (*iterate_shared)(struct file *, struct dir_context *); unsigned int (*poll)(struct file *, struct poll_table_struct *); long int (*unlocked_ioctl)(struct file *, unsigned int, unsigned long); long int (*compat_ioctl)(struct file *, unsigned int, unsigned long); int (*mmap)(struct file *, struct vm_area_struct *); int (*open)(struct inode *, struct file *); int (*flush)(struct file *, fl_owner_t ); int (*release)(struct inode *, struct file *); int (*fsync)(struct file *, loff_t , loff_t , int); int (*aio_fsync)(struct kiocb *, int); int (*fasync)(int, struct file *, int); int (*lock)(struct file *, int, struct file_lock *); ssize_t (*sendpage)(struct file *, struct page *, int, size_t , loff_t *, int); unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); int (*check_flags)(int); int (*flock)(struct file *, int, struct file_lock *); ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t , unsigned int); ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t , unsigned int); int (*setlease)(struct file *, long, struct file_lock **, void **); long int (*fallocate)(struct file *, int, loff_t , loff_t ); void (*show_fdinfo)(struct seq_file *, struct file *); ssize_t (*copy_file_range)(struct file *, loff_t , struct file *, loff_t , size_t , unsigned int); int (*clone_file_range)(struct file *, loff_t , struct file *, loff_t , u64 ); ssize_t (*dedupe_file_range)(struct file *, u64 , u64 , struct file *, u64 ); } ; 1734 struct inode_operations { struct dentry * (*lookup)(struct inode *, struct dentry *, unsigned int); const char * (*get_link)(struct dentry *, struct inode *, struct delayed_call *); int (*permission)(struct inode *, 
int); struct posix_acl * (*get_acl)(struct inode *, int); int (*readlink)(struct dentry *, char *, int); int (*create)(struct inode *, struct dentry *, umode_t , bool ); int (*link)(struct dentry *, struct inode *, struct dentry *); int (*unlink)(struct inode *, struct dentry *); int (*symlink)(struct inode *, struct dentry *, const char *); int (*mkdir)(struct inode *, struct dentry *, umode_t ); int (*rmdir)(struct inode *, struct dentry *); int (*mknod)(struct inode *, struct dentry *, umode_t , dev_t ); int (*rename)(struct inode *, struct dentry *, struct inode *, struct dentry *, unsigned int); int (*setattr)(struct dentry *, struct iattr *); int (*getattr)(struct vfsmount *, struct dentry *, struct kstat *); ssize_t (*listxattr)(struct dentry *, char *, size_t ); int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 , u64 ); int (*update_time)(struct inode *, struct timespec *, int); int (*atomic_open)(struct inode *, struct dentry *, struct file *, unsigned int, umode_t , int *); int (*tmpfile)(struct inode *, struct dentry *, umode_t ); int (*set_acl)(struct inode *, struct posix_acl *, int); } ; 1784 struct super_operations { struct inode * (*alloc_inode)(struct super_block *); void (*destroy_inode)(struct inode *); void (*dirty_inode)(struct inode *, int); int (*write_inode)(struct inode *, struct writeback_control *); int (*drop_inode)(struct inode *); void (*evict_inode)(struct inode *); void (*put_super)(struct super_block *); int (*sync_fs)(struct super_block *, int); int (*freeze_super)(struct super_block *); int (*freeze_fs)(struct super_block *); int (*thaw_super)(struct super_block *); int (*unfreeze_fs)(struct super_block *); int (*statfs)(struct dentry *, struct kstatfs *); int (*remount_fs)(struct super_block *, int *, char *); void (*umount_begin)(struct super_block *); int (*show_options)(struct seq_file *, struct dentry *); int (*show_devname)(struct seq_file *, struct dentry *); int (*show_path)(struct seq_file *, struct dentry 
*); int (*show_stats)(struct seq_file *, struct dentry *); ssize_t (*quota_read)(struct super_block *, int, char *, size_t , loff_t ); ssize_t (*quota_write)(struct super_block *, int, const char *, size_t , loff_t ); struct dquot ** (*get_dquots)(struct inode *); int (*bdev_try_to_free_page)(struct super_block *, struct page *, gfp_t ); long int (*nr_cached_objects)(struct super_block *, struct shrink_control *); long int (*free_cached_objects)(struct super_block *, struct shrink_control *); } ; 2027 struct file_system_type { const char *name; int fs_flags; struct dentry * (*mount)(struct file_system_type *, int, const char *, void *); void (*kill_sb)(struct super_block *); struct module *owner; struct file_system_type *next; struct hlist_head fs_supers; struct lock_class_key s_lock_key; struct lock_class_key s_umount_key; struct lock_class_key s_vfs_rename_key; struct lock_class_key s_writers_key[3U]; struct lock_class_key i_lock_key; struct lock_class_key i_mutex_key; struct lock_class_key i_mutex_dir_key; } ; 3211 enum ldv_19473 { SS_FREE = 0, SS_UNCONNECTED = 1, SS_CONNECTING = 2, SS_CONNECTED = 3, SS_DISCONNECTING = 4 } ; 53 typedef enum ldv_19473 socket_state; 70 struct socket_wq { wait_queue_head_t wait; struct fasync_struct *fasync_list; unsigned long flags; struct callback_head rcu; } ; 100 struct proto_ops ; 100 struct socket { socket_state state; short type; unsigned long flags; struct socket_wq *wq; struct file *file; struct sock *sk; const struct proto_ops *ops; } ; 126 struct sk_buff ; 134 struct proto_ops { int family; struct module *owner; int (*release)(struct socket *); int (*bind)(struct socket *, struct sockaddr *, int); int (*connect)(struct socket *, struct sockaddr *, int, int); int (*socketpair)(struct socket *, struct socket *); int (*accept)(struct socket *, struct socket *, int); int (*getname)(struct socket *, struct sockaddr *, int *, int); unsigned int (*poll)(struct file *, struct socket *, struct poll_table_struct *); int 
(*ioctl)(struct socket *, unsigned int, unsigned long); int (*compat_ioctl)(struct socket *, unsigned int, unsigned long); int (*listen)(struct socket *, int); int (*shutdown)(struct socket *, int); int (*setsockopt)(struct socket *, int, int, char *, unsigned int); int (*getsockopt)(struct socket *, int, int, char *, int *); int (*compat_setsockopt)(struct socket *, int, int, char *, unsigned int); int (*compat_getsockopt)(struct socket *, int, int, char *, int *); int (*sendmsg)(struct socket *, struct msghdr *, size_t ); int (*recvmsg)(struct socket *, struct msghdr *, size_t , int); int (*mmap)(struct file *, struct socket *, struct vm_area_struct *); ssize_t (*sendpage)(struct socket *, struct page *, int, size_t , int); ssize_t (*splice_read)(struct socket *, loff_t *, struct pipe_inode_info *, size_t , unsigned int); int (*set_peek_off)(struct sock *, int); int (*peek_len)(struct socket *); int (*read_sock)(struct sock *, read_descriptor_t *, int (*)(read_descriptor_t *, struct sk_buff *, unsigned int, size_t )); } ; 84 struct plist_node { int prio; struct list_head prio_list; struct list_head node_list; } ; 4 typedef unsigned long cputime_t; 26 struct sem_undo_list ; 26 struct sysv_sem { struct sem_undo_list *undo_list; } ; 78 struct user_struct ; 26 struct sysv_shm { struct list_head shm_clist; } ; 24 struct __anonstruct_sigset_t_333 { unsigned long sig[1U]; } ; 24 typedef struct __anonstruct_sigset_t_333 sigset_t; 25 struct siginfo ; 17 typedef void __signalfn_t(int); 18 typedef __signalfn_t *__sighandler_t; 20 typedef void __restorefn_t(); 21 typedef __restorefn_t *__sigrestore_t; 38 union sigval { int sival_int; void *sival_ptr; } ; 10 typedef union sigval sigval_t; 11 struct __anonstruct__kill_335 { __kernel_pid_t _pid; __kernel_uid32_t _uid; } ; 11 struct __anonstruct__timer_336 { __kernel_timer_t _tid; int _overrun; char _pad[0U]; sigval_t _sigval; int _sys_private; } ; 11 struct __anonstruct__rt_337 { __kernel_pid_t _pid; __kernel_uid32_t _uid; 
sigval_t _sigval; } ; 11 struct __anonstruct__sigchld_338 { __kernel_pid_t _pid; __kernel_uid32_t _uid; int _status; __kernel_clock_t _utime; __kernel_clock_t _stime; } ; 11 struct __anonstruct__addr_bnd_341 { void *_lower; void *_upper; } ; 11 union __anonunion____missing_field_name_340 { struct __anonstruct__addr_bnd_341 _addr_bnd; __u32 _pkey; } ; 11 struct __anonstruct__sigfault_339 { void *_addr; short _addr_lsb; union __anonunion____missing_field_name_340 __annonCompField73; } ; 11 struct __anonstruct__sigpoll_342 { long _band; int _fd; } ; 11 struct __anonstruct__sigsys_343 { void *_call_addr; int _syscall; unsigned int _arch; } ; 11 union __anonunion__sifields_334 { int _pad[28U]; struct __anonstruct__kill_335 _kill; struct __anonstruct__timer_336 _timer; struct __anonstruct__rt_337 _rt; struct __anonstruct__sigchld_338 _sigchld; struct __anonstruct__sigfault_339 _sigfault; struct __anonstruct__sigpoll_342 _sigpoll; struct __anonstruct__sigsys_343 _sigsys; } ; 11 struct siginfo { int si_signo; int si_errno; int si_code; union __anonunion__sifields_334 _sifields; } ; 118 typedef struct siginfo siginfo_t; 22 struct sigpending { struct list_head list; sigset_t signal; } ; 257 struct sigaction { __sighandler_t sa_handler; unsigned long sa_flags; __sigrestore_t sa_restorer; sigset_t sa_mask; } ; 271 struct k_sigaction { struct sigaction sa; } ; 43 struct seccomp_filter ; 44 struct seccomp { int mode; struct seccomp_filter *filter; } ; 40 struct rt_mutex_waiter ; 41 struct rlimit { __kernel_ulong_t rlim_cur; __kernel_ulong_t rlim_max; } ; 12 enum kcov_mode { KCOV_MODE_DISABLED = 0, KCOV_MODE_TRACE = 1 } ; 17 struct task_io_accounting { u64 rchar; u64 wchar; u64 syscr; u64 syscw; u64 read_bytes; u64 write_bytes; u64 cancelled_write_bytes; } ; 45 struct latency_record { unsigned long backtrace[12U]; unsigned int count; unsigned long time; unsigned long max; } ; 41 struct assoc_array_ptr ; 41 struct assoc_array { struct assoc_array_ptr *root; unsigned long 
nr_leaves_on_tree; } ; 31 typedef int32_t key_serial_t; 34 typedef uint32_t key_perm_t; 35 struct key ; 36 struct signal_struct ; 37 struct key_type ; 41 struct keyring_index_key { struct key_type *type; const char *description; size_t desc_len; } ; 91 union key_payload { void *rcu_data0; void *data[4U]; } ; 128 union __anonunion____missing_field_name_346 { struct list_head graveyard_link; struct rb_node serial_node; } ; 128 struct key_user ; 128 union __anonunion____missing_field_name_347 { time_t expiry; time_t revoked_at; } ; 128 struct __anonstruct____missing_field_name_349 { struct key_type *type; char *description; } ; 128 union __anonunion____missing_field_name_348 { struct keyring_index_key index_key; struct __anonstruct____missing_field_name_349 __annonCompField76; } ; 128 struct __anonstruct____missing_field_name_351 { struct list_head name_link; struct assoc_array keys; } ; 128 union __anonunion____missing_field_name_350 { union key_payload payload; struct __anonstruct____missing_field_name_351 __annonCompField78; int reject_error; } ; 128 struct key { atomic_t usage; key_serial_t serial; union __anonunion____missing_field_name_346 __annonCompField74; struct rw_semaphore sem; struct key_user *user; void *security; union __anonunion____missing_field_name_347 __annonCompField75; time_t last_used_at; kuid_t uid; kgid_t gid; key_perm_t perm; unsigned short quotalen; unsigned short datalen; unsigned long flags; union __anonunion____missing_field_name_348 __annonCompField77; union __anonunion____missing_field_name_350 __annonCompField79; int (*restrict_link)(struct key *, const struct key_type *, const union key_payload *); } ; 377 struct audit_context ; 27 struct group_info { atomic_t usage; int ngroups; kgid_t gid[0U]; } ; 85 struct cred { atomic_t usage; atomic_t subscribers; void *put_addr; unsigned int magic; kuid_t uid; kgid_t gid; kuid_t suid; kgid_t sgid; kuid_t euid; kgid_t egid; kuid_t fsuid; kgid_t fsgid; unsigned int securebits; kernel_cap_t 
cap_inheritable; kernel_cap_t cap_permitted; kernel_cap_t cap_effective; kernel_cap_t cap_bset; kernel_cap_t cap_ambient; unsigned char jit_keyring; struct key *session_keyring; struct key *process_keyring; struct key *thread_keyring; struct key *request_key_auth; void *security; struct user_struct *user; struct user_namespace *user_ns; struct group_info *group_info; struct callback_head rcu; } ; 368 struct percpu_ref ; 55 typedef void percpu_ref_func_t(struct percpu_ref *); 68 struct percpu_ref { atomic_long_t count; unsigned long percpu_count_ptr; percpu_ref_func_t *release; percpu_ref_func_t *confirm_switch; bool force_atomic; struct callback_head rcu; } ; 325 struct cgroup ; 326 struct cgroup_root ; 327 struct cgroup_subsys ; 328 struct cgroup_taskset ; 372 struct cgroup_file { struct kernfs_node *kn; } ; 90 struct cgroup_subsys_state { struct cgroup *cgroup; struct cgroup_subsys *ss; struct percpu_ref refcnt; struct cgroup_subsys_state *parent; struct list_head sibling; struct list_head children; int id; unsigned int flags; u64 serial_nr; atomic_t online_cnt; struct callback_head callback_head; struct work_struct destroy_work; } ; 141 struct css_set { atomic_t refcount; struct hlist_node hlist; struct list_head tasks; struct list_head mg_tasks; struct list_head cgrp_links; struct cgroup *dfl_cgrp; struct cgroup_subsys_state *subsys[13U]; struct list_head mg_preload_node; struct list_head mg_node; struct cgroup *mg_src_cgrp; struct cgroup *mg_dst_cgrp; struct css_set *mg_dst_cset; struct list_head e_cset_node[13U]; struct list_head task_iters; bool dead; struct callback_head callback_head; } ; 221 struct cgroup { struct cgroup_subsys_state self; unsigned long flags; int id; int level; int populated_cnt; struct kernfs_node *kn; struct cgroup_file procs_file; struct cgroup_file events_file; u16 subtree_control; u16 subtree_ss_mask; u16 old_subtree_control; u16 old_subtree_ss_mask; struct cgroup_subsys_state *subsys[13U]; struct cgroup_root *root; struct list_head 
cset_links; struct list_head e_csets[13U]; struct list_head pidlists; struct mutex pidlist_mutex; wait_queue_head_t offline_waitq; struct work_struct release_agent_work; int ancestor_ids[]; } ; 306 struct cgroup_root { struct kernfs_root *kf_root; unsigned int subsys_mask; int hierarchy_id; struct cgroup cgrp; int cgrp_ancestor_id_storage; atomic_t nr_cgrps; struct list_head root_list; unsigned int flags; struct idr cgroup_idr; char release_agent_path[4096U]; char name[64U]; } ; 345 struct cftype { char name[64U]; unsigned long private; size_t max_write_len; unsigned int flags; unsigned int file_offset; struct cgroup_subsys *ss; struct list_head node; struct kernfs_ops *kf_ops; u64 (*read_u64)(struct cgroup_subsys_state *, struct cftype *); s64 (*read_s64)(struct cgroup_subsys_state *, struct cftype *); int (*seq_show)(struct seq_file *, void *); void * (*seq_start)(struct seq_file *, loff_t *); void * (*seq_next)(struct seq_file *, void *, loff_t *); void (*seq_stop)(struct seq_file *, void *); int (*write_u64)(struct cgroup_subsys_state *, struct cftype *, u64 ); int (*write_s64)(struct cgroup_subsys_state *, struct cftype *, s64 ); ssize_t (*write)(struct kernfs_open_file *, char *, size_t , loff_t ); struct lock_class_key lockdep_key; } ; 430 struct cgroup_subsys { struct cgroup_subsys_state * (*css_alloc)(struct cgroup_subsys_state *); int (*css_online)(struct cgroup_subsys_state *); void (*css_offline)(struct cgroup_subsys_state *); void (*css_released)(struct cgroup_subsys_state *); void (*css_free)(struct cgroup_subsys_state *); void (*css_reset)(struct cgroup_subsys_state *); int (*can_attach)(struct cgroup_taskset *); void (*cancel_attach)(struct cgroup_taskset *); void (*attach)(struct cgroup_taskset *); void (*post_attach)(); int (*can_fork)(struct task_struct *); void (*cancel_fork)(struct task_struct *); void (*fork)(struct task_struct *); void (*exit)(struct task_struct *); void (*free)(struct task_struct *); void (*bind)(struct cgroup_subsys_state 
*); bool early_init; bool implicit_on_dfl; bool broken_hierarchy; bool warned_broken_hierarchy; int id; const char *name; const char *legacy_name; struct cgroup_root *root; struct idr css_idr; struct list_head cfts; struct cftype *dfl_cftypes; struct cftype *legacy_cftypes; unsigned int depends_on; } ; 546 struct __anonstruct____missing_field_name_355 { u8 is_data; u8 padding; u16 prioidx; u32 classid; } ; 546 union __anonunion____missing_field_name_354 { struct __anonstruct____missing_field_name_355 __annonCompField80; u64 val; } ; 546 struct sock_cgroup_data { union __anonunion____missing_field_name_354 __annonCompField81; } ; 128 struct futex_pi_state ; 129 struct robust_list_head ; 130 struct bio_list ; 131 struct fs_struct ; 132 struct perf_event_context ; 133 struct blk_plug ; 134 struct nameidata ; 188 struct cfs_rq ; 189 struct task_group ; 495 struct sighand_struct { atomic_t count; struct k_sigaction action[64U]; spinlock_t siglock; wait_queue_head_t signalfd_wqh; } ; 539 struct pacct_struct { int ac_flag; long ac_exitcode; unsigned long ac_mem; cputime_t ac_utime; cputime_t ac_stime; unsigned long ac_minflt; unsigned long ac_majflt; } ; 547 struct cpu_itimer { cputime_t expires; cputime_t incr; u32 error; u32 incr_error; } ; 554 struct prev_cputime { cputime_t utime; cputime_t stime; raw_spinlock_t lock; } ; 579 struct task_cputime { cputime_t utime; cputime_t stime; unsigned long long sum_exec_runtime; } ; 595 struct task_cputime_atomic { atomic64_t utime; atomic64_t stime; atomic64_t sum_exec_runtime; } ; 617 struct thread_group_cputimer { struct task_cputime_atomic cputime_atomic; bool running; bool checking_timer; } ; 662 struct autogroup ; 663 struct tty_struct ; 663 struct taskstats ; 663 struct tty_audit_buf ; 663 struct signal_struct { atomic_t sigcnt; atomic_t live; int nr_threads; struct list_head thread_head; wait_queue_head_t wait_chldexit; struct task_struct *curr_target; struct sigpending shared_pending; int group_exit_code; int 
notify_count; struct task_struct *group_exit_task; int group_stop_count; unsigned int flags; unsigned char is_child_subreaper; unsigned char has_child_subreaper; int posix_timer_id; struct list_head posix_timers; struct hrtimer real_timer; struct pid *leader_pid; ktime_t it_real_incr; struct cpu_itimer it[2U]; struct thread_group_cputimer cputimer; struct task_cputime cputime_expires; struct list_head cpu_timers[3U]; struct pid *tty_old_pgrp; int leader; struct tty_struct *tty; struct autogroup *autogroup; seqlock_t stats_lock; cputime_t utime; cputime_t stime; cputime_t cutime; cputime_t cstime; cputime_t gtime; cputime_t cgtime; struct prev_cputime prev_cputime; unsigned long nvcsw; unsigned long nivcsw; unsigned long cnvcsw; unsigned long cnivcsw; unsigned long min_flt; unsigned long maj_flt; unsigned long cmin_flt; unsigned long cmaj_flt; unsigned long inblock; unsigned long oublock; unsigned long cinblock; unsigned long coublock; unsigned long maxrss; unsigned long cmaxrss; struct task_io_accounting ioac; unsigned long long sum_sched_runtime; struct rlimit rlim[16U]; struct pacct_struct pacct; struct taskstats *stats; unsigned int audit_tty; struct tty_audit_buf *tty_audit_buf; bool oom_flag_origin; short oom_score_adj; short oom_score_adj_min; struct mm_struct *oom_mm; struct mutex cred_guard_mutex; } ; 839 struct user_struct { atomic_t __count; atomic_t processes; atomic_t sigpending; atomic_t inotify_watches; atomic_t inotify_devs; atomic_t fanotify_listeners; atomic_long_t epoll_watches; unsigned long mq_bytes; unsigned long locked_shm; unsigned long unix_inflight; atomic_long_t pipe_bufs; struct key *uid_keyring; struct key *session_keyring; struct hlist_node uidhash_node; kuid_t uid; atomic_long_t locked_vm; } ; 884 struct reclaim_state ; 885 struct sched_info { unsigned long pcount; unsigned long long run_delay; unsigned long long last_arrival; unsigned long long last_queued; } ; 900 struct task_delay_info { spinlock_t lock; unsigned int flags; u64 
blkio_start; u64 blkio_delay; u64 swapin_delay; u32 blkio_count; u32 swapin_count; u64 freepages_start; u64 freepages_delay; u32 freepages_count; } ; 957 struct wake_q_node { struct wake_q_node *next; } ; 1234 struct uts_namespace ; 1235 struct load_weight { unsigned long weight; u32 inv_weight; } ; 1243 struct sched_avg { u64 last_update_time; u64 load_sum; u32 util_sum; u32 period_contrib; unsigned long load_avg; unsigned long util_avg; } ; 1301 struct sched_statistics { u64 wait_start; u64 wait_max; u64 wait_count; u64 wait_sum; u64 iowait_count; u64 iowait_sum; u64 sleep_start; u64 sleep_max; s64 sum_sleep_runtime; u64 block_start; u64 block_max; u64 exec_max; u64 slice_max; u64 nr_migrations_cold; u64 nr_failed_migrations_affine; u64 nr_failed_migrations_running; u64 nr_failed_migrations_hot; u64 nr_forced_migrations; u64 nr_wakeups; u64 nr_wakeups_sync; u64 nr_wakeups_migrate; u64 nr_wakeups_local; u64 nr_wakeups_remote; u64 nr_wakeups_affine; u64 nr_wakeups_affine_attempts; u64 nr_wakeups_passive; u64 nr_wakeups_idle; } ; 1336 struct sched_entity { struct load_weight load; struct rb_node run_node; struct list_head group_node; unsigned int on_rq; u64 exec_start; u64 sum_exec_runtime; u64 vruntime; u64 prev_sum_exec_runtime; u64 nr_migrations; struct sched_statistics statistics; int depth; struct sched_entity *parent; struct cfs_rq *cfs_rq; struct cfs_rq *my_q; struct sched_avg avg; } ; 1373 struct rt_rq ; 1373 struct sched_rt_entity { struct list_head run_list; unsigned long timeout; unsigned long watchdog_stamp; unsigned int time_slice; unsigned short on_rq; unsigned short on_list; struct sched_rt_entity *back; struct sched_rt_entity *parent; struct rt_rq *rt_rq; struct rt_rq *my_q; } ; 1391 struct sched_dl_entity { struct rb_node rb_node; u64 dl_runtime; u64 dl_deadline; u64 dl_period; u64 dl_bw; s64 runtime; u64 deadline; unsigned int flags; int dl_throttled; int dl_boosted; int dl_yielded; struct hrtimer dl_timer; } ; 1455 struct tlbflush_unmap_batch { 
struct cpumask cpumask; bool flush_required; bool writable; } ; 1474 struct sched_class ; 1474 struct compat_robust_list_head ; 1474 struct numa_group ; 1474 struct kcov ; 1474 struct task_struct { struct thread_info thread_info; volatile long state; void *stack; atomic_t usage; unsigned int flags; unsigned int ptrace; struct llist_node wake_entry; int on_cpu; unsigned int cpu; unsigned int wakee_flips; unsigned long wakee_flip_decay_ts; struct task_struct *last_wakee; int wake_cpu; int on_rq; int prio; int static_prio; int normal_prio; unsigned int rt_priority; const struct sched_class *sched_class; struct sched_entity se; struct sched_rt_entity rt; struct task_group *sched_task_group; struct sched_dl_entity dl; struct hlist_head preempt_notifiers; unsigned int policy; int nr_cpus_allowed; cpumask_t cpus_allowed; unsigned long rcu_tasks_nvcsw; bool rcu_tasks_holdout; struct list_head rcu_tasks_holdout_list; int rcu_tasks_idle_cpu; struct sched_info sched_info; struct list_head tasks; struct plist_node pushable_tasks; struct rb_node pushable_dl_tasks; struct mm_struct *mm; struct mm_struct *active_mm; u32 vmacache_seqnum; struct vm_area_struct *vmacache[4U]; struct task_rss_stat rss_stat; int exit_state; int exit_code; int exit_signal; int pdeath_signal; unsigned long jobctl; unsigned int personality; unsigned char sched_reset_on_fork; unsigned char sched_contributes_to_load; unsigned char sched_migrated; unsigned char sched_remote_wakeup; unsigned char; unsigned char in_execve; unsigned char in_iowait; unsigned char restore_sigmask; unsigned char memcg_may_oom; unsigned char memcg_kmem_skip_account; unsigned char brk_randomized; unsigned long atomic_flags; struct restart_block restart_block; pid_t pid; pid_t tgid; struct task_struct *real_parent; struct task_struct *parent; struct list_head children; struct list_head sibling; struct task_struct *group_leader; struct list_head ptraced; struct list_head ptrace_entry; struct pid_link pids[3U]; struct list_head 
thread_group; struct list_head thread_node; struct completion *vfork_done; int *set_child_tid; int *clear_child_tid; cputime_t utime; cputime_t stime; cputime_t utimescaled; cputime_t stimescaled; cputime_t gtime; struct prev_cputime prev_cputime; unsigned long nvcsw; unsigned long nivcsw; u64 start_time; u64 real_start_time; unsigned long min_flt; unsigned long maj_flt; struct task_cputime cputime_expires; struct list_head cpu_timers[3U]; const struct cred *real_cred; const struct cred *cred; char comm[16U]; struct nameidata *nameidata; struct sysv_sem sysvsem; struct sysv_shm sysvshm; unsigned long last_switch_count; struct fs_struct *fs; struct files_struct *files; struct nsproxy *nsproxy; struct signal_struct *signal; struct sighand_struct *sighand; sigset_t blocked; sigset_t real_blocked; sigset_t saved_sigmask; struct sigpending pending; unsigned long sas_ss_sp; size_t sas_ss_size; unsigned int sas_ss_flags; struct callback_head *task_works; struct audit_context *audit_context; kuid_t loginuid; unsigned int sessionid; struct seccomp seccomp; u32 parent_exec_id; u32 self_exec_id; spinlock_t alloc_lock; raw_spinlock_t pi_lock; struct wake_q_node wake_q; struct rb_root pi_waiters; struct rb_node *pi_waiters_leftmost; struct rt_mutex_waiter *pi_blocked_on; struct mutex_waiter *blocked_on; unsigned int irq_events; unsigned long hardirq_enable_ip; unsigned long hardirq_disable_ip; unsigned int hardirq_enable_event; unsigned int hardirq_disable_event; int hardirqs_enabled; int hardirq_context; unsigned long softirq_disable_ip; unsigned long softirq_enable_ip; unsigned int softirq_disable_event; unsigned int softirq_enable_event; int softirqs_enabled; int softirq_context; u64 curr_chain_key; int lockdep_depth; unsigned int lockdep_recursion; struct held_lock held_locks[48U]; gfp_t lockdep_reclaim_gfp; unsigned int in_ubsan; void *journal_info; struct bio_list *bio_list; struct blk_plug *plug; struct reclaim_state *reclaim_state; struct backing_dev_info 
*backing_dev_info; struct io_context *io_context; unsigned long ptrace_message; siginfo_t *last_siginfo; struct task_io_accounting ioac; u64 acct_rss_mem1; u64 acct_vm_mem1; cputime_t acct_timexpd; nodemask_t mems_allowed; seqcount_t mems_allowed_seq; int cpuset_mem_spread_rotor; int cpuset_slab_spread_rotor; struct css_set *cgroups; struct list_head cg_list; struct robust_list_head *robust_list; struct compat_robust_list_head *compat_robust_list; struct list_head pi_state_list; struct futex_pi_state *pi_state_cache; struct perf_event_context *perf_event_ctxp[2U]; struct mutex perf_event_mutex; struct list_head perf_event_list; struct mempolicy *mempolicy; short il_next; short pref_node_fork; int numa_scan_seq; unsigned int numa_scan_period; unsigned int numa_scan_period_max; int numa_preferred_nid; unsigned long numa_migrate_retry; u64 node_stamp; u64 last_task_numa_placement; u64 last_sum_exec_runtime; struct callback_head numa_work; struct list_head numa_entry; struct numa_group *numa_group; unsigned long *numa_faults; unsigned long total_numa_faults; unsigned long numa_faults_locality[3U]; unsigned long numa_pages_migrated; struct tlbflush_unmap_batch tlb_ubc; struct callback_head rcu; struct pipe_inode_info *splice_pipe; struct page_frag task_frag; struct task_delay_info *delays; int make_it_fail; int nr_dirtied; int nr_dirtied_pause; unsigned long dirty_paused_when; int latency_record_count; struct latency_record latency_record[32U]; u64 timer_slack_ns; u64 default_timer_slack_ns; unsigned int kasan_depth; unsigned long trace; unsigned long trace_recursion; enum kcov_mode kcov_mode; unsigned int kcov_size; void *kcov_area; struct kcov *kcov; struct mem_cgroup *memcg_in_oom; gfp_t memcg_oom_gfp_mask; int memcg_oom_order; unsigned int memcg_nr_pages_over_high; struct uprobe_task *utask; unsigned int sequential_io; unsigned int sequential_io_avg; unsigned long task_state_change; int pagefault_disabled; struct task_struct *oom_reaper_list; atomic_t 
stack_refcount; struct thread_struct thread; } ; 161 struct in6_addr ; 184 struct klist_node ; 37 struct klist_node { void *n_klist; struct list_head n_node; struct kref n_ref; } ; 68 struct seq_file { char *buf; size_t size; size_t from; size_t count; size_t pad_until; loff_t index; loff_t read_pos; u64 version; struct mutex lock; const struct seq_operations *op; int poll_event; const struct file *file; void *private; } ; 30 struct seq_operations { void * (*start)(struct seq_file *, loff_t *); void (*stop)(struct seq_file *, void *); void * (*next)(struct seq_file *, void *, loff_t *); int (*show)(struct seq_file *, void *); } ; 222 struct pinctrl ; 223 struct pinctrl_state ; 194 struct dev_pin_info { struct pinctrl *p; struct pinctrl_state *default_state; struct pinctrl_state *init_state; struct pinctrl_state *sleep_state; struct pinctrl_state *idle_state; } ; 76 struct dma_map_ops ; 76 struct dev_archdata { struct dma_map_ops *dma_ops; void *iommu; } ; 24 struct device_private ; 25 struct device_driver ; 26 struct driver_private ; 27 struct class ; 28 struct subsys_private ; 29 struct bus_type ; 30 struct device_node ; 31 struct fwnode_handle ; 32 struct iommu_ops ; 33 struct iommu_group ; 34 struct iommu_fwspec ; 62 struct device_attribute ; 62 struct bus_type { const char *name; const char *dev_name; struct device *dev_root; struct device_attribute *dev_attrs; const struct attribute_group **bus_groups; const struct attribute_group **dev_groups; const struct attribute_group **drv_groups; int (*match)(struct device *, struct device_driver *); int (*uevent)(struct device *, struct kobj_uevent_env *); int (*probe)(struct device *); int (*remove)(struct device *); void (*shutdown)(struct device *); int (*online)(struct device *); int (*offline)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct dev_pm_ops *pm; const struct iommu_ops *iommu_ops; struct subsys_private *p; struct lock_class_key lock_key; } ; 
143 struct device_type ; 202 enum probe_type { PROBE_DEFAULT_STRATEGY = 0, PROBE_PREFER_ASYNCHRONOUS = 1, PROBE_FORCE_SYNCHRONOUS = 2 } ; 208 struct of_device_id ; 208 struct acpi_device_id ; 208 struct device_driver { const char *name; struct bus_type *bus; struct module *owner; const char *mod_name; bool suppress_bind_attrs; enum probe_type probe_type; const struct of_device_id *of_match_table; const struct acpi_device_id *acpi_match_table; int (*probe)(struct device *); int (*remove)(struct device *); void (*shutdown)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct attribute_group **groups; const struct dev_pm_ops *pm; struct driver_private *p; } ; 358 struct class_attribute ; 358 struct class { const char *name; struct module *owner; struct class_attribute *class_attrs; const struct attribute_group **dev_groups; struct kobject *dev_kobj; int (*dev_uevent)(struct device *, struct kobj_uevent_env *); char * (*devnode)(struct device *, umode_t *); void (*class_release)(struct class *); void (*dev_release)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct kobj_ns_type_operations *ns_type; const void * (*namespace)(struct device *); const struct dev_pm_ops *pm; struct subsys_private *p; } ; 451 struct class_attribute { struct attribute attr; ssize_t (*show)(struct class *, struct class_attribute *, char *); ssize_t (*store)(struct class *, struct class_attribute *, const char *, size_t ); } ; 519 struct device_type { const char *name; const struct attribute_group **groups; int (*uevent)(struct device *, struct kobj_uevent_env *); char * (*devnode)(struct device *, umode_t *, kuid_t *, kgid_t *); void (*release)(struct device *); const struct dev_pm_ops *pm; } ; 547 struct device_attribute { struct attribute attr; ssize_t (*show)(struct device *, struct device_attribute *, char *); ssize_t (*store)(struct device *, struct device_attribute *, 
const char *, size_t ); } ; 700 struct device_dma_parameters { unsigned int max_segment_size; unsigned long segment_boundary_mask; } ; 709 struct dma_coherent_mem ; 709 struct cma ; 709 struct device { struct device *parent; struct device_private *p; struct kobject kobj; const char *init_name; const struct device_type *type; struct mutex mutex; struct bus_type *bus; struct device_driver *driver; void *platform_data; void *driver_data; struct dev_pm_info power; struct dev_pm_domain *pm_domain; struct irq_domain *msi_domain; struct dev_pin_info *pins; struct list_head msi_list; int numa_node; u64 *dma_mask; u64 coherent_dma_mask; unsigned long dma_pfn_offset; struct device_dma_parameters *dma_parms; struct list_head dma_pools; struct dma_coherent_mem *dma_mem; struct cma *cma_area; struct dev_archdata archdata; struct device_node *of_node; struct fwnode_handle *fwnode; dev_t devt; u32 id; spinlock_t devres_lock; struct list_head devres_head; struct klist_node knode_class; struct class *class; const struct attribute_group **groups; void (*release)(struct device *); struct iommu_group *iommu_group; struct iommu_fwspec *iommu_fwspec; bool offline_disabled; bool offline; } ; 865 struct wakeup_source { const char *name; struct list_head entry; spinlock_t lock; struct wake_irq *wakeirq; struct timer_list timer; unsigned long timer_expires; ktime_t total_time; ktime_t max_time; ktime_t last_time; ktime_t start_prevent_time; ktime_t prevent_sleep_time; unsigned long event_count; unsigned long active_count; unsigned long relax_count; unsigned long expire_count; unsigned long wakeup_count; bool active; bool autosleep_enabled; } ; 1330 struct scatterlist ; 96 enum dma_data_direction { DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2, DMA_NONE = 3 } ; 273 struct vm_fault { unsigned int flags; gfp_t gfp_mask; unsigned long pgoff; void *virtual_address; struct page *cow_page; struct page *page; void *entry; } ; 308 struct fault_env { struct vm_area_struct *vma; 
unsigned long address; unsigned int flags; pmd_t *pmd; pte_t *pte; spinlock_t *ptl; pgtable_t prealloc_pte; } ; 335 struct vm_operations_struct { void (*open)(struct vm_area_struct *); void (*close)(struct vm_area_struct *); int (*mremap)(struct vm_area_struct *); int (*fault)(struct vm_area_struct *, struct vm_fault *); int (*pmd_fault)(struct vm_area_struct *, unsigned long, pmd_t *, unsigned int); void (*map_pages)(struct fault_env *, unsigned long, unsigned long); int (*page_mkwrite)(struct vm_area_struct *, struct vm_fault *); int (*pfn_mkwrite)(struct vm_area_struct *, struct vm_fault *); int (*access)(struct vm_area_struct *, unsigned long, void *, int, int); const char * (*name)(struct vm_area_struct *); int (*set_policy)(struct vm_area_struct *, struct mempolicy *); struct mempolicy * (*get_policy)(struct vm_area_struct *, unsigned long); struct page * (*find_special_page)(struct vm_area_struct *, unsigned long); } ; 2450 struct scatterlist { unsigned long sg_magic; unsigned long page_link; unsigned int offset; unsigned int length; dma_addr_t dma_address; unsigned int dma_length; } ; 21 struct sg_table { struct scatterlist *sgl; unsigned int nents; unsigned int orig_nents; } ; 406 struct dma_map_ops { void * (*alloc)(struct device *, size_t , dma_addr_t *, gfp_t , unsigned long); void (*free)(struct device *, size_t , void *, dma_addr_t , unsigned long); int (*mmap)(struct device *, struct vm_area_struct *, void *, dma_addr_t , size_t , unsigned long); int (*get_sgtable)(struct device *, struct sg_table *, void *, dma_addr_t , size_t , unsigned long); dma_addr_t (*map_page)(struct device *, struct page *, unsigned long, size_t , enum dma_data_direction , unsigned long); void (*unmap_page)(struct device *, dma_addr_t , size_t , enum dma_data_direction , unsigned long); int (*map_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction , unsigned long); void (*unmap_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction , 
unsigned long); dma_addr_t (*map_resource)(struct device *, phys_addr_t , size_t , enum dma_data_direction , unsigned long); void (*unmap_resource)(struct device *, dma_addr_t , size_t , enum dma_data_direction , unsigned long); void (*sync_single_for_cpu)(struct device *, dma_addr_t , size_t , enum dma_data_direction ); void (*sync_single_for_device)(struct device *, dma_addr_t , size_t , enum dma_data_direction ); void (*sync_sg_for_cpu)(struct device *, struct scatterlist *, int, enum dma_data_direction ); void (*sync_sg_for_device)(struct device *, struct scatterlist *, int, enum dma_data_direction ); int (*mapping_error)(struct device *, dma_addr_t ); int (*dma_supported)(struct device *, u64 ); int (*set_dma_mask)(struct device *, u64 ); int is_phys; } ; 15 typedef u64 netdev_features_t; 70 union __anonunion_in6_u_392 { __u8 u6_addr8[16U]; __be16 u6_addr16[8U]; __be32 u6_addr32[4U]; } ; 70 struct in6_addr { union __anonunion_in6_u_392 in6_u; } ; 46 struct ethhdr { unsigned char h_dest[6U]; unsigned char h_source[6U]; __be16 h_proto; } ; 205 struct pipe_buf_operations ; 205 struct pipe_buffer { struct page *page; unsigned int offset; unsigned int len; const struct pipe_buf_operations *ops; unsigned int flags; unsigned long private; } ; 27 struct pipe_inode_info { struct mutex mutex; wait_queue_head_t wait; unsigned int nrbufs; unsigned int curbuf; unsigned int buffers; unsigned int readers; unsigned int writers; unsigned int files; unsigned int waiting_writers; unsigned int r_counter; unsigned int w_counter; struct page *tmp_page; struct fasync_struct *fasync_readers; struct fasync_struct *fasync_writers; struct pipe_buffer *bufs; struct user_struct *user; } ; 63 struct pipe_buf_operations { int can_merge; int (*confirm)(struct pipe_inode_info *, struct pipe_buffer *); void (*release)(struct pipe_inode_info *, struct pipe_buffer *); int (*steal)(struct pipe_inode_info *, struct pipe_buffer *); void (*get)(struct pipe_inode_info *, struct pipe_buffer *); } ; 
295 struct flowi_tunnel { __be64 tun_id; } ; 26 struct flowi_common { int flowic_oif; int flowic_iif; __u32 flowic_mark; __u8 flowic_tos; __u8 flowic_scope; __u8 flowic_proto; __u8 flowic_flags; __u32 flowic_secid; struct flowi_tunnel flowic_tun_key; } ; 41 struct __anonstruct_ports_399 { __be16 dport; __be16 sport; } ; 41 struct __anonstruct_icmpt_400 { __u8 type; __u8 code; } ; 41 struct __anonstruct_dnports_401 { __le16 dport; __le16 sport; } ; 41 struct __anonstruct_mht_402 { __u8 type; } ; 41 union flowi_uli { struct __anonstruct_ports_399 ports; struct __anonstruct_icmpt_400 icmpt; struct __anonstruct_dnports_401 dnports; __be32 spi; __be32 gre_key; struct __anonstruct_mht_402 mht; } ; 65 struct flowi4 { struct flowi_common __fl_common; __be32 saddr; __be32 daddr; union flowi_uli uli; } ; 122 struct flowi6 { struct flowi_common __fl_common; struct in6_addr daddr; struct in6_addr saddr; __be32 flowlabel; union flowi_uli uli; } ; 140 struct flowidn { struct flowi_common __fl_common; __le16 daddr; __le16 saddr; union flowi_uli uli; } ; 160 union __anonunion_u_403 { struct flowi_common __fl_common; struct flowi4 ip4; struct flowi6 ip6; struct flowidn dn; } ; 160 struct flowi { union __anonunion_u_403 u; } ; 264 struct napi_struct ; 265 struct nf_conntrack { atomic_t use; } ; 254 union __anonunion____missing_field_name_404 { __be32 ipv4_daddr; struct in6_addr ipv6_daddr; char neigh_header[8U]; } ; 254 struct nf_bridge_info { atomic_t use; unsigned char orig_proto; unsigned char pkt_otherhost; unsigned char in_prerouting; unsigned char bridged_dnat; __u16 frag_max_size; struct net_device *physindev; struct net_device *physoutdev; union __anonunion____missing_field_name_404 __annonCompField90; } ; 278 struct sk_buff_head { struct sk_buff *next; struct sk_buff *prev; __u32 qlen; spinlock_t lock; } ; 500 typedef unsigned int sk_buff_data_t; 501 struct __anonstruct____missing_field_name_407 { u32 stamp_us; u32 stamp_jiffies; } ; 501 union 
__anonunion____missing_field_name_406 { u64 v64; struct __anonstruct____missing_field_name_407 __annonCompField91; } ; 501 struct skb_mstamp { union __anonunion____missing_field_name_406 __annonCompField92; } ; 564 union __anonunion____missing_field_name_410 { ktime_t tstamp; struct skb_mstamp skb_mstamp; } ; 564 struct __anonstruct____missing_field_name_409 { struct sk_buff *next; struct sk_buff *prev; union __anonunion____missing_field_name_410 __annonCompField93; } ; 564 union __anonunion____missing_field_name_408 { struct __anonstruct____missing_field_name_409 __annonCompField94; struct rb_node rbnode; } ; 564 struct sec_path ; 564 struct __anonstruct____missing_field_name_412 { __u16 csum_start; __u16 csum_offset; } ; 564 union __anonunion____missing_field_name_411 { __wsum csum; struct __anonstruct____missing_field_name_412 __annonCompField96; } ; 564 union __anonunion____missing_field_name_413 { unsigned int napi_id; unsigned int sender_cpu; } ; 564 union __anonunion____missing_field_name_414 { __u32 mark; __u32 reserved_tailroom; } ; 564 union __anonunion____missing_field_name_415 { __be16 inner_protocol; __u8 inner_ipproto; } ; 564 struct sk_buff { union __anonunion____missing_field_name_408 __annonCompField95; struct sock *sk; struct net_device *dev; char cb[48U]; unsigned long _skb_refdst; void (*destructor)(struct sk_buff *); struct sec_path *sp; struct nf_conntrack *nfct; struct nf_bridge_info *nf_bridge; unsigned int len; unsigned int data_len; __u16 mac_len; __u16 hdr_len; __u16 queue_mapping; __u8 __cloned_offset[0U]; unsigned char cloned; unsigned char nohdr; unsigned char fclone; unsigned char peeked; unsigned char head_frag; unsigned char xmit_more; unsigned char __unused; __u32 headers_start[0U]; __u8 __pkt_type_offset[0U]; unsigned char pkt_type; unsigned char pfmemalloc; unsigned char ignore_df; unsigned char nfctinfo; unsigned char nf_trace; unsigned char ip_summed; unsigned char ooo_okay; unsigned char l4_hash; unsigned char sw_hash; 
unsigned char wifi_acked_valid; unsigned char wifi_acked; unsigned char no_fcs; unsigned char encapsulation; unsigned char encap_hdr_csum; unsigned char csum_valid; unsigned char csum_complete_sw; unsigned char csum_level; unsigned char csum_bad; unsigned char ndisc_nodetype; unsigned char ipvs_property; unsigned char inner_protocol_type; unsigned char remcsum_offload; unsigned char offload_fwd_mark; __u16 tc_index; __u16 tc_verd; union __anonunion____missing_field_name_411 __annonCompField97; __u32 priority; int skb_iif; __u32 hash; __be16 vlan_proto; __u16 vlan_tci; union __anonunion____missing_field_name_413 __annonCompField98; __u32 secmark; union __anonunion____missing_field_name_414 __annonCompField99; union __anonunion____missing_field_name_415 __annonCompField100; __u16 inner_transport_header; __u16 inner_network_header; __u16 inner_mac_header; __be16 protocol; __u16 transport_header; __u16 network_header; __u16 mac_header; __u32 headers_end[0U]; sk_buff_data_t tail; sk_buff_data_t end; unsigned char *head; unsigned char *data; unsigned int truesize; atomic_t users; } ; 838 struct dst_entry ; 887 struct rtable ; 1418 struct dql { unsigned int num_queued; unsigned int adj_limit; unsigned int last_obj_cnt; unsigned int limit; unsigned int num_completed; unsigned int prev_ovlimit; unsigned int prev_num_queued; unsigned int prev_last_obj_cnt; unsigned int lowest_slack; unsigned long slack_start_time; unsigned int max_limit; unsigned int min_limit; unsigned int slack_hold_time; } ; 18 typedef s32 compat_time_t; 39 typedef s32 compat_long_t; 45 typedef u32 compat_uptr_t; 46 struct compat_timespec { compat_time_t tv_sec; s32 tv_nsec; } ; 278 struct compat_robust_list { compat_uptr_t next; } ; 282 struct compat_robust_list_head { struct compat_robust_list list; compat_long_t futex_offset; compat_uptr_t list_op_pending; } ; 722 struct ethtool_cmd { __u32 cmd; __u32 supported; __u32 advertising; __u16 speed; __u8 duplex; __u8 port; __u8 phy_address; __u8 transceiver; 
__u8 autoneg; __u8 mdio_support; __u32 maxtxpkt; __u32 maxrxpkt; __u16 speed_hi; __u8 eth_tp_mdix; __u8 eth_tp_mdix_ctrl; __u32 lp_advertising; __u32 reserved[2U]; } ; 131 struct ethtool_drvinfo { __u32 cmd; char driver[32U]; char version[32U]; char fw_version[32U]; char bus_info[32U]; char erom_version[32U]; char reserved2[12U]; __u32 n_priv_flags; __u32 n_stats; __u32 testinfo_len; __u32 eedump_len; __u32 regdump_len; } ; 195 struct ethtool_wolinfo { __u32 cmd; __u32 supported; __u32 wolopts; __u8 sopass[6U]; } ; 239 struct ethtool_tunable { __u32 cmd; __u32 id; __u32 type_id; __u32 len; void *data[0U]; } ; 251 struct ethtool_regs { __u32 cmd; __u32 version; __u32 len; __u8 data[0U]; } ; 273 struct ethtool_eeprom { __u32 cmd; __u32 magic; __u32 offset; __u32 len; __u8 data[0U]; } ; 299 struct ethtool_eee { __u32 cmd; __u32 supported; __u32 advertised; __u32 lp_advertised; __u32 eee_active; __u32 eee_enabled; __u32 tx_lpi_enabled; __u32 tx_lpi_timer; __u32 reserved[2U]; } ; 328 struct ethtool_modinfo { __u32 cmd; __u32 type; __u32 eeprom_len; __u32 reserved[8U]; } ; 345 struct ethtool_coalesce { __u32 cmd; __u32 rx_coalesce_usecs; __u32 rx_max_coalesced_frames; __u32 rx_coalesce_usecs_irq; __u32 rx_max_coalesced_frames_irq; __u32 tx_coalesce_usecs; __u32 tx_max_coalesced_frames; __u32 tx_coalesce_usecs_irq; __u32 tx_max_coalesced_frames_irq; __u32 stats_block_coalesce_usecs; __u32 use_adaptive_rx_coalesce; __u32 use_adaptive_tx_coalesce; __u32 pkt_rate_low; __u32 rx_coalesce_usecs_low; __u32 rx_max_coalesced_frames_low; __u32 tx_coalesce_usecs_low; __u32 tx_max_coalesced_frames_low; __u32 pkt_rate_high; __u32 rx_coalesce_usecs_high; __u32 rx_max_coalesced_frames_high; __u32 tx_coalesce_usecs_high; __u32 tx_max_coalesced_frames_high; __u32 rate_sample_interval; } ; 444 struct ethtool_ringparam { __u32 cmd; __u32 rx_max_pending; __u32 rx_mini_max_pending; __u32 rx_jumbo_max_pending; __u32 tx_max_pending; __u32 rx_pending; __u32 rx_mini_pending; __u32 
rx_jumbo_pending; __u32 tx_pending; } ; 481 struct ethtool_channels { __u32 cmd; __u32 max_rx; __u32 max_tx; __u32 max_other; __u32 max_combined; __u32 rx_count; __u32 tx_count; __u32 other_count; __u32 combined_count; } ; 509 struct ethtool_pauseparam { __u32 cmd; __u32 autoneg; __u32 rx_pause; __u32 tx_pause; } ; 613 struct ethtool_test { __u32 cmd; __u32 flags; __u32 reserved; __u32 len; __u64 data[0U]; } ; 645 struct ethtool_stats { __u32 cmd; __u32 n_stats; __u64 data[0U]; } ; 687 struct ethtool_tcpip4_spec { __be32 ip4src; __be32 ip4dst; __be16 psrc; __be16 pdst; __u8 tos; } ; 720 struct ethtool_ah_espip4_spec { __be32 ip4src; __be32 ip4dst; __be32 spi; __u8 tos; } ; 736 struct ethtool_usrip4_spec { __be32 ip4src; __be32 ip4dst; __be32 l4_4_bytes; __u8 tos; __u8 ip_ver; __u8 proto; } ; 756 struct ethtool_tcpip6_spec { __be32 ip6src[4U]; __be32 ip6dst[4U]; __be16 psrc; __be16 pdst; __u8 tclass; } ; 774 struct ethtool_ah_espip6_spec { __be32 ip6src[4U]; __be32 ip6dst[4U]; __be32 spi; __u8 tclass; } ; 790 struct ethtool_usrip6_spec { __be32 ip6src[4U]; __be32 ip6dst[4U]; __be32 l4_4_bytes; __u8 tclass; __u8 l4_proto; } ; 806 union ethtool_flow_union { struct ethtool_tcpip4_spec tcp_ip4_spec; struct ethtool_tcpip4_spec udp_ip4_spec; struct ethtool_tcpip4_spec sctp_ip4_spec; struct ethtool_ah_espip4_spec ah_ip4_spec; struct ethtool_ah_espip4_spec esp_ip4_spec; struct ethtool_usrip4_spec usr_ip4_spec; struct ethtool_tcpip6_spec tcp_ip6_spec; struct ethtool_tcpip6_spec udp_ip6_spec; struct ethtool_tcpip6_spec sctp_ip6_spec; struct ethtool_ah_espip6_spec ah_ip6_spec; struct ethtool_ah_espip6_spec esp_ip6_spec; struct ethtool_usrip6_spec usr_ip6_spec; struct ethhdr ether_spec; __u8 hdata[52U]; } ; 823 struct ethtool_flow_ext { __u8 padding[2U]; unsigned char h_dest[6U]; __be16 vlan_etype; __be16 vlan_tci; __be32 data[2U]; } ; 842 struct ethtool_rx_flow_spec { __u32 flow_type; union ethtool_flow_union h_u; struct ethtool_flow_ext h_ext; union ethtool_flow_union m_u; 
struct ethtool_flow_ext m_ext; __u64 ring_cookie; __u32 location; } ; 892 struct ethtool_rxnfc { __u32 cmd; __u32 flow_type; __u64 data; struct ethtool_rx_flow_spec fs; __u32 rule_cnt; __u32 rule_locs[0U]; } ; 1063 struct ethtool_flash { __u32 cmd; __u32 region; char data[128U]; } ; 1071 struct ethtool_dump { __u32 cmd; __u32 version; __u32 flag; __u32 len; __u8 data[0U]; } ; 1147 struct ethtool_ts_info { __u32 cmd; __u32 so_timestamping; __s32 phc_index; __u32 tx_types; __u32 tx_reserved[3U]; __u32 rx_filters; __u32 rx_reserved[3U]; } ; 1522 struct ethtool_link_settings { __u32 cmd; __u32 speed; __u8 duplex; __u8 port; __u8 phy_address; __u8 autoneg; __u8 mdio_support; __u8 eth_tp_mdix; __u8 eth_tp_mdix_ctrl; __s8 link_mode_masks_nwords; __u32 reserved[8U]; __u32 link_mode_masks[0U]; } ; 39 enum ethtool_phys_id_state { ETHTOOL_ID_INACTIVE = 0, ETHTOOL_ID_ACTIVE = 1, ETHTOOL_ID_ON = 2, ETHTOOL_ID_OFF = 3 } ; 97 struct __anonstruct_link_modes_436 { unsigned long supported[1U]; unsigned long advertising[1U]; unsigned long lp_advertising[1U]; } ; 97 struct ethtool_link_ksettings { struct ethtool_link_settings base; struct __anonstruct_link_modes_436 link_modes; } ; 158 struct ethtool_ops { int (*get_settings)(struct net_device *, struct ethtool_cmd *); int (*set_settings)(struct net_device *, struct ethtool_cmd *); void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *); int (*get_regs_len)(struct net_device *); void (*get_regs)(struct net_device *, struct ethtool_regs *, void *); void (*get_wol)(struct net_device *, struct ethtool_wolinfo *); int (*set_wol)(struct net_device *, struct ethtool_wolinfo *); u32 (*get_msglevel)(struct net_device *); void (*set_msglevel)(struct net_device *, u32 ); int (*nway_reset)(struct net_device *); u32 (*get_link)(struct net_device *); int (*get_eeprom_len)(struct net_device *); int (*get_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); int (*set_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); int 
(*get_coalesce)(struct net_device *, struct ethtool_coalesce *); int (*set_coalesce)(struct net_device *, struct ethtool_coalesce *); void (*get_ringparam)(struct net_device *, struct ethtool_ringparam *); int (*set_ringparam)(struct net_device *, struct ethtool_ringparam *); void (*get_pauseparam)(struct net_device *, struct ethtool_pauseparam *); int (*set_pauseparam)(struct net_device *, struct ethtool_pauseparam *); void (*self_test)(struct net_device *, struct ethtool_test *, u64 *); void (*get_strings)(struct net_device *, u32 , u8 *); int (*set_phys_id)(struct net_device *, enum ethtool_phys_id_state ); void (*get_ethtool_stats)(struct net_device *, struct ethtool_stats *, u64 *); int (*begin)(struct net_device *); void (*complete)(struct net_device *); u32 (*get_priv_flags)(struct net_device *); int (*set_priv_flags)(struct net_device *, u32 ); int (*get_sset_count)(struct net_device *, int); int (*get_rxnfc)(struct net_device *, struct ethtool_rxnfc *, u32 *); int (*set_rxnfc)(struct net_device *, struct ethtool_rxnfc *); int (*flash_device)(struct net_device *, struct ethtool_flash *); int (*reset)(struct net_device *, u32 *); u32 (*get_rxfh_key_size)(struct net_device *); u32 (*get_rxfh_indir_size)(struct net_device *); int (*get_rxfh)(struct net_device *, u32 *, u8 *, u8 *); int (*set_rxfh)(struct net_device *, const u32 *, const u8 *, const u8 ); void (*get_channels)(struct net_device *, struct ethtool_channels *); int (*set_channels)(struct net_device *, struct ethtool_channels *); int (*get_dump_flag)(struct net_device *, struct ethtool_dump *); int (*get_dump_data)(struct net_device *, struct ethtool_dump *, void *); int (*set_dump)(struct net_device *, struct ethtool_dump *); int (*get_ts_info)(struct net_device *, struct ethtool_ts_info *); int (*get_module_info)(struct net_device *, struct ethtool_modinfo *); int (*get_module_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); int (*get_eee)(struct net_device *, struct ethtool_eee *); 
int (*set_eee)(struct net_device *, struct ethtool_eee *); int (*get_tunable)(struct net_device *, const struct ethtool_tunable *, void *); int (*set_tunable)(struct net_device *, const struct ethtool_tunable *, const void *); int (*get_per_queue_coalesce)(struct net_device *, u32 , struct ethtool_coalesce *); int (*set_per_queue_coalesce)(struct net_device *, u32 , struct ethtool_coalesce *); int (*get_link_ksettings)(struct net_device *, struct ethtool_link_ksettings *); int (*set_link_ksettings)(struct net_device *, const struct ethtool_link_ksettings *); } ; 375 struct prot_inuse ; 376 struct netns_core { struct ctl_table_header *sysctl_hdr; int sysctl_somaxconn; struct prot_inuse *inuse; } ; 38 struct u64_stats_sync { } ; 164 struct ipstats_mib { u64 mibs[36U]; struct u64_stats_sync syncp; } ; 61 struct icmp_mib { unsigned long mibs[28U]; } ; 67 struct icmpmsg_mib { atomic_long_t mibs[512U]; } ; 72 struct icmpv6_mib { unsigned long mibs[6U]; } ; 83 struct icmpv6msg_mib { atomic_long_t mibs[512U]; } ; 93 struct tcp_mib { unsigned long mibs[16U]; } ; 100 struct udp_mib { unsigned long mibs[9U]; } ; 106 struct linux_mib { unsigned long mibs[118U]; } ; 112 struct linux_xfrm_mib { unsigned long mibs[29U]; } ; 118 struct netns_mib { struct tcp_mib *tcp_statistics; struct ipstats_mib *ip_statistics; struct linux_mib *net_statistics; struct udp_mib *udp_statistics; struct udp_mib *udplite_statistics; struct icmp_mib *icmp_statistics; struct icmpmsg_mib *icmpmsg_statistics; struct proc_dir_entry *proc_net_devsnmp6; struct udp_mib *udp_stats_in6; struct udp_mib *udplite_stats_in6; struct ipstats_mib *ipv6_statistics; struct icmpv6_mib *icmpv6_statistics; struct icmpv6msg_mib *icmpv6msg_statistics; struct linux_xfrm_mib *xfrm_statistics; } ; 26 struct netns_unix { int sysctl_max_dgram_qlen; struct ctl_table_header *ctl; } ; 12 struct netns_packet { struct mutex sklist_lock; struct hlist_head sklist; } ; 14 struct netns_frags { struct percpu_counter mem; int timeout; int 
high_thresh; int low_thresh; int max_dist; } ; 187 struct ipv4_devconf ; 188 struct fib_rules_ops ; 189 struct fib_table ; 190 struct local_ports { seqlock_t lock; int range[2U]; bool warned; } ; 24 struct ping_group_range { seqlock_t lock; kgid_t range[2U]; } ; 29 struct inet_peer_base ; 29 struct xt_table ; 29 struct netns_ipv4 { struct ctl_table_header *forw_hdr; struct ctl_table_header *frags_hdr; struct ctl_table_header *ipv4_hdr; struct ctl_table_header *route_hdr; struct ctl_table_header *xfrm4_hdr; struct ipv4_devconf *devconf_all; struct ipv4_devconf *devconf_dflt; struct fib_rules_ops *rules_ops; bool fib_has_custom_rules; struct fib_table *fib_main; struct fib_table *fib_default; int fib_num_tclassid_users; struct hlist_head *fib_table_hash; bool fib_offload_disabled; struct sock *fibnl; struct sock **icmp_sk; struct sock *mc_autojoin_sk; struct inet_peer_base *peers; struct sock **tcp_sk; struct netns_frags frags; struct xt_table *iptable_filter; struct xt_table *iptable_mangle; struct xt_table *iptable_raw; struct xt_table *arptable_filter; struct xt_table *iptable_security; struct xt_table *nat_table; int sysctl_icmp_echo_ignore_all; int sysctl_icmp_echo_ignore_broadcasts; int sysctl_icmp_ignore_bogus_error_responses; int sysctl_icmp_ratelimit; int sysctl_icmp_ratemask; int sysctl_icmp_errors_use_inbound_ifaddr; struct local_ports ip_local_ports; int sysctl_tcp_ecn; int sysctl_tcp_ecn_fallback; int sysctl_ip_default_ttl; int sysctl_ip_no_pmtu_disc; int sysctl_ip_fwd_use_pmtu; int sysctl_ip_nonlocal_bind; int sysctl_ip_dynaddr; int sysctl_ip_early_demux; int sysctl_fwmark_reflect; int sysctl_tcp_fwmark_accept; int sysctl_tcp_l3mdev_accept; int sysctl_tcp_mtu_probing; int sysctl_tcp_base_mss; int sysctl_tcp_probe_threshold; u32 sysctl_tcp_probe_interval; int sysctl_tcp_keepalive_time; int sysctl_tcp_keepalive_probes; int sysctl_tcp_keepalive_intvl; int sysctl_tcp_syn_retries; int sysctl_tcp_synack_retries; int sysctl_tcp_syncookies; int 
sysctl_tcp_reordering; int sysctl_tcp_retries1; int sysctl_tcp_retries2; int sysctl_tcp_orphan_retries; int sysctl_tcp_fin_timeout; unsigned int sysctl_tcp_notsent_lowat; int sysctl_igmp_max_memberships; int sysctl_igmp_max_msf; int sysctl_igmp_llm_reports; int sysctl_igmp_qrv; struct ping_group_range ping_group_range; atomic_t dev_addr_genid; unsigned long *sysctl_local_reserved_ports; struct list_head mr_tables; struct fib_rules_ops *mr_rules_ops; int sysctl_fib_multipath_use_neigh; atomic_t rt_genid; } ; 141 struct neighbour ; 141 struct dst_ops { unsigned short family; unsigned int gc_thresh; int (*gc)(struct dst_ops *); struct dst_entry * (*check)(struct dst_entry *, __u32 ); unsigned int (*default_advmss)(const struct dst_entry *); unsigned int (*mtu)(const struct dst_entry *); u32 * (*cow_metrics)(struct dst_entry *, unsigned long); void (*destroy)(struct dst_entry *); void (*ifdown)(struct dst_entry *, struct net_device *, int); struct dst_entry * (*negative_advice)(struct dst_entry *); void (*link_failure)(struct sk_buff *); void (*update_pmtu)(struct dst_entry *, struct sock *, struct sk_buff *, u32 ); void (*redirect)(struct dst_entry *, struct sock *, struct sk_buff *); int (*local_out)(struct net *, struct sock *, struct sk_buff *); struct neighbour * (*neigh_lookup)(const struct dst_entry *, struct sk_buff *, const void *); struct kmem_cache *kmem_cachep; struct percpu_counter pcpuc_entries; } ; 73 struct netns_sysctl_ipv6 { struct ctl_table_header *hdr; struct ctl_table_header *route_hdr; struct ctl_table_header *icmp_hdr; struct ctl_table_header *frags_hdr; struct ctl_table_header *xfrm6_hdr; int bindv6only; int flush_delay; int ip6_rt_max_size; int ip6_rt_gc_min_interval; int ip6_rt_gc_timeout; int ip6_rt_gc_interval; int ip6_rt_gc_elasticity; int ip6_rt_mtu_expires; int ip6_rt_min_advmss; int flowlabel_consistency; int auto_flowlabels; int icmpv6_time; int anycast_src_echo_reply; int ip_nonlocal_bind; int fwmark_reflect; int idgen_retries; int 
idgen_delay; int flowlabel_state_ranges; } ; 40 struct ipv6_devconf ; 40 struct rt6_info ; 40 struct rt6_statistics ; 40 struct fib6_table ; 40 struct netns_ipv6 { struct netns_sysctl_ipv6 sysctl; struct ipv6_devconf *devconf_all; struct ipv6_devconf *devconf_dflt; struct inet_peer_base *peers; struct netns_frags frags; struct xt_table *ip6table_filter; struct xt_table *ip6table_mangle; struct xt_table *ip6table_raw; struct xt_table *ip6table_security; struct xt_table *ip6table_nat; struct rt6_info *ip6_null_entry; struct rt6_statistics *rt6_stats; struct timer_list ip6_fib_timer; struct hlist_head *fib_table_hash; struct fib6_table *fib6_main_tbl; struct list_head fib6_walkers; struct dst_ops ip6_dst_ops; rwlock_t fib6_walker_lock; spinlock_t fib6_gc_lock; unsigned int ip6_rt_gc_expire; unsigned long ip6_rt_last_gc; struct rt6_info *ip6_prohibit_entry; struct rt6_info *ip6_blk_hole_entry; struct fib6_table *fib6_local_tbl; struct fib_rules_ops *fib6_rules_ops; struct sock **icmp_sk; struct sock *ndisc_sk; struct sock *tcp_sk; struct sock *igmp_sk; struct sock *mc_autojoin_sk; struct list_head mr6_tables; struct fib_rules_ops *mr6_rules_ops; atomic_t dev_addr_genid; atomic_t fib6_sernum; } ; 89 struct netns_nf_frag { struct netns_sysctl_ipv6 sysctl; struct netns_frags frags; } ; 95 struct netns_sysctl_lowpan { struct ctl_table_header *frags_hdr; } ; 14 struct netns_ieee802154_lowpan { struct netns_sysctl_lowpan sysctl; struct netns_frags frags; } ; 20 struct sctp_mib ; 21 struct netns_sctp { struct sctp_mib *sctp_statistics; struct proc_dir_entry *proc_net_sctp; struct ctl_table_header *sysctl_header; struct sock *ctl_sock; struct list_head local_addr_list; struct list_head addr_waitq; struct timer_list addr_wq_timer; struct list_head auto_asconf_splist; spinlock_t addr_wq_lock; spinlock_t local_addr_lock; unsigned int rto_initial; unsigned int rto_min; unsigned int rto_max; int rto_alpha; int rto_beta; int max_burst; int cookie_preserve_enable; char 
*sctp_hmac_alg; unsigned int valid_cookie_life; unsigned int sack_timeout; unsigned int hb_interval; int max_retrans_association; int max_retrans_path; int max_retrans_init; int pf_retrans; int pf_enable; int sndbuf_policy; int rcvbuf_policy; int default_auto_asconf; int addip_enable; int addip_noauth; int prsctp_enable; int auth_enable; int scope_policy; int rwnd_upd_shift; unsigned long max_autoclose; } ; 141 struct netns_dccp { struct sock *v4_ctl_sk; struct sock *v6_ctl_sk; } ; 79 struct nf_logger ; 80 struct nf_queue_handler ; 81 struct nf_hook_entry ; 81 struct netns_nf { struct proc_dir_entry *proc_netfilter; const struct nf_queue_handler *queue_handler; const struct nf_logger *nf_loggers[13U]; struct ctl_table_header *nf_log_dir_header; struct nf_hook_entry *hooks[13U][8U]; } ; 21 struct ebt_table ; 22 struct netns_xt { struct list_head tables[13U]; bool notrack_deprecated_warning; bool clusterip_deprecated_warning; struct ebt_table *broute_table; struct ebt_table *frame_filter; struct ebt_table *frame_nat; } ; 19 struct hlist_nulls_node ; 19 struct hlist_nulls_head { struct hlist_nulls_node *first; } ; 23 struct hlist_nulls_node { struct hlist_nulls_node *next; struct hlist_nulls_node **pprev; } ; 32 struct nf_proto_net { struct ctl_table_header *ctl_table_header; struct ctl_table *ctl_table; unsigned int users; } ; 21 struct nf_generic_net { struct nf_proto_net pn; unsigned int timeout; } ; 26 struct nf_tcp_net { struct nf_proto_net pn; unsigned int timeouts[14U]; unsigned int tcp_loose; unsigned int tcp_be_liberal; unsigned int tcp_max_retrans; } ; 40 struct nf_udp_net { struct nf_proto_net pn; unsigned int timeouts[2U]; } ; 45 struct nf_icmp_net { struct nf_proto_net pn; unsigned int timeout; } ; 50 struct nf_ip_net { struct nf_generic_net generic; struct nf_tcp_net tcp; struct nf_udp_net udp; struct nf_icmp_net icmp; struct nf_icmp_net icmpv6; } ; 58 struct ct_pcpu { spinlock_t lock; struct hlist_nulls_head unconfirmed; struct hlist_nulls_head dying; } 
; 64 struct ip_conntrack_stat ; 64 struct nf_ct_event_notifier ; 64 struct nf_exp_event_notifier ; 64 struct netns_ct { atomic_t count; unsigned int expect_count; struct delayed_work ecache_dwork; bool ecache_dwork_pending; struct ctl_table_header *sysctl_header; struct ctl_table_header *acct_sysctl_header; struct ctl_table_header *tstamp_sysctl_header; struct ctl_table_header *event_sysctl_header; struct ctl_table_header *helper_sysctl_header; unsigned int sysctl_log_invalid; int sysctl_events; int sysctl_acct; int sysctl_auto_assign_helper; bool auto_assign_helper_warned; int sysctl_tstamp; int sysctl_checksum; struct ct_pcpu *pcpu_lists; struct ip_conntrack_stat *stat; struct nf_ct_event_notifier *nf_conntrack_event_cb; struct nf_exp_event_notifier *nf_expect_event_cb; struct nf_ip_net nf_ct_proto; unsigned int labels_used; u8 label_words; } ; 96 struct nft_af_info ; 97 struct netns_nftables { struct list_head af_info; struct list_head commit_list; struct nft_af_info *ipv4; struct nft_af_info *ipv6; struct nft_af_info *inet; struct nft_af_info *arp; struct nft_af_info *bridge; struct nft_af_info *netdev; unsigned int base_seq; u8 gencursor; } ; 509 struct flow_cache_percpu { struct hlist_head *hash_table; int hash_count; u32 hash_rnd; int hash_rnd_recalc; struct tasklet_struct flush_tasklet; } ; 16 struct flow_cache { u32 hash_shift; struct flow_cache_percpu *percpu; struct notifier_block hotcpu_notifier; int low_watermark; int high_watermark; struct timer_list rnd_timer; } ; 25 struct xfrm_policy_hash { struct hlist_head *table; unsigned int hmask; u8 dbits4; u8 sbits4; u8 dbits6; u8 sbits6; } ; 21 struct xfrm_policy_hthresh { struct work_struct work; seqlock_t lock; u8 lbits4; u8 rbits4; u8 lbits6; u8 rbits6; } ; 30 struct netns_xfrm { struct list_head state_all; struct hlist_head *state_bydst; struct hlist_head *state_bysrc; struct hlist_head *state_byspi; unsigned int state_hmask; unsigned int state_num; struct work_struct state_hash_work; struct list_head 
policy_all; struct hlist_head *policy_byidx; unsigned int policy_idx_hmask; struct hlist_head policy_inexact[3U]; struct xfrm_policy_hash policy_bydst[3U]; unsigned int policy_count[6U]; struct work_struct policy_hash_work; struct xfrm_policy_hthresh policy_hthresh; struct sock *nlsk; struct sock *nlsk_stash; u32 sysctl_aevent_etime; u32 sysctl_aevent_rseqth; int sysctl_larval_drop; u32 sysctl_acq_expires; struct ctl_table_header *sysctl_hdr; struct dst_ops xfrm4_dst_ops; struct dst_ops xfrm6_dst_ops; spinlock_t xfrm_state_lock; spinlock_t xfrm_policy_lock; struct mutex xfrm_cfg_mutex; struct flow_cache flow_cache_global; atomic_t flow_cache_genid; struct list_head flow_cache_gc_list; atomic_t flow_cache_gc_count; spinlock_t flow_cache_gc_lock; struct work_struct flow_cache_gc_work; struct work_struct flow_cache_flush_work; struct mutex flow_flush_sem; } ; 87 struct mpls_route ; 88 struct netns_mpls { size_t platform_labels; struct mpls_route **platform_label; struct ctl_table_header *ctl; } ; 16 struct proc_ns_operations ; 17 struct ns_common { atomic_long_t stashed; const struct proc_ns_operations *ops; unsigned int inum; } ; 11 struct net_generic ; 12 struct netns_ipvs ; 13 struct ucounts ; 13 struct net { atomic_t passive; atomic_t count; spinlock_t rules_mod_lock; atomic64_t cookie_gen; struct list_head list; struct list_head cleanup_list; struct list_head exit_list; struct user_namespace *user_ns; struct ucounts *ucounts; spinlock_t nsid_lock; struct idr netns_ids; struct ns_common ns; struct proc_dir_entry *proc_net; struct proc_dir_entry *proc_net_stat; struct ctl_table_set sysctls; struct sock *rtnl; struct sock *genl_sock; struct list_head dev_base_head; struct hlist_head *dev_name_head; struct hlist_head *dev_index_head; unsigned int dev_base_seq; int ifindex; unsigned int dev_unreg_count; struct list_head rules_ops; struct net_device *loopback_dev; struct netns_core core; struct netns_mib mib; struct netns_packet packet; struct netns_unix unx; struct 
netns_ipv4 ipv4; struct netns_ipv6 ipv6; struct netns_ieee802154_lowpan ieee802154_lowpan; struct netns_sctp sctp; struct netns_dccp dccp; struct netns_nf nf; struct netns_xt xt; struct netns_ct ct; struct netns_nftables nft; struct netns_nf_frag nf_frag; struct sock *nfnl; struct sock *nfnl_stash; struct list_head nfnl_acct_list; struct list_head nfct_timeout_list; struct sk_buff_head wext_nlevents; struct net_generic *gen; struct netns_xfrm xfrm; struct netns_ipvs *ipvs; struct netns_mpls mpls; struct sock *diag_nlsk; atomic_t fnhe_genid; } ; 248 struct __anonstruct_possible_net_t_442 { struct net *net; } ; 248 typedef struct __anonstruct_possible_net_t_442 possible_net_t; 13 typedef unsigned long kernel_ulong_t; 14 struct pci_device_id { __u32 vendor; __u32 device; __u32 subvendor; __u32 subdevice; __u32 class; __u32 class_mask; kernel_ulong_t driver_data; } ; 186 struct acpi_device_id { __u8 id[9U]; kernel_ulong_t driver_data; __u32 cls; __u32 cls_msk; } ; 229 struct of_device_id { char name[32U]; char type[32U]; char compatible[128U]; const void *data; } ; 674 enum fwnode_type { FWNODE_INVALID = 0, FWNODE_OF = 1, FWNODE_ACPI = 2, FWNODE_ACPI_DATA = 3, FWNODE_PDATA = 4, FWNODE_IRQCHIP = 5 } ; 683 struct fwnode_handle { enum fwnode_type type; struct fwnode_handle *secondary; } ; 32 typedef u32 phandle; 34 struct property { char *name; int length; void *value; struct property *next; unsigned long _flags; unsigned int unique_id; struct bin_attribute attr; } ; 44 struct device_node { const char *name; const char *type; phandle phandle; const char *full_name; struct fwnode_handle fwnode; struct property *properties; struct property *deadprops; struct device_node *parent; struct device_node *child; struct device_node *sibling; struct kobject kobj; unsigned long _flags; void *data; } ; 296 struct mii_bus ; 303 struct mdio_device { struct device dev; const struct dev_pm_ops *pm_ops; struct mii_bus *bus; int (*bus_match)(struct device *, struct device_driver *); void 
(*device_free)(struct mdio_device *); void (*device_remove)(struct mdio_device *); int addr; int flags; } ; 41 struct mdio_driver_common { struct device_driver driver; int flags; } ; 244 struct phy_device ; 19 struct vdso_image { void *data; unsigned long size; unsigned long alt; unsigned long alt_len; long sym_vvar_start; long sym_vvar_page; long sym_hpet_page; long sym_pvclock_page; long sym_VDSO32_NOTE_MASK; long sym___kernel_sigreturn; long sym___kernel_rt_sigreturn; long sym___kernel_vsyscall; long sym_int80_landing_pad; } ; 15 typedef __u64 Elf64_Addr; 16 typedef __u16 Elf64_Half; 18 typedef __u64 Elf64_Off; 20 typedef __u32 Elf64_Word; 21 typedef __u64 Elf64_Xword; 190 struct elf64_sym { Elf64_Word st_name; unsigned char st_info; unsigned char st_other; Elf64_Half st_shndx; Elf64_Addr st_value; Elf64_Xword st_size; } ; 198 typedef struct elf64_sym Elf64_Sym; 219 struct elf64_hdr { unsigned char e_ident[16U]; Elf64_Half e_type; Elf64_Half e_machine; Elf64_Word e_version; Elf64_Addr e_entry; Elf64_Off e_phoff; Elf64_Off e_shoff; Elf64_Word e_flags; Elf64_Half e_ehsize; Elf64_Half e_phentsize; Elf64_Half e_phnum; Elf64_Half e_shentsize; Elf64_Half e_shnum; Elf64_Half e_shstrndx; } ; 235 typedef struct elf64_hdr Elf64_Ehdr; 314 struct elf64_shdr { Elf64_Word sh_name; Elf64_Word sh_type; Elf64_Xword sh_flags; Elf64_Addr sh_addr; Elf64_Off sh_offset; Elf64_Xword sh_size; Elf64_Word sh_link; Elf64_Word sh_info; Elf64_Xword sh_addralign; Elf64_Xword sh_entsize; } ; 326 typedef struct elf64_shdr Elf64_Shdr; 53 struct kernel_param ; 58 struct kernel_param_ops { unsigned int flags; int (*set)(const char *, const struct kernel_param *); int (*get)(char *, const struct kernel_param *); void (*free)(void *); } ; 62 struct kparam_string ; 62 struct kparam_array ; 62 union __anonunion____missing_field_name_453 { void *arg; const struct kparam_string *str; const struct kparam_array *arr; } ; 62 struct kernel_param { const char *name; struct module *mod; const struct 
kernel_param_ops *ops; const u16 perm; s8 level; u8 flags; union __anonunion____missing_field_name_453 __annonCompField104; } ; 83 struct kparam_string { unsigned int maxlen; char *string; } ; 89 struct kparam_array { unsigned int max; unsigned int elemsize; unsigned int *num; const struct kernel_param_ops *ops; void *elem; } ; 24 struct latch_tree_node { struct rb_node node[2U]; } ; 211 struct mod_arch_specific { } ; 39 struct module_param_attrs ; 39 struct module_kobject { struct kobject kobj; struct module *mod; struct kobject *drivers_dir; struct module_param_attrs *mp; struct completion *kobj_completion; } ; 50 struct module_attribute { struct attribute attr; ssize_t (*show)(struct module_attribute *, struct module_kobject *, char *); ssize_t (*store)(struct module_attribute *, struct module_kobject *, const char *, size_t ); void (*setup)(struct module *, const char *); int (*test)(struct module *); void (*free)(struct module *); } ; 277 enum module_state { MODULE_STATE_LIVE = 0, MODULE_STATE_COMING = 1, MODULE_STATE_GOING = 2, MODULE_STATE_UNFORMED = 3 } ; 284 struct mod_tree_node { struct module *mod; struct latch_tree_node node; } ; 291 struct module_layout { void *base; unsigned int size; unsigned int text_size; unsigned int ro_size; unsigned int ro_after_init_size; struct mod_tree_node mtn; } ; 307 struct mod_kallsyms { Elf64_Sym *symtab; unsigned int num_symtab; char *strtab; } ; 321 struct klp_modinfo { Elf64_Ehdr hdr; Elf64_Shdr *sechdrs; char *secstrings; unsigned int symndx; } ; 329 struct module_sect_attrs ; 329 struct module_notes_attrs ; 329 struct trace_event_call ; 329 struct trace_enum_map ; 329 struct module { enum module_state state; struct list_head list; char name[56U]; struct module_kobject mkobj; struct module_attribute *modinfo_attrs; const char *version; const char *srcversion; struct kobject *holders_dir; const struct kernel_symbol *syms; const unsigned long *crcs; unsigned int num_syms; struct mutex param_lock; struct kernel_param 
*kp; unsigned int num_kp; unsigned int num_gpl_syms; const struct kernel_symbol *gpl_syms; const unsigned long *gpl_crcs; const struct kernel_symbol *unused_syms; const unsigned long *unused_crcs; unsigned int num_unused_syms; unsigned int num_unused_gpl_syms; const struct kernel_symbol *unused_gpl_syms; const unsigned long *unused_gpl_crcs; bool sig_ok; bool async_probe_requested; const struct kernel_symbol *gpl_future_syms; const unsigned long *gpl_future_crcs; unsigned int num_gpl_future_syms; unsigned int num_exentries; struct exception_table_entry *extable; int (*init)(); struct module_layout core_layout; struct module_layout init_layout; struct mod_arch_specific arch; unsigned int taints; unsigned int num_bugs; struct list_head bug_list; struct bug_entry *bug_table; struct mod_kallsyms *kallsyms; struct mod_kallsyms core_kallsyms; struct module_sect_attrs *sect_attrs; struct module_notes_attrs *notes_attrs; char *args; void *percpu; unsigned int percpu_size; unsigned int num_tracepoints; const struct tracepoint **tracepoints_ptrs; unsigned int num_trace_bprintk_fmt; const char **trace_bprintk_fmt_start; struct trace_event_call **trace_events; unsigned int num_trace_events; struct trace_enum_map **trace_enums; unsigned int num_trace_enums; bool klp; bool klp_alive; struct klp_modinfo *klp_info; struct list_head source_list; struct list_head target_list; void (*exit)(); atomic_t refcnt; ctor_fn_t (**ctors)(); unsigned int num_ctors; } ; 799 enum ldv_31452 { PHY_INTERFACE_MODE_NA = 0, PHY_INTERFACE_MODE_MII = 1, PHY_INTERFACE_MODE_GMII = 2, PHY_INTERFACE_MODE_SGMII = 3, PHY_INTERFACE_MODE_TBI = 4, PHY_INTERFACE_MODE_REVMII = 5, PHY_INTERFACE_MODE_RMII = 6, PHY_INTERFACE_MODE_RGMII = 7, PHY_INTERFACE_MODE_RGMII_ID = 8, PHY_INTERFACE_MODE_RGMII_RXID = 9, PHY_INTERFACE_MODE_RGMII_TXID = 10, PHY_INTERFACE_MODE_RTBI = 11, PHY_INTERFACE_MODE_SMII = 12, PHY_INTERFACE_MODE_XGMII = 13, PHY_INTERFACE_MODE_MOCA = 14, PHY_INTERFACE_MODE_QSGMII = 15, 
PHY_INTERFACE_MODE_TRGMII = 16, PHY_INTERFACE_MODE_MAX = 17 } ; 85 typedef enum ldv_31452 phy_interface_t; 133 enum ldv_31504 { MDIOBUS_ALLOCATED = 1, MDIOBUS_REGISTERED = 2, MDIOBUS_UNREGISTERED = 3, MDIOBUS_RELEASED = 4 } ; 140 struct mii_bus { struct module *owner; const char *name; char id[17U]; void *priv; int (*read)(struct mii_bus *, int, int); int (*write)(struct mii_bus *, int, int, u16 ); int (*reset)(struct mii_bus *); struct mutex mdio_lock; struct device *parent; enum ldv_31504 state; struct device dev; struct mdio_device *mdio_map[32U]; u32 phy_mask; u32 phy_ignore_ta_mask; int irq[32U]; } ; 221 enum phy_state { PHY_DOWN = 0, PHY_STARTING = 1, PHY_READY = 2, PHY_PENDING = 3, PHY_UP = 4, PHY_AN = 5, PHY_RUNNING = 6, PHY_NOLINK = 7, PHY_FORCING = 8, PHY_CHANGELINK = 9, PHY_HALTED = 10, PHY_RESUMING = 11 } ; 236 struct phy_c45_device_ids { u32 devices_in_package; u32 device_ids[8U]; } ; 329 struct phy_driver ; 329 struct phy_device { struct mdio_device mdio; struct phy_driver *drv; u32 phy_id; struct phy_c45_device_ids c45_ids; bool is_c45; bool is_internal; bool is_pseudo_fixed_link; bool has_fixups; bool suspended; enum phy_state state; u32 dev_flags; phy_interface_t interface; int speed; int duplex; int pause; int asym_pause; int link; u32 interrupts; u32 supported; u32 advertising; u32 lp_advertising; int autoneg; int link_timeout; int irq; void *priv; struct work_struct phy_queue; struct delayed_work state_queue; atomic_t irq_disable; struct mutex lock; struct net_device *attached_dev; u8 mdix; void (*adjust_link)(struct net_device *); } ; 431 struct phy_driver { struct mdio_driver_common mdiodrv; u32 phy_id; char *name; unsigned int phy_id_mask; u32 features; u32 flags; const void *driver_data; int (*soft_reset)(struct phy_device *); int (*config_init)(struct phy_device *); int (*probe)(struct phy_device *); int (*suspend)(struct phy_device *); int (*resume)(struct phy_device *); int (*config_aneg)(struct phy_device *); int (*aneg_done)(struct 
phy_device *); int (*read_status)(struct phy_device *); int (*ack_interrupt)(struct phy_device *); int (*config_intr)(struct phy_device *); int (*did_interrupt)(struct phy_device *); void (*remove)(struct phy_device *); int (*match_phy_device)(struct phy_device *); int (*ts_info)(struct phy_device *, struct ethtool_ts_info *); int (*hwtstamp)(struct phy_device *, struct ifreq *); bool (*rxtstamp)(struct phy_device *, struct sk_buff *, int); void (*txtstamp)(struct phy_device *, struct sk_buff *, int); int (*set_wol)(struct phy_device *, struct ethtool_wolinfo *); void (*get_wol)(struct phy_device *, struct ethtool_wolinfo *); void (*link_change_notify)(struct phy_device *); int (*read_mmd_indirect)(struct phy_device *, int, int, int); void (*write_mmd_indirect)(struct phy_device *, int, int, int, u32 ); int (*module_info)(struct phy_device *, struct ethtool_modinfo *); int (*module_eeprom)(struct phy_device *, struct ethtool_eeprom *, u8 *); int (*get_sset_count)(struct phy_device *); void (*get_strings)(struct phy_device *, u8 *); void (*get_stats)(struct phy_device *, struct ethtool_stats *, u64 *); } ; 844 struct fixed_phy_status { int link; int speed; int duplex; int pause; int asym_pause; } ; 27 enum dsa_tag_protocol { DSA_TAG_PROTO_NONE = 0, DSA_TAG_PROTO_DSA = 1, DSA_TAG_PROTO_TRAILER = 2, DSA_TAG_PROTO_EDSA = 3, DSA_TAG_PROTO_BRCM = 4, DSA_TAG_PROTO_QCA = 5, DSA_TAG_LAST = 6 } ; 37 struct dsa_chip_data { struct device *host_dev; int sw_addr; int eeprom_len; struct device_node *of_node; char *port_names[12U]; struct device_node *port_dn[12U]; s8 rtable[4U]; } ; 71 struct dsa_platform_data { struct device *netdev; struct net_device *of_netdev; int nr_chips; struct dsa_chip_data *chip; } ; 87 struct packet_type ; 88 struct dsa_switch ; 88 struct dsa_device_ops ; 88 struct dsa_switch_tree { struct list_head list; u32 tree; struct kref refcount; bool applied; struct dsa_platform_data *pd; struct net_device *master_netdev; int (*rcv)(struct sk_buff *, struct 
net_device *, struct packet_type *, struct net_device *); struct ethtool_ops master_ethtool_ops; const struct ethtool_ops *master_orig_ethtool_ops; s8 cpu_switch; s8 cpu_port; struct dsa_switch *ds[4U]; const struct dsa_device_ops *tag_ops; } ; 141 struct dsa_port { struct net_device *netdev; struct device_node *dn; unsigned int ageing_time; u8 stp_state; } ; 148 struct dsa_switch_ops ; 148 struct dsa_switch { struct device *dev; struct dsa_switch_tree *dst; int index; void *priv; struct dsa_chip_data *cd; struct dsa_switch_ops *ops; s8 rtable[4U]; char hwmon_name[24U]; struct device *hwmon_dev; struct net_device *master_netdev; u32 dsa_port_mask; u32 cpu_port_mask; u32 enabled_port_mask; u32 phys_mii_mask; struct dsa_port ports[12U]; struct mii_bus *slave_mii_bus; } ; 235 struct switchdev_trans ; 236 struct switchdev_obj ; 237 struct switchdev_obj_port_fdb ; 238 struct switchdev_obj_port_mdb ; 239 struct switchdev_obj_port_vlan ; 240 struct dsa_switch_ops { struct list_head list; const char * (*probe)(struct device *, struct device *, int, void **); enum dsa_tag_protocol (*get_tag_protocol)(struct dsa_switch *); int (*setup)(struct dsa_switch *); int (*set_addr)(struct dsa_switch *, u8 *); u32 (*get_phy_flags)(struct dsa_switch *, int); int (*phy_read)(struct dsa_switch *, int, int); int (*phy_write)(struct dsa_switch *, int, int, u16 ); void (*adjust_link)(struct dsa_switch *, int, struct phy_device *); void (*fixed_link_update)(struct dsa_switch *, int, struct fixed_phy_status *); void (*get_strings)(struct dsa_switch *, int, uint8_t *); void (*get_ethtool_stats)(struct dsa_switch *, int, uint64_t *); int (*get_sset_count)(struct dsa_switch *); void (*get_wol)(struct dsa_switch *, int, struct ethtool_wolinfo *); int (*set_wol)(struct dsa_switch *, int, struct ethtool_wolinfo *); int (*suspend)(struct dsa_switch *); int (*resume)(struct dsa_switch *); int (*port_enable)(struct dsa_switch *, int, struct phy_device *); void (*port_disable)(struct dsa_switch *, int, 
struct phy_device *); int (*set_eee)(struct dsa_switch *, int, struct phy_device *, struct ethtool_eee *); int (*get_eee)(struct dsa_switch *, int, struct ethtool_eee *); int (*get_temp)(struct dsa_switch *, int *); int (*get_temp_limit)(struct dsa_switch *, int *); int (*set_temp_limit)(struct dsa_switch *, int); int (*get_temp_alarm)(struct dsa_switch *, bool *); int (*get_eeprom_len)(struct dsa_switch *); int (*get_eeprom)(struct dsa_switch *, struct ethtool_eeprom *, u8 *); int (*set_eeprom)(struct dsa_switch *, struct ethtool_eeprom *, u8 *); int (*get_regs_len)(struct dsa_switch *, int); void (*get_regs)(struct dsa_switch *, int, struct ethtool_regs *, void *); int (*set_ageing_time)(struct dsa_switch *, unsigned int); int (*port_bridge_join)(struct dsa_switch *, int, struct net_device *); void (*port_bridge_leave)(struct dsa_switch *, int); void (*port_stp_state_set)(struct dsa_switch *, int, u8 ); void (*port_fast_age)(struct dsa_switch *, int); int (*port_vlan_filtering)(struct dsa_switch *, int, bool ); int (*port_vlan_prepare)(struct dsa_switch *, int, const struct switchdev_obj_port_vlan *, struct switchdev_trans *); void (*port_vlan_add)(struct dsa_switch *, int, const struct switchdev_obj_port_vlan *, struct switchdev_trans *); int (*port_vlan_del)(struct dsa_switch *, int, const struct switchdev_obj_port_vlan *); int (*port_vlan_dump)(struct dsa_switch *, int, struct switchdev_obj_port_vlan *, int (*)(struct switchdev_obj *)); int (*port_fdb_prepare)(struct dsa_switch *, int, const struct switchdev_obj_port_fdb *, struct switchdev_trans *); void (*port_fdb_add)(struct dsa_switch *, int, const struct switchdev_obj_port_fdb *, struct switchdev_trans *); int (*port_fdb_del)(struct dsa_switch *, int, const struct switchdev_obj_port_fdb *); int (*port_fdb_dump)(struct dsa_switch *, int, struct switchdev_obj_port_fdb *, int (*)(struct switchdev_obj *)); int (*port_mdb_prepare)(struct dsa_switch *, int, const struct switchdev_obj_port_mdb *, struct 
switchdev_trans *); void (*port_mdb_add)(struct dsa_switch *, int, const struct switchdev_obj_port_mdb *, struct switchdev_trans *); int (*port_mdb_del)(struct dsa_switch *, int, const struct switchdev_obj_port_mdb *); int (*port_mdb_dump)(struct dsa_switch *, int, struct switchdev_obj_port_mdb *, int (*)(struct switchdev_obj *)); } ; 407 struct ieee_ets { __u8 willing; __u8 ets_cap; __u8 cbs; __u8 tc_tx_bw[8U]; __u8 tc_rx_bw[8U]; __u8 tc_tsa[8U]; __u8 prio_tc[8U]; __u8 tc_reco_bw[8U]; __u8 tc_reco_tsa[8U]; __u8 reco_prio_tc[8U]; } ; 69 struct ieee_maxrate { __u64 tc_maxrate[8U]; } ; 87 struct ieee_qcn { __u8 rpg_enable[8U]; __u32 rppp_max_rps[8U]; __u32 rpg_time_reset[8U]; __u32 rpg_byte_reset[8U]; __u32 rpg_threshold[8U]; __u32 rpg_max_rate[8U]; __u32 rpg_ai_rate[8U]; __u32 rpg_hai_rate[8U]; __u32 rpg_gd[8U]; __u32 rpg_min_dec_fac[8U]; __u32 rpg_min_rate[8U]; __u32 cndd_state_machine[8U]; } ; 132 struct ieee_qcn_stats { __u64 rppp_rp_centiseconds[8U]; __u32 rppp_created_rps[8U]; } ; 144 struct ieee_pfc { __u8 pfc_cap; __u8 pfc_en; __u8 mbc; __u16 delay; __u64 requests[8U]; __u64 indications[8U]; } ; 164 struct cee_pg { __u8 willing; __u8 error; __u8 pg_en; __u8 tcs_supported; __u8 pg_bw[8U]; __u8 prio_pg[8U]; } ; 187 struct cee_pfc { __u8 willing; __u8 error; __u8 pfc_en; __u8 tcs_supported; } ; 202 struct dcb_app { __u8 selector; __u8 priority; __u16 protocol; } ; 236 struct dcb_peer_app_info { __u8 willing; __u8 error; } ; 40 struct dcbnl_rtnl_ops { int (*ieee_getets)(struct net_device *, struct ieee_ets *); int (*ieee_setets)(struct net_device *, struct ieee_ets *); int (*ieee_getmaxrate)(struct net_device *, struct ieee_maxrate *); int (*ieee_setmaxrate)(struct net_device *, struct ieee_maxrate *); int (*ieee_getqcn)(struct net_device *, struct ieee_qcn *); int (*ieee_setqcn)(struct net_device *, struct ieee_qcn *); int (*ieee_getqcnstats)(struct net_device *, struct ieee_qcn_stats *); int (*ieee_getpfc)(struct net_device *, struct ieee_pfc *); int 
(*ieee_setpfc)(struct net_device *, struct ieee_pfc *); int (*ieee_getapp)(struct net_device *, struct dcb_app *); int (*ieee_setapp)(struct net_device *, struct dcb_app *); int (*ieee_delapp)(struct net_device *, struct dcb_app *); int (*ieee_peer_getets)(struct net_device *, struct ieee_ets *); int (*ieee_peer_getpfc)(struct net_device *, struct ieee_pfc *); u8 (*getstate)(struct net_device *); u8 (*setstate)(struct net_device *, u8 ); void (*getpermhwaddr)(struct net_device *, u8 *); void (*setpgtccfgtx)(struct net_device *, int, u8 , u8 , u8 , u8 ); void (*setpgbwgcfgtx)(struct net_device *, int, u8 ); void (*setpgtccfgrx)(struct net_device *, int, u8 , u8 , u8 , u8 ); void (*setpgbwgcfgrx)(struct net_device *, int, u8 ); void (*getpgtccfgtx)(struct net_device *, int, u8 *, u8 *, u8 *, u8 *); void (*getpgbwgcfgtx)(struct net_device *, int, u8 *); void (*getpgtccfgrx)(struct net_device *, int, u8 *, u8 *, u8 *, u8 *); void (*getpgbwgcfgrx)(struct net_device *, int, u8 *); void (*setpfccfg)(struct net_device *, int, u8 ); void (*getpfccfg)(struct net_device *, int, u8 *); u8 (*setall)(struct net_device *); u8 (*getcap)(struct net_device *, int, u8 *); int (*getnumtcs)(struct net_device *, int, u8 *); int (*setnumtcs)(struct net_device *, int, u8 ); u8 (*getpfcstate)(struct net_device *); void (*setpfcstate)(struct net_device *, u8 ); void (*getbcncfg)(struct net_device *, int, u32 *); void (*setbcncfg)(struct net_device *, int, u32 ); void (*getbcnrp)(struct net_device *, int, u8 *); void (*setbcnrp)(struct net_device *, int, u8 ); int (*setapp)(struct net_device *, u8 , u16 , u8 ); int (*getapp)(struct net_device *, u8 , u16 ); u8 (*getfeatcfg)(struct net_device *, int, u8 *); u8 (*setfeatcfg)(struct net_device *, int, u8 ); u8 (*getdcbx)(struct net_device *); u8 (*setdcbx)(struct net_device *, u8 ); int (*peer_getappinfo)(struct net_device *, struct dcb_peer_app_info *, u16 *); int (*peer_getapptable)(struct net_device *, struct dcb_app *); int 
(*cee_peer_getpg)(struct net_device *, struct cee_pg *); int (*cee_peer_getpfc)(struct net_device *, struct cee_pfc *); } ; 105 struct taskstats { __u16 version; __u32 ac_exitcode; __u8 ac_flag; __u8 ac_nice; __u64 cpu_count; __u64 cpu_delay_total; __u64 blkio_count; __u64 blkio_delay_total; __u64 swapin_count; __u64 swapin_delay_total; __u64 cpu_run_real_total; __u64 cpu_run_virtual_total; char ac_comm[32U]; __u8 ac_sched; __u8 ac_pad[3U]; __u32 ac_uid; __u32 ac_gid; __u32 ac_pid; __u32 ac_ppid; __u32 ac_btime; __u64 ac_etime; __u64 ac_utime; __u64 ac_stime; __u64 ac_minflt; __u64 ac_majflt; __u64 coremem; __u64 virtmem; __u64 hiwater_rss; __u64 hiwater_vm; __u64 read_char; __u64 write_char; __u64 read_syscalls; __u64 write_syscalls; __u64 read_bytes; __u64 write_bytes; __u64 cancelled_write_bytes; __u64 nvcsw; __u64 nivcsw; __u64 ac_utimescaled; __u64 ac_stimescaled; __u64 cpu_scaled_run_real_total; __u64 freepages_count; __u64 freepages_delay_total; } ; 58 struct mnt_namespace ; 59 struct ipc_namespace ; 60 struct cgroup_namespace ; 61 struct nsproxy { atomic_t count; struct uts_namespace *uts_ns; struct ipc_namespace *ipc_ns; struct mnt_namespace *mnt_ns; struct pid_namespace *pid_ns_for_children; struct net *net_ns; struct cgroup_namespace *cgroup_ns; } ; 86 struct uid_gid_extent { u32 first; u32 lower_first; u32 count; } ; 19 struct uid_gid_map { u32 nr_extents; struct uid_gid_extent extent[5U]; } ; 31 struct user_namespace { struct uid_gid_map uid_map; struct uid_gid_map gid_map; struct uid_gid_map projid_map; atomic_t count; struct user_namespace *parent; int level; kuid_t owner; kgid_t group; struct ns_common ns; unsigned long flags; struct key *persistent_keyring_register; struct rw_semaphore persistent_keyring_register_sem; struct work_struct work; struct ctl_table_set set; struct ctl_table_header *sysctls; struct ucounts *ucounts; int ucount_max[7U]; } ; 63 struct ucounts { struct hlist_node node; struct user_namespace *ns; kuid_t uid; atomic_t count; 
atomic_t ucount[7U]; } ; 631 struct cgroup_namespace { atomic_t count; struct ns_common ns; struct user_namespace *user_ns; struct ucounts *ucounts; struct css_set *root_cset; } ; 686 struct netprio_map { struct callback_head rcu; u32 priomap_len; u32 priomap[]; } ; 99 struct xfrm_policy ; 100 struct xfrm_state ; 116 struct request_sock ; 41 struct nlmsghdr { __u32 nlmsg_len; __u16 nlmsg_type; __u16 nlmsg_flags; __u32 nlmsg_seq; __u32 nlmsg_pid; } ; 143 struct nlattr { __u16 nla_len; __u16 nla_type; } ; 105 struct netlink_callback { struct sk_buff *skb; const struct nlmsghdr *nlh; int (*start)(struct netlink_callback *); int (*dump)(struct sk_buff *, struct netlink_callback *); int (*done)(struct netlink_callback *); void *data; struct module *module; u16 family; u16 min_dump_alloc; unsigned int prev_seq; unsigned int seq; long args[6U]; } ; 183 struct ndmsg { __u8 ndm_family; __u8 ndm_pad1; __u16 ndm_pad2; __s32 ndm_ifindex; __u16 ndm_state; __u8 ndm_flags; __u8 ndm_type; } ; 41 struct rtnl_link_stats64 { __u64 rx_packets; __u64 tx_packets; __u64 rx_bytes; __u64 tx_bytes; __u64 rx_errors; __u64 tx_errors; __u64 rx_dropped; __u64 tx_dropped; __u64 multicast; __u64 collisions; __u64 rx_length_errors; __u64 rx_over_errors; __u64 rx_crc_errors; __u64 rx_frame_errors; __u64 rx_fifo_errors; __u64 rx_missed_errors; __u64 tx_aborted_errors; __u64 tx_carrier_errors; __u64 tx_fifo_errors; __u64 tx_heartbeat_errors; __u64 tx_window_errors; __u64 rx_compressed; __u64 tx_compressed; __u64 rx_nohandler; } ; 866 struct ifla_vf_stats { __u64 rx_packets; __u64 tx_packets; __u64 rx_bytes; __u64 tx_bytes; __u64 broadcast; __u64 multicast; } ; 16 struct ifla_vf_info { __u32 vf; __u8 mac[32U]; __u32 vlan; __u32 qos; __u32 spoofchk; __u32 linkstate; __u32 min_tx_rate; __u32 max_tx_rate; __u32 rss_query_en; __u32 trusted; __be16 vlan_proto; } ; 118 struct tc_stats { __u64 bytes; __u32 packets; __u32 drops; __u32 overlimits; __u32 bps; __u32 pps; __u32 qlen; __u32 backlog; } ; 96 struct 
tc_sizespec { unsigned char cell_log; unsigned char size_log; short cell_align; int overhead; unsigned int linklayer; unsigned int mpu; unsigned int mtu; unsigned int tsize; } ; 117 struct netpoll_info ; 118 struct wireless_dev ; 119 struct wpan_dev ; 120 struct mpls_dev ; 121 struct udp_tunnel_info ; 122 struct bpf_prog ; 70 enum netdev_tx { __NETDEV_TX_MIN = -2147483648, NETDEV_TX_OK = 0, NETDEV_TX_BUSY = 16 } ; 113 typedef enum netdev_tx netdev_tx_t; 132 struct net_device_stats { unsigned long rx_packets; unsigned long tx_packets; unsigned long rx_bytes; unsigned long tx_bytes; unsigned long rx_errors; unsigned long tx_errors; unsigned long rx_dropped; unsigned long tx_dropped; unsigned long multicast; unsigned long collisions; unsigned long rx_length_errors; unsigned long rx_over_errors; unsigned long rx_crc_errors; unsigned long rx_frame_errors; unsigned long rx_fifo_errors; unsigned long rx_missed_errors; unsigned long tx_aborted_errors; unsigned long tx_carrier_errors; unsigned long tx_fifo_errors; unsigned long tx_heartbeat_errors; unsigned long tx_window_errors; unsigned long rx_compressed; unsigned long tx_compressed; } ; 195 struct neigh_parms ; 196 struct netdev_hw_addr { struct list_head list; unsigned char addr[32U]; unsigned char type; bool global_use; int sync_cnt; int refcount; int synced; struct callback_head callback_head; } ; 216 struct netdev_hw_addr_list { struct list_head list; int count; } ; 221 struct hh_cache { u16 hh_len; u16 __pad; seqlock_t hh_lock; unsigned long hh_data[16U]; } ; 250 struct header_ops { int (*create)(struct sk_buff *, struct net_device *, unsigned short, const void *, const void *, unsigned int); int (*parse)(const struct sk_buff *, unsigned char *); int (*cache)(const struct neighbour *, struct hh_cache *, __be16 ); void (*cache_update)(struct hh_cache *, const struct net_device *, const unsigned char *); bool (*validate)(const char *, unsigned int); } ; 301 struct napi_struct { struct list_head poll_list; unsigned 
long state; int weight; unsigned int gro_count; int (*poll)(struct napi_struct *, int); spinlock_t poll_lock; int poll_owner; struct net_device *dev; struct sk_buff *gro_list; struct sk_buff *skb; struct hrtimer timer; struct list_head dev_list; struct hlist_node napi_hash_node; unsigned int napi_id; } ; 347 enum rx_handler_result { RX_HANDLER_CONSUMED = 0, RX_HANDLER_ANOTHER = 1, RX_HANDLER_EXACT = 2, RX_HANDLER_PASS = 3 } ; 395 typedef enum rx_handler_result rx_handler_result_t; 396 typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **); 541 struct Qdisc ; 541 struct netdev_queue { struct net_device *dev; struct Qdisc *qdisc; struct Qdisc *qdisc_sleeping; struct kobject kobj; int numa_node; unsigned long tx_maxrate; unsigned long trans_timeout; spinlock_t _xmit_lock; int xmit_lock_owner; unsigned long trans_start; unsigned long state; struct dql dql; } ; 612 struct rps_map { unsigned int len; struct callback_head rcu; u16 cpus[0U]; } ; 624 struct rps_dev_flow { u16 cpu; u16 filter; unsigned int last_qtail; } ; 636 struct rps_dev_flow_table { unsigned int mask; struct callback_head rcu; struct rps_dev_flow flows[0U]; } ; 688 struct netdev_rx_queue { struct rps_map *rps_map; struct rps_dev_flow_table *rps_flow_table; struct kobject kobj; struct net_device *dev; } ; 711 struct xps_map { unsigned int len; unsigned int alloc_len; struct callback_head rcu; u16 queues[0U]; } ; 724 struct xps_dev_maps { struct callback_head rcu; struct xps_map *cpu_map[0U]; } ; 735 struct netdev_tc_txq { u16 count; u16 offset; } ; 746 struct netdev_fcoe_hbainfo { char manufacturer[64U]; char serial_number[64U]; char hardware_version[64U]; char driver_version[64U]; char optionrom_version[64U]; char firmware_version[64U]; char model[256U]; char model_description[256U]; } ; 762 struct netdev_phys_item_id { unsigned char id[32U]; unsigned char id_len; } ; 790 struct tc_cls_u32_offload ; 791 struct tc_cls_flower_offload ; 791 struct tc_cls_matchall_offload ; 791 struct 
tc_cls_bpf_offload ; 791 union __anonunion____missing_field_name_469 { u8 tc; struct tc_cls_u32_offload *cls_u32; struct tc_cls_flower_offload *cls_flower; struct tc_cls_matchall_offload *cls_mall; struct tc_cls_bpf_offload *cls_bpf; } ; 791 struct tc_to_netdev { unsigned int type; union __anonunion____missing_field_name_469 __annonCompField106; } ; 807 enum xdp_netdev_command { XDP_SETUP_PROG = 0, XDP_QUERY_PROG = 1 } ; 812 union __anonunion____missing_field_name_470 { struct bpf_prog *prog; bool prog_attached; } ; 812 struct netdev_xdp { enum xdp_netdev_command command; union __anonunion____missing_field_name_470 __annonCompField107; } ; 835 struct net_device_ops { int (*ndo_init)(struct net_device *); void (*ndo_uninit)(struct net_device *); int (*ndo_open)(struct net_device *); int (*ndo_stop)(struct net_device *); netdev_tx_t (*ndo_start_xmit)(struct sk_buff *, struct net_device *); netdev_features_t (*ndo_features_check)(struct sk_buff *, struct net_device *, netdev_features_t ); u16 (*ndo_select_queue)(struct net_device *, struct sk_buff *, void *, u16 (*)(struct net_device *, struct sk_buff *)); void (*ndo_change_rx_flags)(struct net_device *, int); void (*ndo_set_rx_mode)(struct net_device *); int (*ndo_set_mac_address)(struct net_device *, void *); int (*ndo_validate_addr)(struct net_device *); int (*ndo_do_ioctl)(struct net_device *, struct ifreq *, int); int (*ndo_set_config)(struct net_device *, struct ifmap *); int (*ndo_change_mtu)(struct net_device *, int); int (*ndo_neigh_setup)(struct net_device *, struct neigh_parms *); void (*ndo_tx_timeout)(struct net_device *); struct rtnl_link_stats64 * (*ndo_get_stats64)(struct net_device *, struct rtnl_link_stats64 *); bool (*ndo_has_offload_stats)(int); int (*ndo_get_offload_stats)(int, const struct net_device *, void *); struct net_device_stats * (*ndo_get_stats)(struct net_device *); int (*ndo_vlan_rx_add_vid)(struct net_device *, __be16 , u16 ); int (*ndo_vlan_rx_kill_vid)(struct net_device *, __be16 , 
u16 ); void (*ndo_poll_controller)(struct net_device *); int (*ndo_netpoll_setup)(struct net_device *, struct netpoll_info *); void (*ndo_netpoll_cleanup)(struct net_device *); int (*ndo_busy_poll)(struct napi_struct *); int (*ndo_set_vf_mac)(struct net_device *, int, u8 *); int (*ndo_set_vf_vlan)(struct net_device *, int, u16 , u8 , __be16 ); int (*ndo_set_vf_rate)(struct net_device *, int, int, int); int (*ndo_set_vf_spoofchk)(struct net_device *, int, bool ); int (*ndo_set_vf_trust)(struct net_device *, int, bool ); int (*ndo_get_vf_config)(struct net_device *, int, struct ifla_vf_info *); int (*ndo_set_vf_link_state)(struct net_device *, int, int); int (*ndo_get_vf_stats)(struct net_device *, int, struct ifla_vf_stats *); int (*ndo_set_vf_port)(struct net_device *, int, struct nlattr **); int (*ndo_get_vf_port)(struct net_device *, int, struct sk_buff *); int (*ndo_set_vf_guid)(struct net_device *, int, u64 , int); int (*ndo_set_vf_rss_query_en)(struct net_device *, int, bool ); int (*ndo_setup_tc)(struct net_device *, u32 , __be16 , struct tc_to_netdev *); int (*ndo_fcoe_enable)(struct net_device *); int (*ndo_fcoe_disable)(struct net_device *); int (*ndo_fcoe_ddp_setup)(struct net_device *, u16 , struct scatterlist *, unsigned int); int (*ndo_fcoe_ddp_done)(struct net_device *, u16 ); int (*ndo_fcoe_ddp_target)(struct net_device *, u16 , struct scatterlist *, unsigned int); int (*ndo_fcoe_get_hbainfo)(struct net_device *, struct netdev_fcoe_hbainfo *); int (*ndo_fcoe_get_wwn)(struct net_device *, u64 *, int); int (*ndo_rx_flow_steer)(struct net_device *, const struct sk_buff *, u16 , u32 ); int (*ndo_add_slave)(struct net_device *, struct net_device *); int (*ndo_del_slave)(struct net_device *, struct net_device *); netdev_features_t (*ndo_fix_features)(struct net_device *, netdev_features_t ); int (*ndo_set_features)(struct net_device *, netdev_features_t ); int (*ndo_neigh_construct)(struct net_device *, struct neighbour *); void (*ndo_neigh_destroy)(struct 
net_device *, struct neighbour *); int (*ndo_fdb_add)(struct ndmsg *, struct nlattr **, struct net_device *, const unsigned char *, u16 , u16 ); int (*ndo_fdb_del)(struct ndmsg *, struct nlattr **, struct net_device *, const unsigned char *, u16 ); int (*ndo_fdb_dump)(struct sk_buff *, struct netlink_callback *, struct net_device *, struct net_device *, int *); int (*ndo_bridge_setlink)(struct net_device *, struct nlmsghdr *, u16 ); int (*ndo_bridge_getlink)(struct sk_buff *, u32 , u32 , struct net_device *, u32 , int); int (*ndo_bridge_dellink)(struct net_device *, struct nlmsghdr *, u16 ); int (*ndo_change_carrier)(struct net_device *, bool ); int (*ndo_get_phys_port_id)(struct net_device *, struct netdev_phys_item_id *); int (*ndo_get_phys_port_name)(struct net_device *, char *, size_t ); void (*ndo_udp_tunnel_add)(struct net_device *, struct udp_tunnel_info *); void (*ndo_udp_tunnel_del)(struct net_device *, struct udp_tunnel_info *); void * (*ndo_dfwd_add_station)(struct net_device *, struct net_device *); void (*ndo_dfwd_del_station)(struct net_device *, void *); netdev_tx_t (*ndo_dfwd_start_xmit)(struct sk_buff *, struct net_device *, void *); int (*ndo_get_lock_subclass)(struct net_device *); int (*ndo_set_tx_maxrate)(struct net_device *, int, u32 ); int (*ndo_get_iflink)(const struct net_device *); int (*ndo_change_proto_down)(struct net_device *, bool ); int (*ndo_fill_metadata_dst)(struct net_device *, struct sk_buff *); void (*ndo_set_rx_headroom)(struct net_device *, int); int (*ndo_xdp)(struct net_device *, struct netdev_xdp *); } ; 1371 struct __anonstruct_adj_list_471 { struct list_head upper; struct list_head lower; } ; 1371 struct __anonstruct_all_adj_list_472 { struct list_head upper; struct list_head lower; } ; 1371 struct iw_handler_def ; 1371 struct iw_public_data ; 1371 struct switchdev_ops ; 1371 struct l3mdev_ops ; 1371 struct ndisc_ops ; 1371 struct vlan_info ; 1371 struct tipc_bearer ; 1371 struct in_device ; 1371 struct dn_dev ; 1371 
struct inet6_dev ; 1371 struct tcf_proto ; 1371 struct cpu_rmap ; 1371 struct pcpu_lstats ; 1371 struct pcpu_sw_netstats ; 1371 struct pcpu_dstats ; 1371 struct pcpu_vstats ; 1371 union __anonunion____missing_field_name_473 { void *ml_priv; struct pcpu_lstats *lstats; struct pcpu_sw_netstats *tstats; struct pcpu_dstats *dstats; struct pcpu_vstats *vstats; } ; 1371 struct garp_port ; 1371 struct mrp_port ; 1371 struct rtnl_link_ops ; 1371 struct net_device { char name[16U]; struct hlist_node name_hlist; char *ifalias; unsigned long mem_end; unsigned long mem_start; unsigned long base_addr; int irq; atomic_t carrier_changes; unsigned long state; struct list_head dev_list; struct list_head napi_list; struct list_head unreg_list; struct list_head close_list; struct list_head ptype_all; struct list_head ptype_specific; struct __anonstruct_adj_list_471 adj_list; struct __anonstruct_all_adj_list_472 all_adj_list; netdev_features_t features; netdev_features_t hw_features; netdev_features_t wanted_features; netdev_features_t vlan_features; netdev_features_t hw_enc_features; netdev_features_t mpls_features; netdev_features_t gso_partial_features; int ifindex; int group; struct net_device_stats stats; atomic_long_t rx_dropped; atomic_long_t tx_dropped; atomic_long_t rx_nohandler; const struct iw_handler_def *wireless_handlers; struct iw_public_data *wireless_data; const struct net_device_ops *netdev_ops; const struct ethtool_ops *ethtool_ops; const struct switchdev_ops *switchdev_ops; const struct l3mdev_ops *l3mdev_ops; const struct ndisc_ops *ndisc_ops; const struct header_ops *header_ops; unsigned int flags; unsigned int priv_flags; unsigned short gflags; unsigned short padded; unsigned char operstate; unsigned char link_mode; unsigned char if_port; unsigned char dma; unsigned int mtu; unsigned short type; unsigned short hard_header_len; unsigned short needed_headroom; unsigned short needed_tailroom; unsigned char perm_addr[32U]; unsigned char addr_assign_type; unsigned 
char addr_len; unsigned short neigh_priv_len; unsigned short dev_id; unsigned short dev_port; spinlock_t addr_list_lock; unsigned char name_assign_type; bool uc_promisc; struct netdev_hw_addr_list uc; struct netdev_hw_addr_list mc; struct netdev_hw_addr_list dev_addrs; struct kset *queues_kset; unsigned int promiscuity; unsigned int allmulti; struct vlan_info *vlan_info; struct dsa_switch_tree *dsa_ptr; struct tipc_bearer *tipc_ptr; void *atalk_ptr; struct in_device *ip_ptr; struct dn_dev *dn_ptr; struct inet6_dev *ip6_ptr; void *ax25_ptr; struct wireless_dev *ieee80211_ptr; struct wpan_dev *ieee802154_ptr; struct mpls_dev *mpls_ptr; unsigned long last_rx; unsigned char *dev_addr; struct netdev_rx_queue *_rx; unsigned int num_rx_queues; unsigned int real_num_rx_queues; unsigned long gro_flush_timeout; rx_handler_func_t *rx_handler; void *rx_handler_data; struct tcf_proto *ingress_cl_list; struct netdev_queue *ingress_queue; struct nf_hook_entry *nf_hooks_ingress; unsigned char broadcast[32U]; struct cpu_rmap *rx_cpu_rmap; struct hlist_node index_hlist; struct netdev_queue *_tx; unsigned int num_tx_queues; unsigned int real_num_tx_queues; struct Qdisc *qdisc; struct hlist_head qdisc_hash[16U]; unsigned long tx_queue_len; spinlock_t tx_global_lock; int watchdog_timeo; struct xps_dev_maps *xps_maps; struct tcf_proto *egress_cl_list; struct timer_list watchdog_timer; int *pcpu_refcnt; struct list_head todo_list; struct list_head link_watch_list; unsigned char reg_state; bool dismantle; unsigned short rtnl_link_state; void (*destructor)(struct net_device *); struct netpoll_info *npinfo; possible_net_t nd_net; union __anonunion____missing_field_name_473 __annonCompField108; struct garp_port *garp_port; struct mrp_port *mrp_port; struct device dev; const struct attribute_group *sysfs_groups[4U]; const struct attribute_group *sysfs_rx_queue_group; const struct rtnl_link_ops *rtnl_link_ops; unsigned int gso_max_size; u16 gso_max_segs; const struct dcbnl_rtnl_ops *dcbnl_ops; 
u8 num_tc; struct netdev_tc_txq tc_to_txq[16U]; u8 prio_tc_map[16U]; unsigned int fcoe_ddp_xid; struct netprio_map *priomap; struct phy_device *phydev; struct lock_class_key *qdisc_tx_busylock; struct lock_class_key *qdisc_running_key; bool proto_down; } ; 2180 struct packet_type { __be16 type; struct net_device *dev; int (*func)(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *); bool (*id_match)(struct packet_type *, struct sock *); void *af_packet_priv; struct list_head list; } ; 2210 struct pcpu_sw_netstats { u64 rx_packets; u64 rx_bytes; u64 tx_packets; u64 tx_bytes; struct u64_stats_sync syncp; } ; 70 struct hotplug_slot ; 70 struct pci_slot { struct pci_bus *bus; struct list_head list; struct hotplug_slot *hotplug; unsigned char number; struct kobject kobj; } ; 108 typedef int pci_power_t; 135 typedef unsigned int pci_channel_state_t; 136 enum pci_channel_state { pci_channel_io_normal = 1, pci_channel_io_frozen = 2, pci_channel_io_perm_failure = 3 } ; 161 typedef unsigned short pci_dev_flags_t; 188 typedef unsigned short pci_bus_flags_t; 246 struct pcie_link_state ; 247 struct pci_vpd ; 248 struct pci_sriov ; 250 struct pci_driver ; 250 union __anonunion____missing_field_name_482 { struct pci_sriov *sriov; struct pci_dev *physfn; } ; 250 struct pci_dev { struct list_head bus_list; struct pci_bus *bus; struct pci_bus *subordinate; void *sysdata; struct proc_dir_entry *procent; struct pci_slot *slot; unsigned int devfn; unsigned short vendor; unsigned short device; unsigned short subsystem_vendor; unsigned short subsystem_device; unsigned int class; u8 revision; u8 hdr_type; u16 aer_cap; u8 pcie_cap; u8 msi_cap; u8 msix_cap; unsigned char pcie_mpss; u8 rom_base_reg; u8 pin; u16 pcie_flags_reg; unsigned long *dma_alias_mask; struct pci_driver *driver; u64 dma_mask; struct device_dma_parameters dma_parms; pci_power_t current_state; u8 pm_cap; unsigned char pme_support; unsigned char pme_interrupt; unsigned char pme_poll; unsigned 
char d1_support; unsigned char d2_support; unsigned char no_d1d2; unsigned char no_d3cold; unsigned char bridge_d3; unsigned char d3cold_allowed; unsigned char mmio_always_on; unsigned char wakeup_prepared; unsigned char runtime_d3cold; unsigned char ignore_hotplug; unsigned char hotplug_user_indicators; unsigned int d3_delay; unsigned int d3cold_delay; struct pcie_link_state *link_state; pci_channel_state_t error_state; struct device dev; int cfg_size; unsigned int irq; struct cpumask *irq_affinity; struct resource resource[17U]; bool match_driver; unsigned char transparent; unsigned char multifunction; unsigned char is_added; unsigned char is_busmaster; unsigned char no_msi; unsigned char no_64bit_msi; unsigned char block_cfg_access; unsigned char broken_parity_status; unsigned char irq_reroute_variant; unsigned char msi_enabled; unsigned char msix_enabled; unsigned char ari_enabled; unsigned char ats_enabled; unsigned char is_managed; unsigned char needs_freset; unsigned char state_saved; unsigned char is_physfn; unsigned char is_virtfn; unsigned char reset_fn; unsigned char is_hotplug_bridge; unsigned char __aer_firmware_first_valid; unsigned char __aer_firmware_first; unsigned char broken_intx_masking; unsigned char io_window_1k; unsigned char irq_managed; unsigned char has_secondary_link; unsigned char non_compliant_bars; pci_dev_flags_t dev_flags; atomic_t enable_cnt; u32 saved_config_space[16U]; struct hlist_head saved_cap_space; struct bin_attribute *rom_attr; int rom_attr_enabled; struct bin_attribute *res_attr[17U]; struct bin_attribute *res_attr_wc[17U]; unsigned char ptm_root; unsigned char ptm_enabled; u8 ptm_granularity; const struct attribute_group **msi_irq_groups; struct pci_vpd *vpd; union __anonunion____missing_field_name_482 __annonCompField109; u16 ats_cap; u8 ats_stu; atomic_t ats_ref_cnt; phys_addr_t rom; size_t romlen; char *driver_override; } ; 465 struct pci_ops ; 465 struct msi_controller ; 465 struct pci_bus { struct list_head node; 
struct pci_bus *parent; struct list_head children; struct list_head devices; struct pci_dev *self; struct list_head slots; struct resource *resource[4U]; struct list_head resources; struct resource busn_res; struct pci_ops *ops; struct msi_controller *msi; void *sysdata; struct proc_dir_entry *procdir; unsigned char number; unsigned char primary; unsigned char max_bus_speed; unsigned char cur_bus_speed; char name[48U]; unsigned short bridge_ctl; pci_bus_flags_t bus_flags; struct device *bridge; struct device dev; struct bin_attribute *legacy_io; struct bin_attribute *legacy_mem; unsigned char is_added; } ; 589 struct pci_ops { int (*add_bus)(struct pci_bus *); void (*remove_bus)(struct pci_bus *); void * (*map_bus)(struct pci_bus *, unsigned int, int); int (*read)(struct pci_bus *, unsigned int, int, int, u32 *); int (*write)(struct pci_bus *, unsigned int, int, int, u32 ); } ; 619 struct pci_dynids { spinlock_t lock; struct list_head list; } ; 633 typedef unsigned int pci_ers_result_t; 643 struct pci_error_handlers { pci_ers_result_t (*error_detected)(struct pci_dev *, enum pci_channel_state ); pci_ers_result_t (*mmio_enabled)(struct pci_dev *); pci_ers_result_t (*link_reset)(struct pci_dev *); pci_ers_result_t (*slot_reset)(struct pci_dev *); void (*reset_notify)(struct pci_dev *, bool ); void (*resume)(struct pci_dev *); } ; 676 struct pci_driver { struct list_head node; const char *name; const struct pci_device_id *id_table; int (*probe)(struct pci_dev *, const struct pci_device_id *); void (*remove)(struct pci_dev *); int (*suspend)(struct pci_dev *, pm_message_t ); int (*suspend_late)(struct pci_dev *, pm_message_t ); int (*resume_early)(struct pci_dev *); int (*resume)(struct pci_dev *); void (*shutdown)(struct pci_dev *); int (*sriov_configure)(struct pci_dev *, int); const struct pci_error_handlers *err_handler; struct device_driver driver; struct pci_dynids dynids; } ; 65 struct eeprom_93cx6 { void *data; void (*register_read)(struct eeprom_93cx6 *); void 
(*register_write)(struct eeprom_93cx6 *); int width; char drive_data; char reg_data_in; char reg_data_out; char reg_data_clock; char reg_chip_select; } ; 145 struct ieee80211_hdr { __le16 frame_control; __le16 duration_id; u8 addr1[6U]; u8 addr2[6U]; u8 addr3[6U]; __le16 seq_ctrl; u8 addr4[6U]; } ; 1201 struct ieee80211_p2p_noa_desc { u8 count; __le32 duration; __le32 interval; __le32 start_time; } ; 1220 struct ieee80211_p2p_noa_attr { u8 index; u8 oppps_ctwindow; struct ieee80211_p2p_noa_desc desc[4U]; } ; 1244 struct ieee80211_mcs_info { u8 rx_mask[10U]; __le16 rx_highest; u8 tx_params; u8 reserved[3U]; } ; 1269 struct ieee80211_ht_cap { __le16 cap_info; u8 ampdu_params_info; struct ieee80211_mcs_info mcs; __le16 extended_ht_cap_info; __le32 tx_BF_cap_info; u8 antenna_selection_info; } ; 1394 struct ieee80211_vht_mcs_info { __le16 rx_mcs_map; __le16 rx_highest; __le16 tx_mcs_map; __le16 tx_highest; } ; 1476 struct ieee80211_vht_cap { __le32 vht_cap_info; struct ieee80211_vht_mcs_info supp_mcs; } ; 550 enum nl80211_iftype { NL80211_IFTYPE_UNSPECIFIED = 0, NL80211_IFTYPE_ADHOC = 1, NL80211_IFTYPE_STATION = 2, NL80211_IFTYPE_AP = 3, NL80211_IFTYPE_AP_VLAN = 4, NL80211_IFTYPE_WDS = 5, NL80211_IFTYPE_MONITOR = 6, NL80211_IFTYPE_MESH_POINT = 7, NL80211_IFTYPE_P2P_CLIENT = 8, NL80211_IFTYPE_P2P_GO = 9, NL80211_IFTYPE_P2P_DEVICE = 10, NL80211_IFTYPE_OCB = 11, NL80211_IFTYPE_NAN = 12, NUM_NL80211_IFTYPES = 13, NL80211_IFTYPE_MAX = 12 } ; 587 struct nl80211_sta_flag_update { __u32 mask; __u32 set; } ; 2656 enum nl80211_reg_initiator { NL80211_REGDOM_SET_BY_CORE = 0, NL80211_REGDOM_SET_BY_USER = 1, NL80211_REGDOM_SET_BY_DRIVER = 2, NL80211_REGDOM_SET_BY_COUNTRY_IE = 3 } ; 2709 enum nl80211_dfs_regions { NL80211_DFS_UNSET = 0, NL80211_DFS_FCC = 1, NL80211_DFS_ETSI = 2, NL80211_DFS_JP = 3 } ; 2716 enum nl80211_user_reg_hint_type { NL80211_USER_REG_HINT_USER = 0, NL80211_USER_REG_HINT_CELL_BASE = 1, NL80211_USER_REG_HINT_INDOOR = 2 } ; 2750 enum nl80211_mesh_power_mode { 
NL80211_MESH_POWER_UNKNOWN = 0, NL80211_MESH_POWER_ACTIVE = 1, NL80211_MESH_POWER_LIGHT_SLEEP = 2, NL80211_MESH_POWER_DEEP_SLEEP = 3, __NL80211_MESH_POWER_AFTER_LAST = 4, NL80211_MESH_POWER_MAX = 3 } ; 2833 enum nl80211_chan_width { NL80211_CHAN_WIDTH_20_NOHT = 0, NL80211_CHAN_WIDTH_20 = 1, NL80211_CHAN_WIDTH_40 = 2, NL80211_CHAN_WIDTH_80 = 3, NL80211_CHAN_WIDTH_80P80 = 4, NL80211_CHAN_WIDTH_160 = 5, NL80211_CHAN_WIDTH_5 = 6, NL80211_CHAN_WIDTH_10 = 7 } ; 2844 enum nl80211_bss_scan_width { NL80211_BSS_CHAN_WIDTH_20 = 0, NL80211_BSS_CHAN_WIDTH_10 = 1, NL80211_BSS_CHAN_WIDTH_5 = 2 } ; 2880 enum nl80211_auth_type { NL80211_AUTHTYPE_OPEN_SYSTEM = 0, NL80211_AUTHTYPE_SHARED_KEY = 1, NL80211_AUTHTYPE_FT = 2, NL80211_AUTHTYPE_NETWORK_EAP = 3, NL80211_AUTHTYPE_SAE = 4, __NL80211_AUTHTYPE_NUM = 5, NL80211_AUTHTYPE_MAX = 4, NL80211_AUTHTYPE_AUTOMATIC = 5 } ; 2898 enum nl80211_mfp { NL80211_MFP_NO = 0, NL80211_MFP_REQUIRED = 1 } ; 3804 enum nl80211_txrate_gi { NL80211_TXRATE_DEFAULT_GI = 0, NL80211_TXRATE_FORCE_SGI = 1, NL80211_TXRATE_FORCE_LGI = 2 } ; 3810 enum nl80211_band { NL80211_BAND_2GHZ = 0, NL80211_BAND_5GHZ = 1, NL80211_BAND_60GHZ = 2, NUM_NL80211_BANDS = 3 } ; 3842 enum nl80211_tx_power_setting { NL80211_TX_POWER_AUTOMATIC = 0, NL80211_TX_POWER_LIMITED = 1, NL80211_TX_POWER_FIXED = 2 } ; 3983 struct nl80211_wowlan_tcp_data_seq { __u32 start; __u32 offset; __u32 len; } ; 4120 struct nl80211_wowlan_tcp_data_token { __u32 offset; __u32 len; __u8 token_stream[]; } ; 4132 struct nl80211_wowlan_tcp_data_token_feature { __u32 min_len; __u32 max_len; __u32 bufsize; } ; 4388 enum nl80211_dfs_state { NL80211_DFS_USABLE = 0, NL80211_DFS_UNAVAILABLE = 1, NL80211_DFS_AVAILABLE = 2 } ; 4410 struct nl80211_vendor_cmd_info { __u32 vendor_id; __u32 subcmd; } ; 4910 enum nl80211_bss_select_attr { __NL80211_BSS_SELECT_ATTR_INVALID = 0, NL80211_BSS_SELECT_ATTR_RSSI = 1, NL80211_BSS_SELECT_ATTR_BAND_PREF = 2, NL80211_BSS_SELECT_ATTR_RSSI_ADJUST = 3, __NL80211_BSS_SELECT_ATTR_AFTER_LAST 
= 4, NL80211_BSS_SELECT_ATTR_MAX = 3 } ; 4925 enum nl80211_nan_function_type { NL80211_NAN_FUNC_PUBLISH = 0, NL80211_NAN_FUNC_SUBSCRIBE = 1, NL80211_NAN_FUNC_FOLLOW_UP = 2, __NL80211_NAN_FUNC_TYPE_AFTER_LAST = 3, NL80211_NAN_FUNC_MAX_TYPE = 2 } ; 4984 enum environment_cap { ENVIRON_ANY = 0, ENVIRON_INDOOR = 1, ENVIRON_OUTDOOR = 2 } ; 4990 struct regulatory_request { struct callback_head callback_head; int wiphy_idx; enum nl80211_reg_initiator initiator; enum nl80211_user_reg_hint_type user_reg_hint_type; char alpha2[2U]; enum nl80211_dfs_regions dfs_region; bool intersect; bool processed; enum environment_cap country_ie_env; struct list_head list; } ; 99 struct ieee80211_freq_range { u32 start_freq_khz; u32 end_freq_khz; u32 max_bandwidth_khz; } ; 185 struct ieee80211_power_rule { u32 max_antenna_gain; u32 max_eirp; } ; 190 struct ieee80211_reg_rule { struct ieee80211_freq_range freq_range; struct ieee80211_power_rule power_rule; u32 flags; u32 dfs_cac_ms; } ; 197 struct ieee80211_regdomain { struct callback_head callback_head; u32 n_reg_rules; char alpha2[3U]; enum nl80211_dfs_regions dfs_region; struct ieee80211_reg_rule reg_rules[]; } ; 205 struct wiphy ; 221 struct ieee80211_channel { enum nl80211_band band; u16 center_freq; u16 hw_value; u32 flags; int max_antenna_gain; int max_power; int max_reg_power; bool beacon_found; u32 orig_flags; int orig_mag; int orig_mpwr; enum nl80211_dfs_state dfs_state; unsigned long dfs_state_entered; unsigned int dfs_cac_ms; } ; 174 enum ieee80211_bss_type { IEEE80211_BSS_TYPE_ESS = 0, IEEE80211_BSS_TYPE_PBSS = 1, IEEE80211_BSS_TYPE_IBSS = 2, IEEE80211_BSS_TYPE_MBSS = 3, IEEE80211_BSS_TYPE_ANY = 4 } ; 188 struct ieee80211_rate { u32 flags; u16 bitrate; u16 hw_value; u16 hw_value_short; } ; 250 struct ieee80211_sta_ht_cap { u16 cap; bool ht_supported; u8 ampdu_factor; u8 ampdu_density; struct ieee80211_mcs_info mcs; } ; 270 struct ieee80211_sta_vht_cap { bool vht_supported; u32 cap; struct ieee80211_vht_mcs_info vht_mcs; } ; 286 
struct ieee80211_supported_band { struct ieee80211_channel *channels; struct ieee80211_rate *bitrates; enum nl80211_band band; int n_channels; int n_bitrates; struct ieee80211_sta_ht_cap ht_cap; struct ieee80211_sta_vht_cap vht_cap; } ; 353 struct key_params { const u8 *key; const u8 *seq; int key_len; int seq_len; u32 cipher; } ; 374 struct cfg80211_chan_def { struct ieee80211_channel *chan; enum nl80211_chan_width width; u32 center_freq1; u32 center_freq2; } ; 548 struct survey_info { struct ieee80211_channel *channel; u64 time; u64 time_busy; u64 time_ext_busy; u64 time_rx; u64 time_tx; u64 time_scan; u32 filled; s8 noise; } ; 595 struct cfg80211_crypto_settings { u32 wpa_versions; u32 cipher_group; int n_ciphers_pairwise; u32 ciphers_pairwise[5U]; int n_akm_suites; u32 akm_suites[2U]; bool control_port; __be16 control_port_ethertype; bool control_port_no_encrypt; struct key_params *wep_keys; int wep_tx_key; } ; 665 struct mac_address { u8 addr[6U]; } ; 685 struct __anonstruct_control_524 { u32 legacy; u8 ht_mcs[10U]; u16 vht_mcs[8U]; enum nl80211_txrate_gi gi; } ; 685 struct cfg80211_bitrate_mask { struct __anonstruct_control_524 control[3U]; } ; 932 struct rate_info { u8 flags; u8 mcs; u16 legacy; u8 nss; u8 bw; } ; 982 struct sta_bss_parameters { u8 flags; u8 dtim_period; u16 beacon_interval; } ; 1007 struct cfg80211_tid_stats { u32 filled; u64 rx_msdu; u64 tx_msdu; u64 tx_msdu_retries; u64 tx_msdu_failed; } ; 1025 struct station_info { u64 filled; u32 connected_time; u32 inactive_time; u64 rx_bytes; u64 tx_bytes; u16 llid; u16 plid; u8 plink_state; s8 signal; s8 signal_avg; u8 chains; s8 chain_signal[4U]; s8 chain_signal_avg[4U]; struct rate_info txrate; struct rate_info rxrate; u32 rx_packets; u32 tx_packets; u32 tx_retries; u32 tx_failed; u32 rx_dropped_misc; struct sta_bss_parameters bss_param; struct nl80211_sta_flag_update sta_flags; int generation; const u8 *assoc_req_ies; size_t assoc_req_ies_len; u32 beacon_loss_count; s64 t_offset; enum 
nl80211_mesh_power_mode local_pm; enum nl80211_mesh_power_mode peer_pm; enum nl80211_mesh_power_mode nonpeer_pm; u32 expected_throughput; u64 rx_beacon; u64 rx_duration; u8 rx_beacon_signal_avg; struct cfg80211_tid_stats pertid[17U]; } ; 1426 struct cfg80211_ssid { u8 ssid[32U]; u8 ssid_len; } ; 1459 struct cfg80211_scan_info { u64 scan_start_tsf; u8 tsf_bssid[6U]; bool aborted; } ; 1474 struct cfg80211_scan_request { struct cfg80211_ssid *ssids; int n_ssids; u32 n_channels; enum nl80211_bss_scan_width scan_width; const u8 *ie; size_t ie_len; u16 duration; bool duration_mandatory; u32 flags; u32 rates[3U]; struct wireless_dev *wdev; u8 mac_addr[6U]; u8 mac_addr_mask[6U]; u8 bssid[6U]; struct wiphy *wiphy; unsigned long scan_start; struct cfg80211_scan_info info; bool notified; bool no_cck; struct ieee80211_channel *channels[0U]; } ; 1547 struct cfg80211_match_set { struct cfg80211_ssid ssid; s32 rssi_thold; } ; 1555 struct cfg80211_sched_scan_plan { u32 interval; u32 iterations; } ; 1569 struct cfg80211_sched_scan_request { struct cfg80211_ssid *ssids; int n_ssids; u32 n_channels; enum nl80211_bss_scan_width scan_width; const u8 *ie; size_t ie_len; u32 flags; struct cfg80211_match_set *match_sets; int n_match_sets; s32 min_rssi_thold; u32 delay; struct cfg80211_sched_scan_plan *scan_plans; int n_scan_plans; u8 mac_addr[6U]; u8 mac_addr_mask[6U]; struct wiphy *wiphy; struct net_device *dev; unsigned long scan_start; struct callback_head callback_head; u32 owner_nlportid; struct ieee80211_channel *channels[0U]; } ; 1634 enum cfg80211_signal_type { CFG80211_SIGNAL_TYPE_NONE = 0, CFG80211_SIGNAL_TYPE_MBM = 1, CFG80211_SIGNAL_TYPE_UNSPEC = 2 } ; 1871 struct cfg80211_ibss_params { const u8 *ssid; const u8 *bssid; struct cfg80211_chan_def chandef; const u8 *ie; u8 ssid_len; u8 ie_len; u16 beacon_interval; u32 basic_rates; bool channel_fixed; bool privacy; bool control_port; bool userspace_handles_dfs; int mcast_rate[3U]; struct ieee80211_ht_cap ht_capa; struct 
ieee80211_ht_cap ht_capa_mask; } ; 1919 struct cfg80211_bss_select_adjust { enum nl80211_band band; s8 delta; } ; 1930 union __anonunion_param_525 { enum nl80211_band band_pref; struct cfg80211_bss_select_adjust adjust; } ; 1930 struct cfg80211_bss_selection { enum nl80211_bss_select_attr behaviour; union __anonunion_param_525 param; } ; 1946 struct cfg80211_connect_params { struct ieee80211_channel *channel; struct ieee80211_channel *channel_hint; const u8 *bssid; const u8 *bssid_hint; const u8 *ssid; size_t ssid_len; enum nl80211_auth_type auth_type; const u8 *ie; size_t ie_len; bool privacy; enum nl80211_mfp mfp; struct cfg80211_crypto_settings crypto; const u8 *key; u8 key_len; u8 key_idx; u32 flags; int bg_scan_period; struct ieee80211_ht_cap ht_capa; struct ieee80211_ht_cap ht_capa_mask; struct ieee80211_vht_cap vht_capa; struct ieee80211_vht_cap vht_capa_mask; bool pbss; struct cfg80211_bss_selection bss_select; const u8 *prev_bssid; } ; 2049 struct cfg80211_pkt_pattern { const u8 *mask; const u8 *pattern; int pattern_len; int pkt_offset; } ; 2066 struct cfg80211_wowlan_tcp { struct socket *sock; __be32 src; __be32 dst; u16 src_port; u16 dst_port; u8 dst_mac[6U]; int payload_len; const u8 *payload; struct nl80211_wowlan_tcp_data_seq payload_seq; u32 data_interval; u32 wake_len; const u8 *wake_data; const u8 *wake_mask; u32 tokens_size; struct nl80211_wowlan_tcp_data_token payload_tok; } ; 2101 struct cfg80211_wowlan { bool any; bool disconnect; bool magic_pkt; bool gtk_rekey_failure; bool eap_identity_req; bool four_way_handshake; bool rfkill_release; struct cfg80211_pkt_pattern *patterns; struct cfg80211_wowlan_tcp *tcp; int n_patterns; struct cfg80211_sched_scan_request *nd_config; } ; 2219 struct cfg80211_gtk_rekey_data { const u8 *kek; const u8 *kck; const u8 *replay_ctr; } ; 2315 struct cfg80211_nan_conf { u8 master_pref; u8 dual; } ; 2333 struct cfg80211_nan_func_filter { const u8 *filter; u8 len; } ; 2351 struct cfg80211_nan_func { enum 
nl80211_nan_function_type type; u8 service_id[6U]; u8 publish_type; bool close_range; bool publish_bcast; bool subscribe_active; u8 followup_id; u8 followup_reqid; struct mac_address followup_dest; u32 ttl; const u8 *serv_spec_info; u8 serv_spec_info_len; bool srf_include; const u8 *srf_bf; u8 srf_bf_len; u8 srf_bf_idx; struct mac_address *srf_macs; int srf_num_macs; struct cfg80211_nan_func_filter *rx_filters; struct cfg80211_nan_func_filter *tx_filters; u8 num_tx_filters; u8 num_rx_filters; u8 instance_id; u64 cookie; } ; 3006 struct ieee80211_iface_limit { u16 max; u16 types; } ; 3070 struct ieee80211_iface_combination { const struct ieee80211_iface_limit *limits; u32 num_different_channels; u16 max_interfaces; u8 n_limits; bool beacon_int_infra_match; u8 radar_detect_widths; u8 radar_detect_regions; } ; 3149 struct ieee80211_txrx_stypes { u16 tx; u16 rx; } ; 3165 struct wiphy_wowlan_tcp_support { const struct nl80211_wowlan_tcp_data_token_feature *tok; u32 data_payload_max; u32 data_interval_max; u32 wake_payload_max; bool seq; } ; 3190 struct wiphy_wowlan_support { u32 flags; int n_patterns; int pattern_max_len; int pattern_min_len; int max_pkt_offset; int max_nd_match_sets; const struct wiphy_wowlan_tcp_support *tcp; } ; 3215 struct wiphy_coalesce_support { int n_rules; int max_delay; int n_patterns; int pattern_max_len; int pattern_min_len; int max_pkt_offset; } ; 3240 struct wiphy_vendor_command { struct nl80211_vendor_cmd_info info; u32 flags; int (*doit)(struct wiphy *, struct wireless_dev *, const void *, int); int (*dumpit)(struct wiphy *, struct wireless_dev *, struct sk_buff *, const void *, int, unsigned long *); } ; 3269 struct wiphy_iftype_ext_capab { enum nl80211_iftype iftype; const u8 *extended_capabilities; const u8 *extended_capabilities_mask; u8 extended_capabilities_len; } ; 3289 struct wiphy { u8 perm_addr[6U]; u8 addr_mask[6U]; struct mac_address *addresses; const struct ieee80211_txrx_stypes *mgmt_stypes; const struct 
ieee80211_iface_combination *iface_combinations; int n_iface_combinations; u16 software_iftypes; u16 n_addresses; u16 interface_modes; u16 max_acl_mac_addrs; u32 flags; u32 regulatory_flags; u32 features; u8 ext_features[2U]; u32 ap_sme_capa; enum cfg80211_signal_type signal_type; int bss_priv_size; u8 max_scan_ssids; u8 max_sched_scan_ssids; u8 max_match_sets; u16 max_scan_ie_len; u16 max_sched_scan_ie_len; u32 max_sched_scan_plans; u32 max_sched_scan_plan_interval; u32 max_sched_scan_plan_iterations; int n_cipher_suites; const u32 *cipher_suites; u8 retry_short; u8 retry_long; u32 frag_threshold; u32 rts_threshold; u8 coverage_class; char fw_version[32U]; u32 hw_version; const struct wiphy_wowlan_support *wowlan; struct cfg80211_wowlan *wowlan_config; u16 max_remain_on_channel_duration; u8 max_num_pmkids; u32 available_antennas_tx; u32 available_antennas_rx; u32 probe_resp_offload; const u8 *extended_capabilities; const u8 *extended_capabilities_mask; u8 extended_capabilities_len; const struct wiphy_iftype_ext_capab *iftype_ext_capab; unsigned int num_iftype_ext_capab; const void *privid; struct ieee80211_supported_band *bands[3U]; void (*reg_notifier)(struct wiphy *, struct regulatory_request *); const struct ieee80211_regdomain *regd; struct device dev; bool registered; struct dentry *debugfsdir; const struct ieee80211_ht_cap *ht_capa_mod_mask; const struct ieee80211_vht_cap *vht_capa_mod_mask; struct list_head wdev_list; possible_net_t _net; const struct iw_handler_def *wext; const struct wiphy_coalesce_support *coalesce; const struct wiphy_vendor_command *vendor_commands; const struct nl80211_vendor_cmd_info *vendor_events; int n_vendor_commands; int n_vendor_events; u16 max_ap_assoc_sta; u8 max_num_csa_counters; u8 max_adj_channel_rssi_comp; u32 bss_select_support; u64 cookie_counter; char priv[0U]; } ; 3707 struct cfg80211_conn ; 3708 struct cfg80211_internal_bss ; 3709 struct cfg80211_cached_keys ; 3710 struct __anonstruct_wext_526 { struct 
cfg80211_ibss_params ibss; struct cfg80211_connect_params connect; struct cfg80211_cached_keys *keys; const u8 *ie; size_t ie_len; u8 bssid[6U]; u8 prev_bssid[6U]; u8 ssid[32U]; s8 default_key; s8 default_mgmt_key; bool prev_bssid_valid; } ; 3710 struct wireless_dev { struct wiphy *wiphy; enum nl80211_iftype iftype; struct list_head list; struct net_device *netdev; u32 identifier; struct list_head mgmt_registrations; spinlock_t mgmt_registrations_lock; struct mutex mtx; bool use_4addr; bool p2p_started; bool nan_started; u8 address[6U]; u8 ssid[32U]; u8 ssid_len; u8 mesh_id_len; u8 mesh_id_up_len; struct cfg80211_conn *conn; struct cfg80211_cached_keys *connect_keys; enum ieee80211_bss_type conn_bss_type; struct list_head event_list; spinlock_t event_lock; struct cfg80211_internal_bss *current_bss; struct cfg80211_chan_def preset_chandef; struct cfg80211_chan_def chandef; bool ibss_fixed; bool ibss_dfs_possible; bool ps; int ps_timeout; int beacon_interval; u32 ap_unexpected_nlportid; bool cac_started; unsigned long cac_start_time; unsigned int cac_time_ms; u32 owner_nlportid; struct __anonstruct_wext_526 wext; } ; 519 struct tcmsg { unsigned char tcm_family; unsigned char tcm__pad1; unsigned short tcm__pad2; int tcm_ifindex; __u32 tcm_handle; __u32 tcm_parent; __u32 tcm_info; } ; 27 struct gnet_stats_basic_packed { __u64 bytes; __u32 packets; } ; 41 struct gnet_stats_rate_est64 { __u64 bps; __u64 pps; } ; 51 struct gnet_stats_queue { __u32 qlen; __u32 backlog; __u32 drops; __u32 requeues; __u32 overlimits; } ; 77 struct gnet_stats_basic_cpu { struct gnet_stats_basic_packed bstats; struct u64_stats_sync syncp; } ; 13 struct gnet_dump { spinlock_t *lock; struct sk_buff *skb; struct nlattr *tail; int compat_tc_stats; int compat_xstats; int padattr; void *xstats; int xstats_len; struct tc_stats tc_stats; } ; 87 struct nla_policy { u16 type; u16 len; } ; 25 struct rtnl_link_ops { struct list_head list; const char *kind; size_t priv_size; void (*setup)(struct net_device 
*); int maxtype; const struct nla_policy *policy; int (*validate)(struct nlattr **, struct nlattr **); int (*newlink)(struct net *, struct net_device *, struct nlattr **, struct nlattr **); int (*changelink)(struct net_device *, struct nlattr **, struct nlattr **); void (*dellink)(struct net_device *, struct list_head *); size_t (*get_size)(const struct net_device *); int (*fill_info)(struct sk_buff *, const struct net_device *); size_t (*get_xstats_size)(const struct net_device *); int (*fill_xstats)(struct sk_buff *, const struct net_device *); unsigned int (*get_num_tx_queues)(); unsigned int (*get_num_rx_queues)(); int slave_maxtype; const struct nla_policy *slave_policy; int (*slave_validate)(struct nlattr **, struct nlattr **); int (*slave_changelink)(struct net_device *, struct net_device *, struct nlattr **, struct nlattr **); size_t (*get_slave_size)(const struct net_device *, const struct net_device *); int (*fill_slave_info)(struct sk_buff *, const struct net_device *, const struct net_device *); struct net * (*get_link_net)(const struct net_device *); size_t (*get_linkxstats_size)(const struct net_device *, int); int (*fill_linkxstats)(struct sk_buff *, const struct net_device *, int *, int); } ; 158 struct Qdisc_ops ; 159 struct qdisc_walker ; 160 struct tcf_walker ; 30 struct qdisc_size_table { struct callback_head rcu; struct list_head list; struct tc_sizespec szopts; int refcnt; u16 data[]; } ; 38 struct qdisc_skb_head { struct sk_buff *head; struct sk_buff *tail; __u32 qlen; spinlock_t lock; } ; 46 struct Qdisc { int (*enqueue)(struct sk_buff *, struct Qdisc *, struct sk_buff **); struct sk_buff * (*dequeue)(struct Qdisc *); unsigned int flags; u32 limit; const struct Qdisc_ops *ops; struct qdisc_size_table *stab; struct hlist_node hash; u32 handle; u32 parent; void *u32_node; struct netdev_queue *dev_queue; struct gnet_stats_rate_est64 rate_est; struct gnet_stats_basic_cpu *cpu_bstats; struct gnet_stats_queue *cpu_qstats; struct sk_buff *gso_skb; 
struct qdisc_skb_head q; struct gnet_stats_basic_packed bstats; seqcount_t running; struct gnet_stats_queue qstats; unsigned long state; struct Qdisc *next_sched; struct sk_buff *skb_bad_txq; struct callback_head callback_head; int padded; atomic_t refcnt; spinlock_t busylock; } ; 134 struct Qdisc_class_ops { struct netdev_queue * (*select_queue)(struct Qdisc *, struct tcmsg *); int (*graft)(struct Qdisc *, unsigned long, struct Qdisc *, struct Qdisc **); struct Qdisc * (*leaf)(struct Qdisc *, unsigned long); void (*qlen_notify)(struct Qdisc *, unsigned long); unsigned long int (*get)(struct Qdisc *, u32 ); void (*put)(struct Qdisc *, unsigned long); int (*change)(struct Qdisc *, u32 , u32 , struct nlattr **, unsigned long *); int (*delete)(struct Qdisc *, unsigned long); void (*walk)(struct Qdisc *, struct qdisc_walker *); struct tcf_proto ** (*tcf_chain)(struct Qdisc *, unsigned long); bool (*tcf_cl_offload)(u32 ); unsigned long int (*bind_tcf)(struct Qdisc *, unsigned long, u32 ); void (*unbind_tcf)(struct Qdisc *, unsigned long); int (*dump)(struct Qdisc *, unsigned long, struct sk_buff *, struct tcmsg *); int (*dump_stats)(struct Qdisc *, unsigned long, struct gnet_dump *); } ; 166 struct Qdisc_ops { struct Qdisc_ops *next; const struct Qdisc_class_ops *cl_ops; char id[16U]; int priv_size; int (*enqueue)(struct sk_buff *, struct Qdisc *, struct sk_buff **); struct sk_buff * (*dequeue)(struct Qdisc *); struct sk_buff * (*peek)(struct Qdisc *); int (*init)(struct Qdisc *, struct nlattr *); void (*reset)(struct Qdisc *); void (*destroy)(struct Qdisc *); int (*change)(struct Qdisc *, struct nlattr *); void (*attach)(struct Qdisc *); int (*dump)(struct Qdisc *, struct sk_buff *); int (*dump_stats)(struct Qdisc *, struct gnet_dump *); struct module *owner; } ; 191 struct tcf_result { unsigned long class; u32 classid; } ; 197 struct tcf_proto_ops { struct list_head head; char kind[16U]; int (*classify)(struct sk_buff *, const struct tcf_proto *, struct tcf_result *); 
int (*init)(struct tcf_proto *); bool (*destroy)(struct tcf_proto *, bool ); unsigned long int (*get)(struct tcf_proto *, u32 ); int (*change)(struct net *, struct sk_buff *, struct tcf_proto *, unsigned long, u32 , struct nlattr **, unsigned long *, bool ); int (*delete)(struct tcf_proto *, unsigned long); void (*walk)(struct tcf_proto *, struct tcf_walker *); int (*dump)(struct net *, struct tcf_proto *, unsigned long, struct sk_buff *, struct tcmsg *); struct module *owner; } ; 222 struct tcf_proto { struct tcf_proto *next; void *root; int (*classify)(struct sk_buff *, const struct tcf_proto *, struct tcf_result *); __be16 protocol; u32 prio; u32 classid; struct Qdisc *q; void *data; const struct tcf_proto_ops *ops; struct callback_head rcu; } ; 846 struct qdisc_walker { int stop; int skip; int count; int (*fn)(struct Qdisc *, unsigned long, struct qdisc_walker *); } ; 103 struct page_counter { atomic_long_t count; unsigned long limit; struct page_counter *parent; unsigned long watermark; unsigned long failcnt; } ; 33 struct eventfd_ctx ; 41 struct vmpressure { unsigned long scanned; unsigned long reclaimed; unsigned long tree_scanned; unsigned long tree_reclaimed; struct spinlock sr_lock; struct list_head events; struct mutex events_lock; struct work_struct work; } ; 44 struct fprop_global { struct percpu_counter events; unsigned int period; seqcount_t sequence; } ; 72 struct fprop_local_percpu { struct percpu_counter events; unsigned int period; raw_spinlock_t lock; } ; 32 typedef int congested_fn(void *, int); 41 struct bdi_writeback_congested { unsigned long state; atomic_t refcnt; struct backing_dev_info *bdi; int blkcg_id; struct rb_node rb_node; } ; 60 union __anonunion____missing_field_name_541 { struct work_struct release_work; struct callback_head rcu; } ; 60 struct bdi_writeback { struct backing_dev_info *bdi; unsigned long state; unsigned long last_old_flush; struct list_head b_dirty; struct list_head b_io; struct list_head b_more_io; struct 
list_head b_dirty_time; spinlock_t list_lock; struct percpu_counter stat[4U]; struct bdi_writeback_congested *congested; unsigned long bw_time_stamp; unsigned long dirtied_stamp; unsigned long written_stamp; unsigned long write_bandwidth; unsigned long avg_write_bandwidth; unsigned long dirty_ratelimit; unsigned long balanced_dirty_ratelimit; struct fprop_local_percpu completions; int dirty_exceeded; spinlock_t work_lock; struct list_head work_list; struct delayed_work dwork; struct list_head bdi_node; struct percpu_ref refcnt; struct fprop_local_percpu memcg_completions; struct cgroup_subsys_state *memcg_css; struct cgroup_subsys_state *blkcg_css; struct list_head memcg_node; struct list_head blkcg_node; union __anonunion____missing_field_name_541 __annonCompField111; } ; 134 struct backing_dev_info { struct list_head bdi_list; unsigned long ra_pages; unsigned int capabilities; congested_fn *congested_fn; void *congested_data; char *name; unsigned int min_ratio; unsigned int max_ratio; unsigned int max_prop_frac; atomic_long_t tot_write_bandwidth; struct bdi_writeback wb; struct list_head wb_list; struct radix_tree_root cgwb_tree; struct rb_root cgwb_congested_tree; atomic_t usage_cnt; wait_queue_head_t wb_waitq; struct device *dev; struct device *owner; struct timer_list laptop_mode_wb_timer; struct dentry *debug_dir; struct dentry *debug_stats; } ; 14 enum writeback_sync_modes { WB_SYNC_NONE = 0, WB_SYNC_ALL = 1 } ; 31 struct writeback_control { long nr_to_write; long pages_skipped; loff_t range_start; loff_t range_end; enum writeback_sync_modes sync_mode; unsigned char for_kupdate; unsigned char for_background; unsigned char tagged_writepages; unsigned char for_reclaim; unsigned char range_cyclic; unsigned char for_sync; struct bdi_writeback *wb; struct inode *inode; int wb_id; int wb_lcand_id; int wb_tcand_id; size_t wb_bytes; size_t wb_lcand_bytes; size_t wb_tcand_bytes; } ; 101 struct wb_domain { spinlock_t lock; struct fprop_global completions; struct 
timer_list period_timer; unsigned long period_time; unsigned long dirty_limit_tstamp; unsigned long dirty_limit; } ; 12 typedef void * mempool_alloc_t(gfp_t , void *); 13 typedef void mempool_free_t(void *, void *); 14 struct mempool_s { spinlock_t lock; int min_nr; int curr_nr; void **elements; void *pool_data; mempool_alloc_t *alloc; mempool_free_t *free; wait_queue_head_t wait; } ; 25 typedef struct mempool_s mempool_t; 79 union __anonunion____missing_field_name_542 { struct list_head q_node; struct kmem_cache *__rcu_icq_cache; } ; 79 union __anonunion____missing_field_name_543 { struct hlist_node ioc_node; struct callback_head __rcu_head; } ; 79 struct io_cq { struct request_queue *q; struct io_context *ioc; union __anonunion____missing_field_name_542 __annonCompField112; union __anonunion____missing_field_name_543 __annonCompField113; unsigned int flags; } ; 92 struct io_context { atomic_long_t refcount; atomic_t active_ref; atomic_t nr_tasks; spinlock_t lock; unsigned short ioprio; int nr_batch_requests; unsigned long last_waited; struct radix_tree_root icq_tree; struct io_cq *icq_hint; struct hlist_head icq_list; struct work_struct release_work; } ; 299 struct bio_integrity_payload { struct bio *bip_bio; struct bvec_iter bip_iter; bio_end_io_t *bip_end_io; unsigned short bip_slab; unsigned short bip_vcnt; unsigned short bip_max_vcnt; unsigned short bip_flags; struct work_struct bip_work; struct bio_vec *bip_vec; struct bio_vec bip_inline_vecs[0U]; } ; 534 struct bio_list { struct bio *head; struct bio *tail; } ; 666 struct bio_set { struct kmem_cache *bio_slab; unsigned int front_pad; mempool_t *bio_pool; mempool_t *bvec_pool; mempool_t *bio_integrity_pool; mempool_t *bvec_integrity_pool; spinlock_t rescue_lock; struct bio_list rescue_list; struct work_struct rescue_work; struct workqueue_struct *rescue_workqueue; } ; 87 struct mem_cgroup_id { int id; atomic_t ref; } ; 104 struct mem_cgroup_stat_cpu { long count[11U]; unsigned long events[8U]; unsigned long 
nr_page_events; unsigned long targets[3U]; } ; 111 struct mem_cgroup_reclaim_iter { struct mem_cgroup *position; unsigned int generation; } ; 117 struct mem_cgroup_per_node { struct lruvec lruvec; unsigned long lru_size[5U]; struct mem_cgroup_reclaim_iter iter[13U]; struct rb_node tree_node; unsigned long usage_in_excess; bool on_tree; struct mem_cgroup *memcg; } ; 133 struct mem_cgroup_threshold { struct eventfd_ctx *eventfd; unsigned long threshold; } ; 139 struct mem_cgroup_threshold_ary { int current_threshold; unsigned int size; struct mem_cgroup_threshold entries[0U]; } ; 149 struct mem_cgroup_thresholds { struct mem_cgroup_threshold_ary *primary; struct mem_cgroup_threshold_ary *spare; } ; 160 enum memcg_kmem_state { KMEM_NONE = 0, KMEM_ALLOCATED = 1, KMEM_ONLINE = 2 } ; 166 struct mem_cgroup { struct cgroup_subsys_state css; struct mem_cgroup_id id; struct page_counter memory; struct page_counter swap; struct page_counter memsw; struct page_counter kmem; struct page_counter tcpmem; unsigned long low; unsigned long high; struct work_struct high_work; unsigned long soft_limit; struct vmpressure vmpressure; bool use_hierarchy; bool oom_lock; int under_oom; int swappiness; int oom_kill_disable; struct cgroup_file events_file; struct mutex thresholds_lock; struct mem_cgroup_thresholds thresholds; struct mem_cgroup_thresholds memsw_thresholds; struct list_head oom_notify; unsigned long move_charge_at_immigrate; atomic_t moving_account; spinlock_t move_lock; struct task_struct *move_lock_task; unsigned long move_lock_flags; struct mem_cgroup_stat_cpu *stat; unsigned long socket_pressure; bool tcpmem_active; int tcpmem_pressure; int kmemcg_id; enum memcg_kmem_state kmem_state; int last_scanned_node; nodemask_t scan_nodes; atomic_t numainfo_events; atomic_t numainfo_updating; struct list_head cgwb_list; struct wb_domain cgwb_domain; struct list_head event_list; spinlock_t event_list_lock; struct mem_cgroup_per_node *nodeinfo[0U]; } ; 853 struct sock_filter { __u16 
code; __u8 jt; __u8 jf; __u32 k; } ; 49 struct bpf_insn { __u8 code; unsigned char dst_reg; unsigned char src_reg; __s16 off; __s32 imm; } ; 88 enum bpf_prog_type { BPF_PROG_TYPE_UNSPEC = 0, BPF_PROG_TYPE_SOCKET_FILTER = 1, BPF_PROG_TYPE_KPROBE = 2, BPF_PROG_TYPE_SCHED_CLS = 3, BPF_PROG_TYPE_SCHED_ACT = 4, BPF_PROG_TYPE_TRACEPOINT = 5, BPF_PROG_TYPE_XDP = 6, BPF_PROG_TYPE_PERF_EVENT = 7 } ; 523 struct bpf_prog_aux ; 387 struct sock_fprog_kern { u16 len; struct sock_filter *filter; } ; 398 union __anonunion____missing_field_name_554 { struct sock_filter insns[0U]; struct bpf_insn insnsi[0U]; } ; 398 struct bpf_prog { u16 pages; unsigned char jited; unsigned char gpl_compatible; unsigned char cb_access; unsigned char dst_needed; u32 len; enum bpf_prog_type type; struct bpf_prog_aux *aux; struct sock_fprog_kern *orig_prog; unsigned int (*bpf_func)(const struct sk_buff *, const struct bpf_insn *); union __anonunion____missing_field_name_554 __annonCompField120; } ; 419 struct sk_filter { atomic_t refcnt; struct callback_head rcu; struct bpf_prog *prog; } ; 138 struct pollfd { int fd; short events; short revents; } ; 32 struct poll_table_struct { void (*_qproc)(struct file *, wait_queue_head_t *, struct poll_table_struct *); unsigned long _key; } ; 187 struct neigh_table ; 187 struct neigh_parms { possible_net_t net; struct net_device *dev; struct list_head list; int (*neigh_setup)(struct neighbour *); void (*neigh_cleanup)(struct neighbour *); struct neigh_table *tbl; void *sysctl_table; int dead; atomic_t refcnt; struct callback_head callback_head; int reachable_time; int data[13U]; unsigned long data_state[1U]; } ; 110 struct neigh_statistics { unsigned long allocs; unsigned long destroys; unsigned long hash_grows; unsigned long res_failed; unsigned long lookups; unsigned long hits; unsigned long rcv_probes_mcast; unsigned long rcv_probes_ucast; unsigned long periodic_gc_runs; unsigned long forced_gc_runs; unsigned long unres_discards; unsigned long table_fulls; } ; 
130 struct neigh_ops ; 130 struct neighbour { struct neighbour *next; struct neigh_table *tbl; struct neigh_parms *parms; unsigned long confirmed; unsigned long updated; rwlock_t lock; atomic_t refcnt; struct sk_buff_head arp_queue; unsigned int arp_queue_len_bytes; struct timer_list timer; unsigned long used; atomic_t probes; __u8 flags; __u8 nud_state; __u8 type; __u8 dead; seqlock_t ha_lock; unsigned char ha[32U]; struct hh_cache hh; int (*output)(struct neighbour *, struct sk_buff *); const struct neigh_ops *ops; struct callback_head rcu; struct net_device *dev; u8 primary_key[0U]; } ; 159 struct neigh_ops { int family; void (*solicit)(struct neighbour *, struct sk_buff *); void (*error_report)(struct neighbour *, struct sk_buff *); int (*output)(struct neighbour *, struct sk_buff *); int (*connected_output)(struct neighbour *, struct sk_buff *); } ; 167 struct pneigh_entry { struct pneigh_entry *next; possible_net_t net; struct net_device *dev; u8 flags; u8 key[0U]; } ; 175 struct neigh_hash_table { struct neighbour **hash_buckets; unsigned int hash_shift; __u32 hash_rnd[4U]; struct callback_head rcu; } ; 188 struct neigh_table { int family; int entry_size; int key_len; __be16 protocol; __u32 (*hash)(const void *, const struct net_device *, __u32 *); bool (*key_eq)(const struct neighbour *, const void *); int (*constructor)(struct neighbour *); int (*pconstructor)(struct pneigh_entry *); void (*pdestructor)(struct pneigh_entry *); void (*proxy_redo)(struct sk_buff *); char *id; struct neigh_parms parms; struct list_head parms_list; int gc_interval; int gc_thresh1; int gc_thresh2; int gc_thresh3; unsigned long last_flush; struct delayed_work gc_work; struct timer_list proxy_timer; struct sk_buff_head proxy_queue; atomic_t entries; rwlock_t lock; unsigned long last_rand; struct neigh_statistics *stats; struct neigh_hash_table *nht; struct pneigh_entry **phash_buckets; } ; 520 struct lwtunnel_state ; 520 struct dn_route ; 520 union 
__anonunion____missing_field_name_570 { struct dst_entry *next; struct rtable *rt_next; struct rt6_info *rt6_next; struct dn_route *dn_next; } ; 520 struct dst_entry { struct callback_head callback_head; struct dst_entry *child; struct net_device *dev; struct dst_ops *ops; unsigned long _metrics; unsigned long expires; struct dst_entry *path; struct dst_entry *from; struct xfrm_state *xfrm; int (*input)(struct sk_buff *); int (*output)(struct net *, struct sock *, struct sk_buff *); unsigned short flags; unsigned short pending_confirm; short error; short obsolete; unsigned short header_len; unsigned short trailer_len; __u32 tclassid; long __pad_to_align_refcnt[2U]; atomic_t __refcnt; int __use; unsigned long lastuse; struct lwtunnel_state *lwtstate; union __anonunion____missing_field_name_570 __annonCompField121; } ; 110 struct __anonstruct_socket_lock_t_571 { spinlock_t slock; int owned; wait_queue_head_t wq; struct lockdep_map dep_map; } ; 110 typedef struct __anonstruct_socket_lock_t_571 socket_lock_t; 110 struct proto ; 116 typedef __u32 __portpair; 117 typedef __u64 __addrpair; 118 struct __anonstruct____missing_field_name_573 { __be32 skc_daddr; __be32 skc_rcv_saddr; } ; 118 union __anonunion____missing_field_name_572 { __addrpair skc_addrpair; struct __anonstruct____missing_field_name_573 __annonCompField122; } ; 118 union __anonunion____missing_field_name_574 { unsigned int skc_hash; __u16 skc_u16hashes[2U]; } ; 118 struct __anonstruct____missing_field_name_576 { __be16 skc_dport; __u16 skc_num; } ; 118 union __anonunion____missing_field_name_575 { __portpair skc_portpair; struct __anonstruct____missing_field_name_576 __annonCompField125; } ; 118 union __anonunion____missing_field_name_577 { struct hlist_node skc_bind_node; struct hlist_node skc_portaddr_node; } ; 118 struct inet_timewait_death_row ; 118 union __anonunion____missing_field_name_578 { unsigned long skc_flags; struct sock *skc_listener; struct inet_timewait_death_row *skc_tw_dr; } ; 118 union 
__anonunion____missing_field_name_579 { struct hlist_node skc_node; struct hlist_nulls_node skc_nulls_node; } ; 118 union __anonunion____missing_field_name_580 { int skc_incoming_cpu; u32 skc_rcv_wnd; u32 skc_tw_rcv_nxt; } ; 118 union __anonunion____missing_field_name_581 { u32 skc_rxhash; u32 skc_window_clamp; u32 skc_tw_snd_nxt; } ; 118 struct sock_common { union __anonunion____missing_field_name_572 __annonCompField123; union __anonunion____missing_field_name_574 __annonCompField124; union __anonunion____missing_field_name_575 __annonCompField126; unsigned short skc_family; volatile unsigned char skc_state; unsigned char skc_reuse; unsigned char skc_reuseport; unsigned char skc_ipv6only; unsigned char skc_net_refcnt; int skc_bound_dev_if; union __anonunion____missing_field_name_577 __annonCompField127; struct proto *skc_prot; possible_net_t skc_net; struct in6_addr skc_v6_daddr; struct in6_addr skc_v6_rcv_saddr; atomic64_t skc_cookie; union __anonunion____missing_field_name_578 __annonCompField128; int skc_dontcopy_begin[0U]; union __anonunion____missing_field_name_579 __annonCompField129; int skc_tx_queue_mapping; union __anonunion____missing_field_name_580 __annonCompField130; atomic_t skc_refcnt; int skc_dontcopy_end[0U]; union __anonunion____missing_field_name_581 __annonCompField131; } ; 230 struct __anonstruct_sk_backlog_582 { atomic_t rmem_alloc; int len; struct sk_buff *head; struct sk_buff *tail; } ; 230 union __anonunion____missing_field_name_583 { struct socket_wq *sk_wq; struct socket_wq *sk_wq_raw; } ; 230 struct sock_reuseport ; 230 struct sock { struct sock_common __sk_common; socket_lock_t sk_lock; struct sk_buff_head sk_receive_queue; struct __anonstruct_sk_backlog_582 sk_backlog; int sk_forward_alloc; __u32 sk_txhash; unsigned int sk_napi_id; unsigned int sk_ll_usec; atomic_t sk_drops; int sk_rcvbuf; struct sk_filter *sk_filter; union __anonunion____missing_field_name_583 __annonCompField132; struct xfrm_policy *sk_policy[2U]; struct dst_entry 
*sk_rx_dst; struct dst_entry *sk_dst_cache; atomic_t sk_wmem_alloc; atomic_t sk_omem_alloc; int sk_sndbuf; struct sk_buff_head sk_write_queue; unsigned char sk_padding; unsigned char sk_no_check_tx; unsigned char sk_no_check_rx; unsigned char sk_userlocks; unsigned char sk_protocol; unsigned short sk_type; int sk_wmem_queued; gfp_t sk_allocation; u32 sk_pacing_rate; u32 sk_max_pacing_rate; netdev_features_t sk_route_caps; netdev_features_t sk_route_nocaps; int sk_gso_type; unsigned int sk_gso_max_size; u16 sk_gso_max_segs; int sk_rcvlowat; unsigned long sk_lingertime; struct sk_buff_head sk_error_queue; struct proto *sk_prot_creator; rwlock_t sk_callback_lock; int sk_err; int sk_err_soft; u32 sk_ack_backlog; u32 sk_max_ack_backlog; __u32 sk_priority; __u32 sk_mark; struct pid *sk_peer_pid; const struct cred *sk_peer_cred; long sk_rcvtimeo; long sk_sndtimeo; struct timer_list sk_timer; ktime_t sk_stamp; u16 sk_tsflags; u8 sk_shutdown; u32 sk_tskey; struct socket *sk_socket; void *sk_user_data; struct page_frag sk_frag; struct sk_buff *sk_send_head; __s32 sk_peek_off; int sk_write_pending; void *sk_security; struct sock_cgroup_data sk_cgrp_data; struct mem_cgroup *sk_memcg; void (*sk_state_change)(struct sock *); void (*sk_data_ready)(struct sock *); void (*sk_write_space)(struct sock *); void (*sk_error_report)(struct sock *); int (*sk_backlog_rcv)(struct sock *, struct sk_buff *); void (*sk_destruct)(struct sock *); struct sock_reuseport *sk_reuseport_cb; struct callback_head sk_rcu; } ; 948 struct request_sock_ops ; 949 struct timewait_sock_ops ; 950 struct inet_hashinfo ; 951 struct raw_hashinfo ; 965 struct udp_table ; 965 union __anonunion_h_594 { struct inet_hashinfo *hashinfo; struct udp_table *udp_table; struct raw_hashinfo *raw_hash; } ; 965 struct proto { void (*close)(struct sock *, long); int (*connect)(struct sock *, struct sockaddr *, int); int (*disconnect)(struct sock *, int); struct sock * (*accept)(struct sock *, int, int *); int (*ioctl)(struct 
sock *, int, unsigned long); int (*init)(struct sock *); void (*destroy)(struct sock *); void (*shutdown)(struct sock *, int); int (*setsockopt)(struct sock *, int, int, char *, unsigned int); int (*getsockopt)(struct sock *, int, int, char *, int *); int (*compat_setsockopt)(struct sock *, int, int, char *, unsigned int); int (*compat_getsockopt)(struct sock *, int, int, char *, int *); int (*compat_ioctl)(struct sock *, unsigned int, unsigned long); int (*sendmsg)(struct sock *, struct msghdr *, size_t ); int (*recvmsg)(struct sock *, struct msghdr *, size_t , int, int, int *); int (*sendpage)(struct sock *, struct page *, int, size_t , int); int (*bind)(struct sock *, struct sockaddr *, int); int (*backlog_rcv)(struct sock *, struct sk_buff *); void (*release_cb)(struct sock *); int (*hash)(struct sock *); void (*unhash)(struct sock *); void (*rehash)(struct sock *); int (*get_port)(struct sock *, unsigned short); unsigned int inuse_idx; bool (*stream_memory_free)(const struct sock *); void (*enter_memory_pressure)(struct sock *); atomic_long_t *memory_allocated; struct percpu_counter *sockets_allocated; int *memory_pressure; long *sysctl_mem; int *sysctl_wmem; int *sysctl_rmem; int max_header; bool no_autobind; struct kmem_cache *slab; unsigned int obj_size; int slab_flags; struct percpu_counter *orphan_count; struct request_sock_ops *rsk_prot; struct timewait_sock_ops *twsk_prot; union __anonunion_h_594 h; struct module *owner; char name[32U]; struct list_head node; int (*diag_destroy)(struct sock *, int); } ; 2283 struct request_sock_ops { int family; int obj_size; struct kmem_cache *slab; char *slab_name; int (*rtx_syn_ack)(const struct sock *, struct request_sock *); void (*send_ack)(const struct sock *, struct sk_buff *, struct request_sock *); void (*send_reset)(const struct sock *, struct sk_buff *); void (*destructor)(struct request_sock *); void (*syn_ack_timeout)(const struct request_sock *); } ; 46 struct request_sock { struct sock_common 
__req_common; struct request_sock *dl_next; u16 mss; u8 num_retrans; unsigned char cookie_ts; unsigned char num_timeout; u32 ts_recent; struct timer_list rsk_timer; const struct request_sock_ops *rsk_ops; struct sock *sk; u32 *saved_syn; u32 secid; u32 peer_secid; } ; 18 struct fib_rule_hdr { __u8 family; __u8 dst_len; __u8 src_len; __u8 tos; __u8 table; __u8 res1; __u8 res2; __u8 action; __u32 flags; } ; 68 struct fib_rule { struct list_head list; int iifindex; int oifindex; u32 mark; u32 mark_mask; u32 flags; u32 table; u8 action; u8 l3mdev; u32 target; __be64 tun_id; struct fib_rule *ctarget; struct net *fr_net; atomic_t refcnt; u32 pref; int suppress_ifgroup; int suppress_prefixlen; char iifname[16U]; char oifname[16U]; struct callback_head rcu; } ; 35 struct fib_lookup_arg { void *lookup_ptr; void *result; struct fib_rule *rule; u32 table; int flags; } ; 43 struct fib_rules_ops { int family; struct list_head list; int rule_size; int addr_size; int unresolved_rules; int nr_goto_rules; int (*action)(struct fib_rule *, struct flowi *, int, struct fib_lookup_arg *); bool (*suppress)(struct fib_rule *, struct fib_lookup_arg *); int (*match)(struct fib_rule *, struct flowi *, int); int (*configure)(struct fib_rule *, struct sk_buff *, struct fib_rule_hdr *, struct nlattr **); int (*delete)(struct fib_rule *); int (*compare)(struct fib_rule *, struct fib_rule_hdr *, struct nlattr **); int (*fill)(struct fib_rule *, struct sk_buff *, struct fib_rule_hdr *); size_t (*nlmsg_payload)(struct fib_rule *); void (*flush_cache)(struct fib_rules_ops *); int nlgroup; const struct nla_policy *policy; struct list_head rules_list; struct module *owner; struct net *fro_net; struct callback_head rcu; } ; 140 struct l3mdev_ops { u32 (*l3mdev_fib_table)(const struct net_device *); struct sk_buff * (*l3mdev_l3_rcv)(struct net_device *, struct sk_buff *, u16 ); struct sk_buff * (*l3mdev_l3_out)(struct net_device *, struct sock *, struct sk_buff *, u16 ); struct dst_entry * 
(*l3mdev_link_scope_lookup)(const struct net_device *, struct flowi6 *); } ; 181 struct ipv6_stable_secret { bool initialized; struct in6_addr secret; } ; 65 struct ipv6_devconf { __s32 forwarding; __s32 hop_limit; __s32 mtu6; __s32 accept_ra; __s32 accept_redirects; __s32 autoconf; __s32 dad_transmits; __s32 rtr_solicits; __s32 rtr_solicit_interval; __s32 rtr_solicit_max_interval; __s32 rtr_solicit_delay; __s32 force_mld_version; __s32 mldv1_unsolicited_report_interval; __s32 mldv2_unsolicited_report_interval; __s32 use_tempaddr; __s32 temp_valid_lft; __s32 temp_prefered_lft; __s32 regen_max_retry; __s32 max_desync_factor; __s32 max_addresses; __s32 accept_ra_defrtr; __s32 accept_ra_min_hop_limit; __s32 accept_ra_pinfo; __s32 ignore_routes_with_linkdown; __s32 accept_ra_rtr_pref; __s32 rtr_probe_interval; __s32 accept_ra_rt_info_max_plen; __s32 proxy_ndp; __s32 accept_source_route; __s32 accept_ra_from_local; __s32 optimistic_dad; __s32 use_optimistic; __s32 mc_forwarding; __s32 disable_ipv6; __s32 drop_unicast_in_l2_multicast; __s32 accept_dad; __s32 force_tllao; __s32 ndisc_notify; __s32 suppress_frag_ndisc; __s32 accept_ra_mtu; __s32 drop_unsolicited_na; struct ipv6_stable_secret stable_secret; __s32 use_oif_addrs_only; __s32 keep_addr_on_down; struct ctl_table_header *sysctl_header; } ; 328 struct timewait_sock_ops { struct kmem_cache *twsk_slab; char *twsk_slab_name; unsigned int twsk_obj_size; int (*twsk_unique)(struct sock *, struct sock *, void *); void (*twsk_destructor)(struct sock *); } ; 39 struct inet_timewait_death_row { atomic_t tw_count; struct inet_hashinfo *hashinfo; int sysctl_tw_recycle; int sysctl_max_tw_buckets; } ; 62 typedef u32 codel_time_t; 174 struct ieee80211_tx_queue_params { u16 txop; u16 cw_min; u16 cw_max; u8 aifs; bool acm; bool uapsd; } ; 174 struct ieee80211_low_level_stats { unsigned int dot11ACKFailureCount; unsigned int dot11RTSFailureCount; unsigned int dot11FCSErrorCount; unsigned int dot11RTSSuccessCount; } ; 189 struct 
ieee80211_chanctx_conf { struct cfg80211_chan_def def; struct cfg80211_chan_def min_def; u8 rx_chains_static; u8 rx_chains_dynamic; bool radar_enabled; u8 drv_priv[0U]; } ; 226 enum ieee80211_chanctx_switch_mode { CHANCTX_SWMODE_REASSIGN_VIF = 0, CHANCTX_SWMODE_SWAP_CONTEXTS = 1 } ; 231 struct ieee80211_vif ; 231 struct ieee80211_vif_chanctx_switch { struct ieee80211_vif *vif; struct ieee80211_chanctx_conf *old_ctx; struct ieee80211_chanctx_conf *new_ctx; } ; 290 enum ieee80211_event_type { RSSI_EVENT = 0, MLME_EVENT = 1, BAR_RX_EVENT = 2, BA_FRAME_TIMEOUT = 3 } ; 297 enum ieee80211_rssi_event_data { RSSI_EVENT_HIGH = 0, RSSI_EVENT_LOW = 1 } ; 302 struct ieee80211_rssi_event { enum ieee80211_rssi_event_data data; } ; 373 enum ieee80211_mlme_event_data { AUTH_EVENT = 0, ASSOC_EVENT = 1, DEAUTH_RX_EVENT = 2, DEAUTH_TX_EVENT = 3 } ; 380 enum ieee80211_mlme_event_status { MLME_SUCCESS = 0, MLME_DENIED = 1, MLME_TIMEOUT = 2 } ; 386 struct ieee80211_mlme_event { enum ieee80211_mlme_event_data data; enum ieee80211_mlme_event_status status; u16 reason; } ; 411 struct ieee80211_sta ; 411 struct ieee80211_ba_event { struct ieee80211_sta *sta; u16 tid; u16 ssn; } ; 423 union __anonunion_u_630 { struct ieee80211_rssi_event rssi; struct ieee80211_mlme_event mlme; struct ieee80211_ba_event ba; } ; 423 struct ieee80211_event { enum ieee80211_event_type type; union __anonunion_u_630 u; } ; 440 struct ieee80211_mu_group_data { u8 membership[8U]; u8 position[16U]; } ; 453 struct ieee80211_bss_conf { const u8 *bssid; bool assoc; bool ibss_joined; bool ibss_creator; u16 aid; bool use_cts_prot; bool use_short_preamble; bool use_short_slot; bool enable_beacon; u8 dtim_period; u16 beacon_int; u16 assoc_capability; u64 sync_tsf; u32 sync_device_ts; u8 sync_dtim_count; u32 basic_rates; struct ieee80211_rate *beacon_rate; int mcast_rate[3U]; u16 ht_operation_mode; s32 cqm_rssi_thold; u32 cqm_rssi_hyst; struct cfg80211_chan_def chandef; struct ieee80211_mu_group_data mu_group; __be32 
arp_addr_list[4U]; int arp_addr_cnt; bool qos; bool idle; bool ps; u8 ssid[32U]; size_t ssid_len; bool hidden_ssid; int txpower; enum nl80211_tx_power_setting txpower_type; struct ieee80211_p2p_noa_attr p2p_noa_attr; bool allow_p2p_go_ps; } ; 628 struct ieee80211_tx_rate { s8 idx; unsigned char count; unsigned short flags; } ; 852 struct __anonstruct____missing_field_name_634 { struct ieee80211_tx_rate rates[4U]; s8 rts_cts_rate_idx; unsigned char use_rts; unsigned char use_cts_prot; unsigned char short_preamble; unsigned char skip_table; } ; 852 union __anonunion____missing_field_name_633 { struct __anonstruct____missing_field_name_634 __annonCompField135; unsigned long jiffies; } ; 852 union __anonunion____missing_field_name_635 { struct ieee80211_vif *vif; codel_time_t enqueue_time; } ; 852 struct ieee80211_key_conf ; 852 struct __anonstruct_control_632 { union __anonunion____missing_field_name_633 __annonCompField136; union __anonunion____missing_field_name_635 __annonCompField137; struct ieee80211_key_conf *hw_key; u32 flags; } ; 852 struct __anonstruct_ack_636 { u64 cookie; } ; 852 struct __anonstruct_status_637 { struct ieee80211_tx_rate rates[4U]; s32 ack_signal; u8 ampdu_ack_len; u8 ampdu_len; u8 antenna; u16 tx_time; void *status_driver_data[2U]; } ; 852 struct __anonstruct____missing_field_name_638 { struct ieee80211_tx_rate driver_rates[4U]; u8 pad[4U]; void *rate_driver_data[3U]; } ; 852 union __anonunion____missing_field_name_631 { struct __anonstruct_control_632 control; struct __anonstruct_ack_636 ack; struct __anonstruct_status_637 status; struct __anonstruct____missing_field_name_638 __annonCompField138; void *driver_data[5U]; } ; 852 struct ieee80211_tx_info { u32 flags; u8 band; u8 hw_queue; u16 ack_frame_id; union __anonunion____missing_field_name_631 __annonCompField139; } ; 940 struct ieee80211_scan_ies { const u8 *ies[3U]; size_t len[3U]; const u8 *common_ies; size_t common_ie_len; } ; 967 struct ieee80211_rx_status ; 1046 struct 
ieee80211_rx_status { u64 mactime; u64 boottime_ns; u32 device_timestamp; u32 ampdu_reference; u64 flag; u16 freq; u8 vht_flag; u8 rate_idx; u8 vht_nss; u8 rx_flags; u8 band; u8 antenna; s8 signal; u8 chains; s8 chain_signal[4U]; u8 ampdu_delimiter_crc; } ; 1247 enum ieee80211_smps_mode { IEEE80211_SMPS_AUTOMATIC = 0, IEEE80211_SMPS_OFF = 1, IEEE80211_SMPS_STATIC = 2, IEEE80211_SMPS_DYNAMIC = 3, IEEE80211_SMPS_NUM_MODES = 4 } ; 1255 struct ieee80211_conf { u32 flags; int power_level; int dynamic_ps_timeout; u16 listen_interval; u8 ps_dtim_period; u8 long_frame_max_tx_count; u8 short_frame_max_tx_count; struct cfg80211_chan_def chandef; bool radar_enabled; enum ieee80211_smps_mode smps_mode; } ; 1352 struct ieee80211_channel_switch { u64 timestamp; u32 device_timestamp; bool block_tx; struct cfg80211_chan_def chandef; u8 count; } ; 1384 struct ieee80211_txq ; 1384 struct ieee80211_vif { enum nl80211_iftype type; struct ieee80211_bss_conf bss_conf; u8 addr[6U]; bool p2p; bool csa_active; bool mu_mimo_owner; u8 cab_queue; u8 hw_queue[4U]; struct ieee80211_txq *txq; struct ieee80211_chanctx_conf *chanctx_conf; u32 driver_flags; struct dentry *debugfs_dir; unsigned int probe_req_reg; u8 drv_priv[0U]; } ; 1509 struct ieee80211_key_conf { atomic64_t tx_pn; u32 cipher; u8 icv_len; u8 iv_len; u8 hw_key_idx; u8 flags; s8 keyidx; u8 keylen; u8 key[0U]; } ; 1582 struct __anonstruct_tkip_640 { u32 iv32; u16 iv16; } ; 1582 struct __anonstruct_ccmp_641 { u8 pn[6U]; } ; 1582 struct __anonstruct_aes_cmac_642 { u8 pn[6U]; } ; 1582 struct __anonstruct_aes_gmac_643 { u8 pn[6U]; } ; 1582 struct __anonstruct_gcmp_644 { u8 pn[6U]; } ; 1582 struct __anonstruct_hw_645 { u8 seq[16U]; u8 seq_len; } ; 1582 union __anonunion____missing_field_name_639 { struct __anonstruct_tkip_640 tkip; struct __anonstruct_ccmp_641 ccmp; struct __anonstruct_aes_cmac_642 aes_cmac; struct __anonstruct_aes_gmac_643 aes_gmac; struct __anonstruct_gcmp_644 gcmp; struct __anonstruct_hw_645 hw; } ; 1582 struct 
ieee80211_key_seq { union __anonunion____missing_field_name_639 __annonCompField140; } ; 1626 struct ieee80211_cipher_scheme { u32 cipher; u16 iftype; u8 hdr_len; u8 pn_len; u8 pn_off; u8 key_idx_off; u8 key_idx_mask; u8 key_idx_shift; u8 mic_len; } ; 1656 enum set_key_cmd { SET_KEY = 0, DISABLE_KEY = 1 } ; 1661 enum ieee80211_sta_state { IEEE80211_STA_NOTEXIST = 0, IEEE80211_STA_NONE = 1, IEEE80211_STA_AUTH = 2, IEEE80211_STA_ASSOC = 3, IEEE80211_STA_AUTHORIZED = 4 } ; 1669 enum ieee80211_sta_rx_bandwidth { IEEE80211_STA_RX_BW_20 = 0, IEEE80211_STA_RX_BW_40 = 1, IEEE80211_STA_RX_BW_80 = 2, IEEE80211_STA_RX_BW_160 = 3 } ; 1676 struct __anonstruct_rate_646 { s8 idx; u8 count; u8 count_cts; u8 count_rts; u16 flags; } ; 1676 struct ieee80211_sta_rates { struct callback_head callback_head; struct __anonstruct_rate_646 rate[4U]; } ; 1724 struct ieee80211_sta { u32 supp_rates[3U]; u8 addr[6U]; u16 aid; struct ieee80211_sta_ht_cap ht_cap; struct ieee80211_sta_vht_cap vht_cap; u8 max_rx_aggregation_subframes; bool wme; u8 uapsd_queues; u8 max_sp; u8 rx_nss; enum ieee80211_sta_rx_bandwidth bandwidth; enum ieee80211_smps_mode smps_mode; struct ieee80211_sta_rates *rates; bool tdls; bool tdls_initiator; bool mfp; u8 max_amsdu_subframes; u16 max_amsdu_len; bool support_p2p_ps; u16 max_rc_amsdu_len; struct ieee80211_txq *txq[16U]; u8 drv_priv[0U]; } ; 1804 enum sta_notify_cmd { STA_NOTIFY_SLEEP = 0, STA_NOTIFY_AWAKE = 1 } ; 1809 struct ieee80211_tx_control { struct ieee80211_sta *sta; } ; 1827 struct ieee80211_txq { struct ieee80211_vif *vif; struct ieee80211_sta *sta; u8 tid; u8 ac; u8 drv_priv[0U]; } ; 1849 enum ieee80211_hw_flags { IEEE80211_HW_HAS_RATE_CONTROL = 0, IEEE80211_HW_RX_INCLUDES_FCS = 1, IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING = 2, IEEE80211_HW_SIGNAL_UNSPEC = 3, IEEE80211_HW_SIGNAL_DBM = 4, IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC = 5, IEEE80211_HW_SPECTRUM_MGMT = 6, IEEE80211_HW_AMPDU_AGGREGATION = 7, IEEE80211_HW_SUPPORTS_PS = 8, IEEE80211_HW_PS_NULLFUNC_STACK = 
9, IEEE80211_HW_SUPPORTS_DYNAMIC_PS = 10, IEEE80211_HW_MFP_CAPABLE = 11, IEEE80211_HW_WANT_MONITOR_VIF = 12, IEEE80211_HW_NO_AUTO_VIF = 13, IEEE80211_HW_SW_CRYPTO_CONTROL = 14, IEEE80211_HW_SUPPORT_FAST_XMIT = 15, IEEE80211_HW_REPORTS_TX_ACK_STATUS = 16, IEEE80211_HW_CONNECTION_MONITOR = 17, IEEE80211_HW_QUEUE_CONTROL = 18, IEEE80211_HW_SUPPORTS_PER_STA_GTK = 19, IEEE80211_HW_AP_LINK_PS = 20, IEEE80211_HW_TX_AMPDU_SETUP_IN_HW = 21, IEEE80211_HW_SUPPORTS_RC_TABLE = 22, IEEE80211_HW_P2P_DEV_ADDR_FOR_INTF = 23, IEEE80211_HW_TIMING_BEACON_ONLY = 24, IEEE80211_HW_SUPPORTS_HT_CCK_RATES = 25, IEEE80211_HW_CHANCTX_STA_CSA = 26, IEEE80211_HW_SUPPORTS_CLONED_SKBS = 27, IEEE80211_HW_SINGLE_SCAN_ON_ALL_BANDS = 28, IEEE80211_HW_TDLS_WIDER_BW = 29, IEEE80211_HW_SUPPORTS_AMSDU_IN_AMPDU = 30, IEEE80211_HW_BEACON_TX_STATUS = 31, IEEE80211_HW_NEEDS_UNIQUE_STA_ADDR = 32, IEEE80211_HW_SUPPORTS_REORDERING_BUFFER = 33, IEEE80211_HW_USES_RSS = 34, IEEE80211_HW_TX_AMSDU = 35, IEEE80211_HW_TX_FRAG_LIST = 36, IEEE80211_HW_REPORTS_LOW_ACK = 37, NUM_IEEE80211_HW_FLAGS = 38 } ; 1891 struct __anonstruct_radiotap_timestamp_647 { int units_pos; s16 accuracy; } ; 1891 struct ieee80211_hw { struct ieee80211_conf conf; struct wiphy *wiphy; const char *rate_control_algorithm; void *priv; unsigned long flags[1U]; unsigned int extra_tx_headroom; unsigned int extra_beacon_tailroom; int vif_data_size; int sta_data_size; int chanctx_data_size; int txq_data_size; u16 queues; u16 max_listen_interval; s8 max_signal; u8 max_rates; u8 max_report_rates; u8 max_rate_tries; u8 max_rx_aggregation_subframes; u8 max_tx_aggregation_subframes; u8 max_tx_fragments; u8 offchannel_tx_hw_queue; u8 radiotap_mcs_details; u16 radiotap_vht_details; struct __anonstruct_radiotap_timestamp_647 radiotap_timestamp; netdev_features_t netdev_features; u8 uapsd_queues; u8 uapsd_max_sp_len; u8 n_cipher_schemes; const struct ieee80211_cipher_scheme *cipher_schemes; u8 max_nan_de_entries; } ; 2233 struct ieee80211_scan_request { struct 
ieee80211_scan_ies ies; struct cfg80211_scan_request req; } ; 2247 struct ieee80211_tdls_ch_sw_params { struct ieee80211_sta *sta; struct cfg80211_chan_def *chandef; u8 action_code; u32 status; u32 timestamp; u16 switch_time; u16 switch_timeout; struct sk_buff *tmpl_skb; u32 ch_sw_tm_ie; } ; 2357 enum ieee80211_ampdu_mlme_action { IEEE80211_AMPDU_RX_START = 0, IEEE80211_AMPDU_RX_STOP = 1, IEEE80211_AMPDU_TX_START = 2, IEEE80211_AMPDU_TX_STOP_CONT = 3, IEEE80211_AMPDU_TX_STOP_FLUSH = 4, IEEE80211_AMPDU_TX_STOP_FLUSH_CONT = 5, IEEE80211_AMPDU_TX_OPERATIONAL = 6 } ; 2367 struct ieee80211_ampdu_params { enum ieee80211_ampdu_mlme_action action; struct ieee80211_sta *sta; u16 tid; u16 ssn; u8 buf_size; bool amsdu; u16 timeout; } ; 2847 enum ieee80211_frame_release_type { IEEE80211_FRAME_RELEASE_PSPOLL = 0, IEEE80211_FRAME_RELEASE_UAPSD = 1 } ; 2859 enum ieee80211_roc_type { IEEE80211_ROC_TYPE_NORMAL = 0, IEEE80211_ROC_TYPE_MGMT_TX = 1 } ; 2864 enum ieee80211_reconfig_type { IEEE80211_RECONFIG_TYPE_RESTART = 0, IEEE80211_RECONFIG_TYPE_SUSPEND = 1 } ; 2869 struct ieee80211_ops { void (*tx)(struct ieee80211_hw *, struct ieee80211_tx_control *, struct sk_buff *); int (*start)(struct ieee80211_hw *); void (*stop)(struct ieee80211_hw *); int (*suspend)(struct ieee80211_hw *, struct cfg80211_wowlan *); int (*resume)(struct ieee80211_hw *); void (*set_wakeup)(struct ieee80211_hw *, bool ); int (*add_interface)(struct ieee80211_hw *, struct ieee80211_vif *); int (*change_interface)(struct ieee80211_hw *, struct ieee80211_vif *, enum nl80211_iftype , bool ); void (*remove_interface)(struct ieee80211_hw *, struct ieee80211_vif *); int (*config)(struct ieee80211_hw *, u32 ); void (*bss_info_changed)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_bss_conf *, u32 ); int (*start_ap)(struct ieee80211_hw *, struct ieee80211_vif *); void (*stop_ap)(struct ieee80211_hw *, struct ieee80211_vif *); u64 (*prepare_multicast)(struct ieee80211_hw *, struct netdev_hw_addr_list 
*); void (*configure_filter)(struct ieee80211_hw *, unsigned int, unsigned int *, u64 ); void (*config_iface_filter)(struct ieee80211_hw *, struct ieee80211_vif *, unsigned int, unsigned int); int (*set_tim)(struct ieee80211_hw *, struct ieee80211_sta *, bool ); int (*set_key)(struct ieee80211_hw *, enum set_key_cmd , struct ieee80211_vif *, struct ieee80211_sta *, struct ieee80211_key_conf *); void (*update_tkip_key)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_key_conf *, struct ieee80211_sta *, u32 , u16 *); void (*set_rekey_data)(struct ieee80211_hw *, struct ieee80211_vif *, struct cfg80211_gtk_rekey_data *); void (*set_default_unicast_key)(struct ieee80211_hw *, struct ieee80211_vif *, int); int (*hw_scan)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_scan_request *); void (*cancel_hw_scan)(struct ieee80211_hw *, struct ieee80211_vif *); int (*sched_scan_start)(struct ieee80211_hw *, struct ieee80211_vif *, struct cfg80211_sched_scan_request *, struct ieee80211_scan_ies *); int (*sched_scan_stop)(struct ieee80211_hw *, struct ieee80211_vif *); void (*sw_scan_start)(struct ieee80211_hw *, struct ieee80211_vif *, const u8 *); void (*sw_scan_complete)(struct ieee80211_hw *, struct ieee80211_vif *); int (*get_stats)(struct ieee80211_hw *, struct ieee80211_low_level_stats *); void (*get_key_seq)(struct ieee80211_hw *, struct ieee80211_key_conf *, struct ieee80211_key_seq *); int (*set_frag_threshold)(struct ieee80211_hw *, u32 ); int (*set_rts_threshold)(struct ieee80211_hw *, u32 ); int (*sta_add)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *); int (*sta_remove)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *); void (*sta_add_debugfs)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *, struct dentry *); void (*sta_notify)(struct ieee80211_hw *, struct ieee80211_vif *, enum sta_notify_cmd , struct ieee80211_sta *); int (*sta_state)(struct ieee80211_hw *, struct 
ieee80211_vif *, struct ieee80211_sta *, enum ieee80211_sta_state , enum ieee80211_sta_state ); void (*sta_pre_rcu_remove)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *); void (*sta_rc_update)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *, u32 ); void (*sta_rate_tbl_update)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *); void (*sta_statistics)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *, struct station_info *); int (*conf_tx)(struct ieee80211_hw *, struct ieee80211_vif *, u16 , const struct ieee80211_tx_queue_params *); u64 (*get_tsf)(struct ieee80211_hw *, struct ieee80211_vif *); void (*set_tsf)(struct ieee80211_hw *, struct ieee80211_vif *, u64 ); void (*offset_tsf)(struct ieee80211_hw *, struct ieee80211_vif *, s64 ); void (*reset_tsf)(struct ieee80211_hw *, struct ieee80211_vif *); int (*tx_last_beacon)(struct ieee80211_hw *); int (*ampdu_action)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_ampdu_params *); int (*get_survey)(struct ieee80211_hw *, int, struct survey_info *); void (*rfkill_poll)(struct ieee80211_hw *); void (*set_coverage_class)(struct ieee80211_hw *, s16 ); int (*testmode_cmd)(struct ieee80211_hw *, struct ieee80211_vif *, void *, int); int (*testmode_dump)(struct ieee80211_hw *, struct sk_buff *, struct netlink_callback *, void *, int); void (*flush)(struct ieee80211_hw *, struct ieee80211_vif *, u32 , bool ); void (*channel_switch)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_channel_switch *); int (*set_antenna)(struct ieee80211_hw *, u32 , u32 ); int (*get_antenna)(struct ieee80211_hw *, u32 *, u32 *); int (*remain_on_channel)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_channel *, int, enum ieee80211_roc_type ); int (*cancel_remain_on_channel)(struct ieee80211_hw *); int (*set_ringparam)(struct ieee80211_hw *, u32 , u32 ); void (*get_ringparam)(struct ieee80211_hw *, u32 *, u32 *, u32 
*, u32 *); bool (*tx_frames_pending)(struct ieee80211_hw *); int (*set_bitrate_mask)(struct ieee80211_hw *, struct ieee80211_vif *, const struct cfg80211_bitrate_mask *); void (*event_callback)(struct ieee80211_hw *, struct ieee80211_vif *, const struct ieee80211_event *); void (*allow_buffered_frames)(struct ieee80211_hw *, struct ieee80211_sta *, u16 , int, enum ieee80211_frame_release_type , bool ); void (*release_buffered_frames)(struct ieee80211_hw *, struct ieee80211_sta *, u16 , int, enum ieee80211_frame_release_type , bool ); int (*get_et_sset_count)(struct ieee80211_hw *, struct ieee80211_vif *, int); void (*get_et_stats)(struct ieee80211_hw *, struct ieee80211_vif *, struct ethtool_stats *, u64 *); void (*get_et_strings)(struct ieee80211_hw *, struct ieee80211_vif *, u32 , u8 *); void (*mgd_prepare_tx)(struct ieee80211_hw *, struct ieee80211_vif *); void (*mgd_protect_tdls_discover)(struct ieee80211_hw *, struct ieee80211_vif *); int (*add_chanctx)(struct ieee80211_hw *, struct ieee80211_chanctx_conf *); void (*remove_chanctx)(struct ieee80211_hw *, struct ieee80211_chanctx_conf *); void (*change_chanctx)(struct ieee80211_hw *, struct ieee80211_chanctx_conf *, u32 ); int (*assign_vif_chanctx)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_chanctx_conf *); void (*unassign_vif_chanctx)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_chanctx_conf *); int (*switch_vif_chanctx)(struct ieee80211_hw *, struct ieee80211_vif_chanctx_switch *, int, enum ieee80211_chanctx_switch_mode ); void (*reconfig_complete)(struct ieee80211_hw *, enum ieee80211_reconfig_type ); void (*ipv6_addr_change)(struct ieee80211_hw *, struct ieee80211_vif *, struct inet6_dev *); void (*channel_switch_beacon)(struct ieee80211_hw *, struct ieee80211_vif *, struct cfg80211_chan_def *); int (*pre_channel_switch)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_channel_switch *); int (*post_channel_switch)(struct ieee80211_hw *, struct 
ieee80211_vif *); int (*join_ibss)(struct ieee80211_hw *, struct ieee80211_vif *); void (*leave_ibss)(struct ieee80211_hw *, struct ieee80211_vif *); u32 (*get_expected_throughput)(struct ieee80211_hw *, struct ieee80211_sta *); int (*get_txpower)(struct ieee80211_hw *, struct ieee80211_vif *, int *); int (*tdls_channel_switch)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *, u8 , struct cfg80211_chan_def *, struct sk_buff *, u32 ); void (*tdls_cancel_channel_switch)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *); void (*tdls_recv_channel_switch)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_tdls_ch_sw_params *); void (*wake_tx_queue)(struct ieee80211_hw *, struct ieee80211_txq *); void (*sync_rx_queues)(struct ieee80211_hw *); int (*start_nan)(struct ieee80211_hw *, struct ieee80211_vif *, struct cfg80211_nan_conf *); int (*stop_nan)(struct ieee80211_hw *, struct ieee80211_vif *); int (*nan_change_conf)(struct ieee80211_hw *, struct ieee80211_vif *, struct cfg80211_nan_conf *, u32 ); int (*add_nan_func)(struct ieee80211_hw *, struct ieee80211_vif *, const struct cfg80211_nan_func *); void (*del_nan_func)(struct ieee80211_hw *, struct ieee80211_vif *, u8 ); } ; 5803 struct adm8211_csr { __le32 PAR; __le32 FRCTL; __le32 TDR; __le32 WTDP; __le32 RDR; __le32 WRDP; __le32 RDB; __le32 TDBH; __le32 TDBD; __le32 TDBP; __le32 STSR; __le32 TDBB; __le32 NAR; __le32 CSR6A; __le32 IER; __le32 TKIPSCEP; __le32 LPC; __le32 CSR_TEST1; __le32 SPR; __le32 CSR_TEST0; __le32 WCSR; __le32 WPDR; __le32 GPTMR; __le32 GPIO; __le32 BBPCTL; __le32 SYNCTL; __le32 PLCPHD; __le32 MMIWA; __le32 MMIRD0; __le32 MMIRD1; __le32 TXBR; __le32 SYNDATA; __le32 ALCS; __le32 TOFS2; __le32 CMDR; __le32 PCIC; __le32 PMCSR; __le32 PAR0; __le32 PAR1; __le32 MAR0; __le32 MAR1; __le32 ATIMDA0; __le32 ABDA1; __le32 BSSID0; __le32 TXLMT; __le32 MIBCNT; __le32 BCNT; __le32 TSFTH; __le32 TSC; __le32 SYNRF; __le32 BPLI; __le32 CAP0; __le32 CAP1; 
__le32 RMD; __le32 CFPP; __le32 TOFS0; __le32 TOFS1; __le32 IFST; __le32 RSPT; __le32 TSFTL; __le32 WEPCTL; __le32 WESK; __le32 WEPCNT; __le32 MACTEST; __le32 FER; __le32 FEMR; __le32 FPSR; __le32 FFER; } ; 84 struct adm8211_desc { __le32 status; __le32 length; __le32 buffer1; __le32 buffer2; } ; 369 struct adm8211_rx_ring_info { struct sk_buff *skb; dma_addr_t mapping; } ; 442 struct adm8211_tx_ring_info { struct sk_buff *skb; dma_addr_t mapping; size_t hdrlen; } ; 448 struct adm8211_tx_hdr { u8 da[6U]; u8 signal; u8 service; __le16 frame_body_size; __le16 frame_control; __le16 plcp_frag_tail_len; __le16 plcp_frag_head_len; __le16 dur_frag_tail; __le16 dur_frag_head; u8 addr4[6U]; __le16 header_control; __le16 frag; u8 reserved_0; u8 retry_limit; u32 wep2key0; u32 wep2key1; u32 wep2key2; u32 wep2key3; u8 keyid; u8 entry_control; u16 reserved_1; u32 reserved_2; } ; 488 struct adm8211_eeprom { __le16 signature; u8 major_version; u8 minor_version; u8 reserved_1[4U]; u8 hwaddr[6U]; u8 reserved_2[8U]; __le16 cr49; u8 cr03; u8 cr28; u8 cr29; u8 country_code; u8 specific_bbptype; u8 specific_rftype; u8 reserved_3[2U]; __le16 device_id; __le16 vendor_id; __le16 subsystem_id; __le16 subsystem_vendor_id; u8 maxlat; u8 mingnt; __le16 cis_pointer_low; __le16 cis_pointer_high; __le16 csr18; u8 reserved_4[16U]; u8 d1_pwrdara; u8 d0_pwrdara; u8 d3_pwrdara; u8 d2_pwrdara; u8 antenna_power[14U]; __le16 cis_wordcnt; u8 tx_power[14U]; u8 lpf_cutoff[14U]; u8 lnags_threshold[14U]; __le16 checksum; u8 cis_data[0U]; } ; 535 enum ldv_43386 { ADM8211_RFMD2948 = 0, ADM8211_RFMD2958 = 1, ADM8211_RFMD2958_RF3000_CONTROL_POWER = 2, ADM8211_MAX2820 = 8, ADM8211_AL2210L = 12 } ; 543 struct adm8211_priv { struct pci_dev *pdev; spinlock_t lock; struct adm8211_csr *map; struct adm8211_desc *rx_ring; struct adm8211_desc *tx_ring; dma_addr_t rx_ring_dma; dma_addr_t tx_ring_dma; struct adm8211_rx_ring_info *rx_buffers; struct adm8211_tx_ring_info *tx_buffers; unsigned int rx_ring_size; unsigned int 
tx_ring_size; unsigned int cur_tx; unsigned int dirty_tx; unsigned int cur_rx; struct ieee80211_low_level_stats stats; struct ieee80211_supported_band band; struct ieee80211_channel channels[14U]; int mode; int channel; u8 bssid[6U]; u8 soft_rx_crc; u8 retry_limit; u8 ant_power; u8 tx_power; u8 lpf_cutoff; u8 lnags_threshold; struct adm8211_eeprom *eeprom; size_t eeprom_len; u32 nar; unsigned char rf_type; unsigned char bbp_type; u8 specific_bbptype; enum ldv_43386 transceiver_type; } ; 586 struct ieee80211_chan_range { u8 min; u8 max; } ; 1 long int __builtin_expect(long, long); 137 void __clear_bit(long nr, volatile unsigned long *addr); 163 int printk(const char *, ...); 8 void ldv_dma_map_page(); 71 void warn_slowpath_null(const char *, const int); 9 extern unsigned long vmemmap_base; 23 unsigned long int __phys_addr(unsigned long); 32 void * __memcpy(void *, const void *, size_t ); 57 void * __memset(void *, int, size_t ); 93 void __raw_spin_lock_init(raw_spinlock_t *, const char *, struct lock_class_key *); 22 void _raw_spin_lock(raw_spinlock_t *); 34 unsigned long int _raw_spin_lock_irqsave(raw_spinlock_t *); 41 void _raw_spin_unlock(raw_spinlock_t *); 45 void _raw_spin_unlock_irqrestore(raw_spinlock_t *, unsigned long); 289 raw_spinlock_t * spinlock_check(spinlock_t *lock); 300 void spin_lock(spinlock_t *lock); 345 void spin_unlock(spinlock_t *lock); 360 void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags); 31 unsigned int ioread32(void *); 41 void iowrite32(u32 , void *); 84 void pci_iounmap(struct pci_dev *, void *); 17 void * pci_iomap(struct pci_dev *, int, unsigned long); 87 const char * kobject_name(const struct kobject *kobj); 139 int request_threaded_irq(unsigned int, irqreturn_t (*)(int, void *), irqreturn_t (*)(int, void *), unsigned long, const char *, void *); 144 int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *), unsigned long flags, const char *name, void *dev); 158 void free_irq(unsigned int, void *); 131 
void kmemcheck_mark_initialized(void *address, unsigned int n); 36 void get_random_bytes(void *, int); 233 int net_ratelimit(); 154 void kfree(const void *); 330 void * __kmalloc(size_t , gfp_t ); 478 void * kmalloc(size_t size, gfp_t flags); 868 const char * dev_name(const struct device *dev); 915 void * dev_get_drvdata(const struct device *dev); 920 void dev_set_drvdata(struct device *dev, void *data); 1129 void dev_printk(const char *, const struct device *, const char *, ...); 1138 void dev_err(const struct device *, const char *, ...); 1144 void _dev_info(const struct device *, const char *, ...); 37 void debug_dma_map_page(struct device *, struct page *, size_t , size_t , int, dma_addr_t , bool ); 44 void debug_dma_unmap_page(struct device *, dma_addr_t , size_t , int, bool ); 53 void debug_dma_alloc_coherent(struct device *, size_t , dma_addr_t , void *); 66 void debug_dma_sync_single_for_cpu(struct device *, dma_addr_t , size_t , int); 70 void debug_dma_sync_single_for_device(struct device *, dma_addr_t , size_t , int); 136 int valid_dma_direction(int dma_direction); 28 extern struct dma_map_ops *dma_ops; 30 struct dma_map_ops * get_dma_ops(struct device *dev); 42 bool arch_dma_alloc_attrs(struct device **, gfp_t *); 46 int dma_supported(struct device *, u64 ); 180 dma_addr_t ldv_dma_map_single_attrs_5(struct device *dev, void *ptr, size_t size, enum dma_data_direction dir, unsigned long attrs); 180 dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr, size_t size, enum dma_data_direction dir, unsigned long attrs); 203 void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir, unsigned long attrs); 315 void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir); 327 void dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir); 450 void * dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t 
*dma_handle, gfp_t flag, unsigned long attrs); 491 void * dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag); 497 void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_handle); 549 int dma_set_mask(struct device *dev, u64 mask); 573 int dma_set_coherent_mask(struct device *dev, u64 mask); 904 void consume_skb(struct sk_buff *); 1874 unsigned char * skb_tail_pointer(const struct sk_buff *skb); 1912 unsigned char * skb_put(struct sk_buff *, unsigned int); 1922 unsigned char * skb_push(struct sk_buff *, unsigned int); 1930 unsigned char * skb_pull(struct sk_buff *, unsigned int); 2419 struct sk_buff * __netdev_alloc_skb(struct net_device *, unsigned int, gfp_t ); 2435 struct sk_buff * netdev_alloc_skb(struct net_device *dev, unsigned int length); 2449 struct sk_buff * dev_alloc_skb(unsigned int length); 10 void __const_udelay(unsigned long); 46 void msleep(unsigned int); 96 bool is_zero_ether_addr(const u8 *addr); 114 bool is_multicast_ether_addr(const u8 *addr); 189 bool is_valid_ether_addr(const u8 *addr); 221 void eth_random_addr(u8 *addr); 313 bool ether_addr_equal(const u8 *addr1, const u8 *addr2); 906 int pci_bus_read_config_byte(struct pci_bus *, unsigned int, int, u8 *); 910 int pci_bus_read_config_dword(struct pci_bus *, unsigned int, int, u32 *); 930 int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val); 938 int pci_read_config_dword(const struct pci_dev *dev, int where, u32 *val); 998 int pci_enable_device(struct pci_dev *); 1015 void pci_disable_device(struct pci_dev *); 1018 void pci_set_master(struct pci_dev *); 1024 int pci_set_mwi(struct pci_dev *); 1071 int pci_save_state(struct pci_dev *); 1072 void pci_restore_state(struct pci_dev *); 1085 int pci_set_power_state(struct pci_dev *, pci_power_t ); 1086 pci_power_t pci_choose_state(struct pci_dev *, pm_message_t ); 1144 int pci_request_regions(struct pci_dev *, const char *); 1146 void pci_release_regions(struct 
pci_dev *); 1631 void * pci_get_drvdata(struct pci_dev *pdev); 1636 void pci_set_drvdata(struct pci_dev *pdev, void *data); 1644 const char * pci_name(const struct pci_dev *pdev); 16 void * pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle); 31 void pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle); 38 dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction); 44 void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int direction); 79 void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction); 86 void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction); 113 int pci_set_dma_mask(struct pci_dev *dev, u64 mask); 118 int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask); 14 extern const u8 byte_rev_table[256U]; 15 u8 __bitrev8(u8 byte); 20 u16 __bitrev16(u16 x); 25 u32 __bitrev32(u32 x); 11 u32 crc32_le(u32 , const unsigned char *, size_t ); 76 void eeprom_93cx6_multiread(struct eeprom_93cx6 *, const u8 , __le16 *, const u16 ); 266 bool ieee80211_has_tods(__le16 fc); 2419 u8 * ieee80211_get_DA(struct ieee80211_hdr *hdr); 3619 void set_wiphy_dev(struct wiphy *wiphy, struct device *dev); 3876 int ieee80211_frequency_to_channel(int); 4030 unsigned int ieee80211_hdrlen(__le16 ); 962 struct ieee80211_tx_info * IEEE80211_SKB_CB(struct sk_buff *skb); 967 struct ieee80211_rx_status * IEEE80211_SKB_RXCB(struct sk_buff *skb); 987 void ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info); 2228 void _ieee80211_hw_set(struct ieee80211_hw *hw, enum ieee80211_hw_flags flg); 2294 void SET_IEEE80211_DEV(struct ieee80211_hw *hw, struct device *dev); 2305 void SET_IEEE80211_PERM_ADDR(struct ieee80211_hw *hw, const u8 *addr); 2311 struct ieee80211_rate * ieee80211_get_tx_rate(const struct ieee80211_hw *hw, const struct ieee80211_tx_info *c); 3719 
struct ieee80211_hw * ieee80211_alloc_hw_nm(size_t , const struct ieee80211_ops *, const char *); 3738 struct ieee80211_hw * ieee80211_alloc_hw(size_t priv_data_len, const struct ieee80211_ops *ops); 3755 int ieee80211_register_hw(struct ieee80211_hw *); 3909 void ieee80211_unregister_hw(struct ieee80211_hw *); 3920 void ieee80211_free_hw(struct ieee80211_hw *); 3999 void ieee80211_rx_irqsafe(struct ieee80211_hw *, struct sk_buff *); 4220 void ieee80211_tx_status_irqsafe(struct ieee80211_hw *, struct sk_buff *); 4711 void ieee80211_wake_queue(struct ieee80211_hw *, int); 4720 void ieee80211_stop_queue(struct ieee80211_hw *, int); 592 const struct ieee80211_chan_range cranges[7U] = { { 1U, 11U }, { 1U, 11U }, { 1U, 13U }, { 10U, 11U }, { 10U, 13U }, { 14U, 14U }, { 1U, 14U } }; 39 unsigned int tx_ring_size = 16U; 40 unsigned int rx_ring_size = 16U; 54 struct ieee80211_rate adm8211_rates[5U] = { { 1U, 10U, (unsigned short)0, (unsigned short)0 }, { 1U, 20U, (unsigned short)0, (unsigned short)0 }, { 1U, 55U, (unsigned short)0, (unsigned short)0 }, { 1U, 110U, (unsigned short)0, (unsigned short)0 }, { 1U, 220U, (unsigned short)0, (unsigned short)0 } }; 62 const struct ieee80211_channel adm8211_channels[14U] = { { 0, 2412U, (unsigned short)0, 0U, 0, 0, 0, (_Bool)0, 0U, 0, 0, 0, 0UL, 0U }, { 0, 2417U, (unsigned short)0, 0U, 0, 0, 0, (_Bool)0, 0U, 0, 0, 0, 0UL, 0U }, { 0, 2422U, (unsigned short)0, 0U, 0, 0, 0, (_Bool)0, 0U, 0, 0, 0, 0UL, 0U }, { 0, 2427U, (unsigned short)0, 0U, 0, 0, 0, (_Bool)0, 0U, 0, 0, 0, 0UL, 0U }, { 0, 2432U, (unsigned short)0, 0U, 0, 0, 0, (_Bool)0, 0U, 0, 0, 0, 0UL, 0U }, { 0, 2437U, (unsigned short)0, 0U, 0, 0, 0, (_Bool)0, 0U, 0, 0, 0, 0UL, 0U }, { 0, 2442U, (unsigned short)0, 0U, 0, 0, 0, (_Bool)0, 0U, 0, 0, 0, 0UL, 0U }, { 0, 2447U, (unsigned short)0, 0U, 0, 0, 0, (_Bool)0, 0U, 0, 0, 0, 0UL, 0U }, { 0, 2452U, (unsigned short)0, 0U, 0, 0, 0, (_Bool)0, 0U, 0, 0, 0, 0UL, 0U }, { 0, 2457U, (unsigned short)0, 0U, 0, 0, 0, (_Bool)0, 0U, 0, 0, 0, 0UL, 
0U }, { 0, 2462U, (unsigned short)0, 0U, 0, 0, 0, (_Bool)0, 0U, 0, 0, 0, 0UL, 0U }, { 0, 2467U, (unsigned short)0, 0U, 0, 0, 0, (_Bool)0, 0U, 0, 0, 0, 0UL, 0U }, { 0, 2472U, (unsigned short)0, 0U, 0, 0, 0, (_Bool)0, 0U, 0, 0, 0, 0UL, 0U }, { 0, 2484U, (unsigned short)0, 0U, 0, 0, 0, (_Bool)0, 0U, 0, 0, 0, 0UL, 0U } }; 80 void adm8211_eeprom_register_read(struct eeprom_93cx6 *eeprom); 91 void adm8211_eeprom_register_write(struct eeprom_93cx6 *eeprom); 109 int adm8211_read_eeprom(struct ieee80211_hw *dev); 246 void adm8211_write_sram(struct ieee80211_hw *dev, u32 addr, u32 data); 262 void adm8211_write_sram_bytes(struct ieee80211_hw *dev, unsigned int addr, u8 *buf, unsigned int len); 286 void adm8211_clear_sram(struct ieee80211_hw *dev); 298 int adm8211_get_stats(struct ieee80211_hw *dev, struct ieee80211_low_level_stats *stats); 308 void adm8211_interrupt_tci(struct ieee80211_hw *dev); 356 void adm8211_interrupt_rci(struct ieee80211_hw *dev); 457 irqreturn_t adm8211_interrupt(int irq, void *dev_id); 552 void adm8211_rf_write_syn_max2820(struct ieee80211_hw *dev, u16 addr, u32 value); 553 void adm8211_rf_write_syn_al2210l(struct ieee80211_hw *dev, u16 addr, u32 value); 554 void adm8211_rf_write_syn_rfmd2958(struct ieee80211_hw *dev, u16 addr, u32 value); 555 void adm8211_rf_write_syn_rfmd2948(struct ieee80211_hw *dev, u16 addr, u32 value); 559 int adm8211_write_bbp(struct ieee80211_hw *dev, u8 addr, u8 data); 619 int adm8211_rf_set_channel(struct ieee80211_hw *dev, unsigned int chan); 751 void adm8211_update_mode(struct ieee80211_hw *dev); 780 void adm8211_hw_init_syn(struct ieee80211_hw *dev); 838 int adm8211_hw_init_bbp(struct ieee80211_hw *dev); 1050 int adm8211_set_rate(struct ieee80211_hw *dev); 1087 void adm8211_hw_init(struct ieee80211_hw *dev); 1212 int adm8211_hw_reset(struct ieee80211_hw *dev); 1257 u64 adm8211_get_tsft(struct ieee80211_hw *dev, struct ieee80211_vif *vif); 1272 void adm8211_set_interval(struct ieee80211_hw *dev, unsigned short bi, unsigned 
short li); 1284 void adm8211_set_bssid(struct ieee80211_hw *dev, const u8 *bssid); 1296 int adm8211_config(struct ieee80211_hw *dev, u32 changed); 1311 void adm8211_bss_info_changed(struct ieee80211_hw *dev, struct ieee80211_vif *vif, struct ieee80211_bss_conf *conf, u32 changes); 1327 u64 adm8211_prepare_multicast(struct ieee80211_hw *hw, struct netdev_hw_addr_list *mc_list); 1346 void adm8211_configure_filter(struct ieee80211_hw *dev, unsigned int changed_flags, unsigned int *total_flags, u64 multicast); 1391 int adm8211_add_interface(struct ieee80211_hw *dev, struct ieee80211_vif *vif); 1418 void adm8211_remove_interface(struct ieee80211_hw *dev, struct ieee80211_vif *vif); 1425 int adm8211_init_rings(struct ieee80211_hw *dev); 1476 void adm8211_free_rings(struct ieee80211_hw *dev); 1506 int adm8211_start(struct ieee80211_hw *dev); 1549 void adm8211_stop(struct ieee80211_hw *dev); 1564 void adm8211_calc_durations(int *dur, int *plcp, size_t payload_len, int len, int plcp_signal, int short_preamble); 1617 void adm8211_tx_raw(struct ieee80211_hw *dev, struct sk_buff *skb, u16 plcp_signal, size_t hdrlen); 1664 void adm8211_tx(struct ieee80211_hw *dev, struct ieee80211_tx_control *control, struct sk_buff *skb); 1717 int adm8211_alloc_rings(struct ieee80211_hw *dev); 1750 const struct ieee80211_ops adm8211_ops = { &adm8211_tx, &adm8211_start, &adm8211_stop, 0, 0, 0, &adm8211_add_interface, 0, &adm8211_remove_interface, &adm8211_config, &adm8211_bss_info_changed, 0, 0, &adm8211_prepare_multicast, &adm8211_configure_filter, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, &adm8211_get_stats, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, &adm8211_get_tsft, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; 1764 int adm8211_probe(struct pci_dev *pdev, const struct pci_device_id *id); 1937 void adm8211_remove(struct pci_dev *pdev); 1964 int adm8211_suspend(struct pci_dev *pdev, 
pm_message_t state); 1971 int adm8211_resume(struct pci_dev *pdev); 1980 const struct pci_device_id __mod_pci__adm8211_pci_id_table_device_table[5U] = { }; 2012 void ldv_check_final_state(); 2015 void ldv_check_return_value(int); 2018 void ldv_check_return_value_probe(int); 2021 void ldv_initialize(); 2024 void ldv_handler_precall(); 2027 int nondet_int(); 2030 int LDV_IN_INTERRUPT = 0; 2033 void ldv_main0_sequence_infinite_withcheck_stateful(); 10 void ldv_error(); 7 bool ldv_is_err(const void *ptr); 14 void * ldv_err_ptr(long error); 21 long int ldv_ptr_err(const void *ptr); 28 bool ldv_is_err_or_null(const void *ptr); 5 int LDV_DMA_MAP_CALLS = 0; 16 void ldv_dma_mapping_error(); return ; } { 2035 struct eeprom_93cx6 *var_group1; 2036 struct ieee80211_hw *var_group2; 2037 struct ieee80211_tx_control *var_group3; 2038 struct sk_buff *var_adm8211_tx_33_p2; 2039 struct ieee80211_vif *var_group4; 2040 unsigned int var_adm8211_config_21_p1; 2041 struct ieee80211_bss_conf *var_adm8211_bss_info_changed_22_p2; 2042 unsigned int var_adm8211_bss_info_changed_22_p3; 2043 struct netdev_hw_addr_list *var_group5; 2044 unsigned int var_adm8211_configure_filter_24_p1; 2045 unsigned int *var_adm8211_configure_filter_24_p2; 2046 unsigned long long var_adm8211_configure_filter_24_p3; 2047 struct ieee80211_low_level_stats *var_group6; 2048 struct pci_dev *var_group7; 2049 const struct pci_device_id *var_adm8211_probe_35_p1; 2050 int res_adm8211_probe_35; 2051 struct pm_message var_adm8211_suspend_37_p1; 2052 int var_adm8211_interrupt_9_p0; 2053 void *var_adm8211_interrupt_9_p1; 2054 int ldv_s_adm8211_driver_pci_driver; 2055 int tmp; 2056 int tmp___0; 3427 ldv_s_adm8211_driver_pci_driver = 0; 3413 LDV_IN_INTERRUPT = 1; 3422 ldv_initialize() { /* Function call is skipped due to function is undefined */} 3433 goto ldv_65578; 3433 tmp___0 = nondet_int() { /* Function call is skipped due to function is undefined */} 3436 goto ldv_65577; 3434 ldv_65577:; 3437 tmp = nondet_int() { /* 
Function call is skipped due to function is undefined */} 3437 switch (tmp); 4955 LDV_IN_INTERRUPT = 2; 4960 ldv_handler_precall() { /* Function call is skipped due to function is undefined */} { } 459 struct ieee80211_hw *dev; 460 struct adm8211_priv *priv; 461 unsigned int stsr; 462 unsigned int tmp; 463 long tmp___0; 464 long tmp___1; 465 long tmp___2; 466 long tmp___3; 467 long tmp___4; 468 long tmp___5; 469 long tmp___6; 470 long tmp___7; 471 long tmp___8; 472 long tmp___9; 473 long tmp___10; 474 long tmp___11; 475 long tmp___12; 476 long tmp___13; 477 long tmp___14; 478 long tmp___15; 479 long tmp___16; 465 dev = (struct ieee80211_hw *)dev_id; 466 struct adm8211_priv *__CPAchecker_TMP_0 = (struct adm8211_priv *)(dev->priv); 466 priv = __CPAchecker_TMP_0; 467 tmp = ioread32((void *)(&(priv->map->STSR))) { /* Function call is skipped due to function is undefined */} 467 stsr = tmp; 468 iowrite32(stsr, (void *)(&(priv->map->STSR))) { /* Function call is skipped due to function is undefined */} { } 358 struct adm8211_priv *priv; 359 unsigned int entry; 360 unsigned int status; 361 unsigned int pktlen; 362 struct sk_buff *skb; 363 struct sk_buff *newskb; 364 unsigned int limit; 365 unsigned char rssi; 366 unsigned char rate; 367 unsigned int tmp; 368 int tmp___0; 369 unsigned char *tmp___1; 370 unsigned char *tmp___2; 371 unsigned char *tmp___3; 372 struct ieee80211_rx_status rx_status; 373 struct ieee80211_rx_status *tmp___4; 358 struct adm8211_priv *__CPAchecker_TMP_0 = (struct adm8211_priv *)(dev->priv); 358 priv = __CPAchecker_TMP_0; 359 entry = (priv->cur_rx) % (priv->rx_ring_size); 363 limit = priv->rx_ring_size; 366 goto ldv_65134; 366 int __CPAchecker_TMP_7 = (int)(((priv->rx_ring) + ((unsigned long)entry))->status); 368 goto ldv_65133; 367 ldv_65133:; 367 tmp = limit; 367 limit = limit - 1U; 370 status = ((priv->rx_ring) + ((unsigned long)entry))->status; 371 rate = (u8 )((status & 61440U) >> 12); 372 u8 __CPAchecker_TMP_1 = (u8 )(((priv->rx_ring) + 
((unsigned long)entry))->length); 372 rssi = ((unsigned int)__CPAchecker_TMP_1) & 127U; 375 pktlen = status & 4095U; 377 tmp___0 = net_ratelimit() { /* Function call is skipped due to function is undefined */} 378 dev_printk("\017", (const struct device *)(&(dev->wiphy->dev)), "frame too long (%d)\n", pktlen) { /* Function call is skipped due to function is undefined */} 380 pktlen = 2500U; 383 unsigned int __CPAchecker_TMP_2 = (unsigned int)(priv->soft_rx_crc); { 2451 struct sk_buff *tmp; { 2437 struct sk_buff *tmp; 2438 tmp = __netdev_alloc_skb(dev, length, 34078752U) { /* Function call is skipped due to function is undefined */} 2438 return tmp;; } 2451 return tmp;; } 405 skb = ((priv->rx_buffers) + ((unsigned long)entry))->skb; 406 skb_put(skb, pktlen) { /* Function call is skipped due to function is undefined */} 407 -pci_unmap_single(priv->pdev, ((priv->rx_buffers) + ((unsigned long)entry))->mapping, 2500UL, 2) { 47 struct device *__CPAchecker_TMP_0; 47 assume(!(((unsigned long)hwdev) != ((unsigned long)((struct pci_dev *)0)))); 47 __CPAchecker_TMP_0 = (struct device *)0; 47 -dma_unmap_single_attrs(__CPAchecker_TMP_0, dma_addr, size, (enum dma_data_direction )direction, 0UL) { 207 struct dma_map_ops *ops; 208 struct dma_map_ops *tmp; 209 int tmp___0; 210 long tmp___1; { 32 long tmp; 35 tmp = __builtin_expect(((unsigned long)dev) == ((unsigned long)((struct device *)0)), 0L) { /* Function call is skipped due to function is undefined */} 35 assume(tmp != 0L); 36 return dma_ops;; } 208 ops = tmp; { 138 int __CPAchecker_TMP_0; 138 assume(!(dma_direction == 0)); 138 assume(!(dma_direction == 1)); 138 assume(dma_direction == 2); __CPAchecker_TMP_0 = 1; 138 return __CPAchecker_TMP_0;; } 210 tmp___1 = __builtin_expect(tmp___0 == 0, 0L) { /* Function call is skipped due to function is undefined */} 210 assume(!(tmp___1 != 0L)); 211 unsigned long __CPAchecker_TMP_0 = (unsigned long)(ops->unmap_page); 211 assume(__CPAchecker_TMP_0 != ((unsigned long)((void (*)(struct 
device *, dma_addr_t , size_t , enum dma_data_direction , unsigned long))0))); 212 (*(ops->unmap_page))(dev, addr, size, dir, attrs); 213 debug_dma_unmap_page(dev, addr, size, (int)dir, 1) { /* Function call is skipped due to function is undefined */} 214 return ;; } 48 return ;; } 411 ((priv->rx_buffers) + ((unsigned long)entry))->skb = newskb; { 1876 unsigned char *__CPAchecker_TMP_0 = (unsigned char *)(skb->head); 1876 unsigned long __CPAchecker_TMP_1 = (unsigned long)(skb->tail); 1876 return __CPAchecker_TMP_0 + __CPAchecker_TMP_1;; } { 41 unsigned long long tmp; 40 struct device *__CPAchecker_TMP_0; 40 assume(!(((unsigned long)hwdev) != ((unsigned long)((struct pci_dev *)0)))); 40 __CPAchecker_TMP_0 = (struct device *)0; { 38 unsigned long long tmp; { } 184 struct dma_map_ops *ops; 185 struct dma_map_ops *tmp; 186 unsigned long long addr; 187 int tmp___0; 188 long tmp___1; 189 unsigned long tmp___2; 190 unsigned long tmp___3; { 32 long tmp; 35 tmp = __builtin_expect(((unsigned long)dev) == ((unsigned long)((struct device *)0)), 0L) { /* Function call is skipped due to function is undefined */} 35 assume(tmp != 0L); 36 return dma_ops;; } 185 ops = tmp; { 133 return ;; } { 138 int __CPAchecker_TMP_0; 138 assume(!(dma_direction == 0)); 138 assume(!(dma_direction == 1)); 138 assume(dma_direction == 2); __CPAchecker_TMP_0 = 1; 138 return __CPAchecker_TMP_0;; } 189 tmp___1 = __builtin_expect(tmp___0 == 0, 0L) { /* Function call is skipped due to function is undefined */} 189 assume(!(tmp___1 != 0L)); 190 tmp___2 = __phys_addr((unsigned long)ptr) { /* Function call is skipped due to function is undefined */} 190 addr = (*(ops->map_page))(dev, (struct page *)((tmp___2 >> 12) + vmemmap_base), ((unsigned long)ptr) & 4095UL, size, dir, attrs); 193 tmp___3 = __phys_addr((unsigned long)ptr) { /* Function call is skipped due to function is undefined */} 193 debug_dma_map_page(dev, (struct page *)((tmp___3 >> 12) + vmemmap_base), ((unsigned long)ptr) & 4095UL, size, 
(int)dir, addr, 1) { /* Function call is skipped due to function is undefined */} 196 return addr;; } 40 return tmp;; } 422 unsigned int __CPAchecker_TMP_4 = (unsigned int)(((priv->rx_buffers) + ((unsigned long)entry))->mapping); 422 ((priv->rx_ring) + ((unsigned long)entry))->buffer1 = __CPAchecker_TMP_4; 426 ((priv->rx_ring) + ((unsigned long)entry))->status = 2684354560U; 428 unsigned int __CPAchecker_TMP_5; 428 __CPAchecker_TMP_5 = 33556932U; 428 ((priv->rx_ring) + ((unsigned long)entry))->length = __CPAchecker_TMP_5; 450 priv->cur_rx = (priv->cur_rx) + 1U; 450 entry = (priv->cur_rx) % (priv->rx_ring_size); 451 ldv_65134:; 366 int __CPAchecker_TMP_7 = (int)(((priv->rx_ring) + ((unsigned long)entry))->status); 368 goto ldv_65133; 367 ldv_65133:; 367 tmp = limit; 367 limit = limit - 1U; 370 status = ((priv->rx_ring) + ((unsigned long)entry))->status; 371 rate = (u8 )((status & 61440U) >> 12); 372 u8 __CPAchecker_TMP_1 = (u8 )(((priv->rx_ring) + ((unsigned long)entry))->length); 372 rssi = ((unsigned int)__CPAchecker_TMP_1) & 127U; 375 pktlen = status & 4095U; 377 tmp___0 = net_ratelimit() { /* Function call is skipped due to function is undefined */} 378 dev_printk("\017", (const struct device *)(&(dev->wiphy->dev)), "frame too long (%d)\n", pktlen) { /* Function call is skipped due to function is undefined */} 380 pktlen = 2500U; 383 unsigned int __CPAchecker_TMP_2 = (unsigned int)(priv->soft_rx_crc); { 2451 struct sk_buff *tmp; { 2437 struct sk_buff *tmp; 2438 tmp = __netdev_alloc_skb(dev, length, 34078752U) { /* Function call is skipped due to function is undefined */} 2438 return tmp;; } 2451 return tmp;; } 405 skb = ((priv->rx_buffers) + ((unsigned long)entry))->skb; 406 skb_put(skb, pktlen) { /* Function call is skipped due to function is undefined */} 407 -pci_unmap_single(priv->pdev, ((priv->rx_buffers) + ((unsigned long)entry))->mapping, 2500UL, 2) { 47 struct device *__CPAchecker_TMP_0; 47 assume(!(((unsigned long)hwdev) != ((unsigned long)((struct 
pci_dev *)0)))); 47 __CPAchecker_TMP_0 = (struct device *)0; 47 -dma_unmap_single_attrs(__CPAchecker_TMP_0, dma_addr, size, (enum dma_data_direction )direction, 0UL) { 207 struct dma_map_ops *ops; 208 struct dma_map_ops *tmp; 209 int tmp___0; 210 long tmp___1; { 32 long tmp; 35 tmp = __builtin_expect(((unsigned long)dev) == ((unsigned long)((struct device *)0)), 0L) { /* Function call is skipped due to function is undefined */} 35 assume(tmp != 0L); 36 return dma_ops;; } 208 ops = tmp; { 138 int __CPAchecker_TMP_0; 138 assume(!(dma_direction == 0)); 138 assume(!(dma_direction == 1)); 138 assume(dma_direction == 2); __CPAchecker_TMP_0 = 1; 138 return __CPAchecker_TMP_0;; } 210 tmp___1 = __builtin_expect(tmp___0 == 0, 0L) { /* Function call is skipped due to function is undefined */} 210 assume(!(tmp___1 != 0L)); 211 unsigned long __CPAchecker_TMP_0 = (unsigned long)(ops->unmap_page); 211 assume(__CPAchecker_TMP_0 != ((unsigned long)((void (*)(struct device *, dma_addr_t , size_t , enum dma_data_direction , unsigned long))0))); 212 (*(ops->unmap_page))(dev, addr, size, dir, attrs); 213 debug_dma_unmap_page(dev, addr, size, (int)dir, 1) { /* Function call is skipped due to function is undefined */} 214 return ;; } 48 return ;; } 411 ((priv->rx_buffers) + ((unsigned long)entry))->skb = newskb; { 1876 unsigned char *__CPAchecker_TMP_0 = (unsigned char *)(skb->head); 1876 unsigned long __CPAchecker_TMP_1 = (unsigned long)(skb->tail); 1876 return __CPAchecker_TMP_0 + __CPAchecker_TMP_1;; } { } 41 unsigned long long tmp; 40 struct device *__CPAchecker_TMP_0; 40 assume(!(((unsigned long)hwdev) != ((unsigned long)((struct pci_dev *)0)))); 40 __CPAchecker_TMP_0 = (struct device *)0; } | Source code
1 #ifndef _ASM_X86_DMA_MAPPING_H
2 #define _ASM_X86_DMA_MAPPING_H
3
4 /*
5 * IOMMU interface. See Documentation/DMA-API-HOWTO.txt and
6 * Documentation/DMA-API.txt for documentation.
7 */
8
9 #include <linux/kmemcheck.h>
10 #include <linux/scatterlist.h>
11 #include <linux/dma-debug.h>
12 #include <asm/io.h>
13 #include <asm/swiotlb.h>
14 #include <linux/dma-contiguous.h>
15
16 #ifdef CONFIG_ISA
17 # define ISA_DMA_BIT_MASK DMA_BIT_MASK(24)
18 #else
19 # define ISA_DMA_BIT_MASK DMA_BIT_MASK(32)
20 #endif
21
22 #define DMA_ERROR_CODE 0
23
24 extern int iommu_merge;
25 extern struct device x86_dma_fallback_dev;
26 extern int panic_on_overflow;
27
28 extern struct dma_map_ops *dma_ops;
29
/*
 * Return the dma_map_ops to use for @dev.  Without per-device DMA ops
 * support this is always the global dma_ops; with it, the global table
 * is the fallback when @dev is NULL or has no archdata.dma_ops set.
 */
30 static inline struct dma_map_ops *get_dma_ops(struct device *dev)
31 {
32 #ifndef CONFIG_X86_DEV_DMA_OPS
33 return dma_ops;
34 #else
35 if (unlikely(!dev) || !dev->archdata.dma_ops)
36 return dma_ops;
37 else
38 return dev->archdata.dma_ops;
39 #endif
40 }
41
42 bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp);
43 #define arch_dma_alloc_attrs arch_dma_alloc_attrs
44
45 #define HAVE_ARCH_DMA_SUPPORTED 1
46 extern int dma_supported(struct device *hwdev, u64 mask);
47
48 extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
49 dma_addr_t *dma_addr, gfp_t flag,
50 unsigned long attrs);
51
52 extern void dma_generic_free_coherent(struct device *dev, size_t size,
53 void *vaddr, dma_addr_t dma_addr,
54 unsigned long attrs);
55
56 #ifdef CONFIG_X86_DMA_REMAP /* Platform code defines bridge-specific code */
57 extern bool dma_capable(struct device *dev, dma_addr_t addr, size_t size);
58 extern dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
59 extern phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
60 #else
61
/*
 * True when the byte range [addr, addr + size - 1] lies within the
 * device's DMA mask.  A device with no dma_mask set can address nothing.
 */
62 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
63 {
64 if (!dev->dma_mask)
65 return 0;
66
67 return addr + size - 1 <= *dev->dma_mask;
68 }
69
/* No bridge remapping configured: bus address == physical address. */
70 static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
71 {
72 return paddr;
73 }
74
/* Inverse of phys_to_dma() above; identity in the non-remap case. */
75 static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
76 {
77 return daddr;
78 }
79 #endif /* CONFIG_X86_DMA_REMAP */
80
/*
 * x86 is cache-coherent for DMA, so "syncing" a consistent buffer only
 * needs the CPU write buffers flushed; vaddr/size/dir are unused.
 */
81 static inline void
82 dma_cache_sync(struct device *dev, void *vaddr, size_t size,
83 enum dma_data_direction dir)
84 {
85 flush_write_buffers();
86 }
87
/*
 * Pick the address mask to honour for a coherent allocation: the
 * device's coherent_dma_mask when set, otherwise 24 bits for GFP_DMA
 * requests (ISA range) and 32 bits for everything else.
 */
88 static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
89 gfp_t gfp)
90 {
91 unsigned long dma_mask = 0;
92
93 dma_mask = dev->coherent_dma_mask;
94 if (!dma_mask)
95 dma_mask = (gfp & GFP_DMA) ? DMA_BIT_MASK(24) : DMA_BIT_MASK(32);
96
97 return dma_mask;
98 }
99
/*
 * Translate the device's coherent mask into allocator zone flags:
 * <=24-bit masks must come from ZONE_DMA; on 64-bit, <=32-bit masks
 * from ZONE_DMA32 (unless GFP_DMA was already forced).
 */
100 static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
101 {
102 unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);
103
104 if (dma_mask <= DMA_BIT_MASK(24))
105 gfp |= GFP_DMA;
106 #ifdef CONFIG_X86_64
107 if (dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA))
108 gfp |= GFP_DMA32;
109 #endif
110 return gfp;
111 }
112
113 #endif 1
2
3 /*
4 * Linux device driver for ADMtek ADM8211 (IEEE 802.11b MAC/BBP)
5 *
6 * Copyright (c) 2003, Jouni Malinen <j@w1.fi>
7 * Copyright (c) 2004-2007, Michael Wu <flamingice@sourmilk.net>
8 * Some parts copyright (c) 2003 by David Young <dyoung@pobox.com>
9 * and used with permission.
10 *
11 * Much thanks to Infineon-ADMtek for their support of this driver.
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as
15 * published by the Free Software Foundation. See README and COPYING for
16 * more details.
17 */
18
19 #include <linux/interrupt.h>
20 #include <linux/if.h>
21 #include <linux/skbuff.h>
22 #include <linux/slab.h>
23 #include <linux/etherdevice.h>
24 #include <linux/pci.h>
25 #include <linux/delay.h>
26 #include <linux/crc32.h>
27 #include <linux/eeprom_93cx6.h>
28 #include <linux/module.h>
29 #include <net/mac80211.h>
30
31 #include "adm8211.h"
32
33 MODULE_AUTHOR("Michael Wu <flamingice@sourmilk.net>");
34 MODULE_AUTHOR("Jouni Malinen <j@w1.fi>");
35 MODULE_DESCRIPTION("Driver for IEEE 802.11b wireless cards based on ADMtek ADM8211");
36 MODULE_SUPPORTED_DEVICE("ADM8211");
37 MODULE_LICENSE("GPL");
38
/* TX/RX descriptor ring sizes; tunable at load time, default 16 each. */
39 static unsigned int tx_ring_size __read_mostly = 16;
40 static unsigned int rx_ring_size __read_mostly = 16;
41
42 module_param(tx_ring_size, uint, 0);
43 module_param(rx_ring_size, uint, 0);
44
/* PCI vendor/device IDs this driver binds to; zero entry terminates. */
45 static const struct pci_device_id adm8211_pci_id_table[] = {
46 /* ADMtek ADM8211 */
47 { PCI_DEVICE(0x10B7, 0x6000) }, /* 3Com 3CRSHPW796 */
48 { PCI_DEVICE(0x1200, 0x8201) }, /* ? */
49 { PCI_DEVICE(0x1317, 0x8201) }, /* ADM8211A */
50 { PCI_DEVICE(0x1317, 0x8211) }, /* ADM8211B/C */
51 { 0 }
52 };
53
/* 802.11b bitrates in units of 100 kbps (10 = 1 Mbps ... 110 = 11 Mbps). */
54 static struct ieee80211_rate adm8211_rates[] = {
55 { .bitrate = 10, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
56 { .bitrate = 20, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
57 { .bitrate = 55, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
58 { .bitrate = 110, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
59 { .bitrate = 220, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, /* XX ?? */
60 };
61
/* 2.4 GHz channels 1-14; index i corresponds to channel i+1. */
62 static const struct ieee80211_channel adm8211_channels[] = {
63 { .center_freq = 2412},
64 { .center_freq = 2417},
65 { .center_freq = 2422},
66 { .center_freq = 2427},
67 { .center_freq = 2432},
68 { .center_freq = 2437},
69 { .center_freq = 2442},
70 { .center_freq = 2447},
71 { .center_freq = 2452},
72 { .center_freq = 2457},
73 { .center_freq = 2462},
74 { .center_freq = 2467},
75 { .center_freq = 2472},
76 { .center_freq = 2484},
77 };
78
79
/*
 * eeprom_93cx6 read callback: snapshot the four serial-EEPROM bus lines
 * (data in/out, clock, chip select) from the SPR register into the
 * eeprom_93cx6 state.  priv is only used via the ADM8211_CSR_READ macro.
 */
80 static void adm8211_eeprom_register_read(struct eeprom_93cx6 *eeprom)
81 {
82 struct adm8211_priv *priv = eeprom->data;
83 u32 reg = ADM8211_CSR_READ(SPR);
84
85 eeprom->reg_data_in = reg & ADM8211_SPR_SDI;
86 eeprom->reg_data_out = reg & ADM8211_SPR_SDO;
87 eeprom->reg_data_clock = reg & ADM8211_SPR_SCLK;
88 eeprom->reg_chip_select = reg & ADM8211_SPR_SCS;
89 }
90
/*
 * eeprom_93cx6 write callback: rebuild the SPR register from the
 * eeprom_93cx6 line state and write it out.  The trailing read-back
 * forces the posted write to complete and doubles as the bit-bang delay.
 * NOTE(review): the 0x4000 base value is a magic constant — presumably a
 * required SPR control bit; verify against the ADM8211 datasheet.
 */
91 static void adm8211_eeprom_register_write(struct eeprom_93cx6 *eeprom)
92 {
93 struct adm8211_priv *priv = eeprom->data;
94 u32 reg = 0x4000 | ADM8211_SPR_SRS;
95
96 if (eeprom->reg_data_in)
97 reg |= ADM8211_SPR_SDI;
98 if (eeprom->reg_data_out)
99 reg |= ADM8211_SPR_SDO;
100 if (eeprom->reg_data_clock)
101 reg |= ADM8211_SPR_SCLK;
102 if (eeprom->reg_chip_select)
103 reg |= ADM8211_SPR_SCS;
104
105 ADM8211_CSR_WRITE(SPR, reg);
106 ADM8211_CSR_READ(SPR); /* eeprom_delay */
107 }
108
/*
 * Read the whole 93C46/93C66 EEPROM into priv->eeprom and derive the
 * board configuration from it: RF/BBP chip types, allowed channel
 * range, and the specific BBP/transceiver variants.  Unknown values
 * fall back to defaults chosen by PCI revision.  Returns 0 or -ENOMEM.
 * Caller owns freeing priv->eeprom.
 */
109 static int adm8211_read_eeprom(struct ieee80211_hw *dev)
110 {
111 struct adm8211_priv *priv = dev->priv;
112 unsigned int words, i;
113 struct ieee80211_chan_range chan_range;
114 u16 cr49;
115 struct eeprom_93cx6 eeprom = {
116 .data = priv,
117 .register_read = adm8211_eeprom_register_read,
118 .register_write = adm8211_eeprom_register_write
119 };
120
/* EPTYP strap in CSR_TEST0 distinguishes the two supported EEPROM sizes. */
121 if (ADM8211_CSR_READ(CSR_TEST0) & ADM8211_CSR_TEST0_EPTYP) {
122 /* 256 * 16-bit = 512 bytes */
123 eeprom.width = PCI_EEPROM_WIDTH_93C66;
124 words = 256;
125 } else {
126 /* 64 * 16-bit = 128 bytes */
127 eeprom.width = PCI_EEPROM_WIDTH_93C46;
128 words = 64;
129 }
130
131 priv->eeprom_len = words * 2;
132 priv->eeprom = kmalloc(priv->eeprom_len, GFP_KERNEL);
133 if (!priv->eeprom)
134 return -ENOMEM;
135
136 eeprom_93cx6_multiread(&eeprom, 0, (__le16 *)priv->eeprom, words);
137
/* cr49 packs the chip types: bits 5:3 = RF type, bits 2:0 = BBP type. */
138 cr49 = le16_to_cpu(priv->eeprom->cr49);
139 priv->rf_type = (cr49 >> 3) & 0x7;
140 switch (priv->rf_type) {
141 case ADM8211_TYPE_INTERSIL:
142 case ADM8211_TYPE_RFMD:
143 case ADM8211_TYPE_MARVEL:
144 case ADM8211_TYPE_AIROHA:
145 case ADM8211_TYPE_ADMTEK:
146 break;
147
148 default:
/* Unknown RF: guess by silicon revision (pre-CA boards used RFMD). */
149 if (priv->pdev->revision < ADM8211_REV_CA)
150 priv->rf_type = ADM8211_TYPE_RFMD;
151 else
152 priv->rf_type = ADM8211_TYPE_AIROHA;
153
154 printk(KERN_WARNING "%s (adm8211): Unknown RFtype %d\n",
155 pci_name(priv->pdev), (cr49 >> 3) & 0x7);
156 }
157
158 priv->bbp_type = cr49 & 0x7;
159 switch (priv->bbp_type) {
160 case ADM8211_TYPE_INTERSIL:
161 case ADM8211_TYPE_RFMD:
162 case ADM8211_TYPE_MARVEL:
163 case ADM8211_TYPE_AIROHA:
164 case ADM8211_TYPE_ADMTEK:
165 break;
166 default:
167 if (priv->pdev->revision < ADM8211_REV_CA)
168 priv->bbp_type = ADM8211_TYPE_RFMD;
169 else
170 priv->bbp_type = ADM8211_TYPE_ADMTEK;
171
/* NOTE(review): this warning prints cr49 >> 3 (the RF field) although the
 * value decoded above is cr49 & 0x7 — looks like a copy/paste slip in the
 * log message only; the stored bbp_type is unaffected. */
172 printk(KERN_WARNING "%s (adm8211): Unknown BBPtype: %d\n",
173 pci_name(priv->pdev), cr49 >> 3);
174 }
175
/* Out-of-range country codes fall back to cranges[2] (channels 1-13). */
176 if (priv->eeprom->country_code >= ARRAY_SIZE(cranges)) {
177 printk(KERN_WARNING "%s (adm8211): Invalid country code (%d)\n",
178 pci_name(priv->pdev), priv->eeprom->country_code);
179
180 chan_range = cranges[2];
181 } else
182 chan_range = cranges[priv->eeprom->country_code];
183
184 printk(KERN_DEBUG "%s (adm8211): Channel range: %d - %d\n",
185 pci_name(priv->pdev), (int)chan_range.min, (int)chan_range.max);
186
187 BUILD_BUG_ON(sizeof(priv->channels) != sizeof(adm8211_channels));
188
/* Copy the static channel table, then disable channels outside the
 * regulatory range; i is the 1-based channel number. */
189 memcpy(priv->channels, adm8211_channels, sizeof(priv->channels));
190 priv->band.channels = priv->channels;
191 priv->band.n_channels = ARRAY_SIZE(adm8211_channels);
192 priv->band.bitrates = adm8211_rates;
193 priv->band.n_bitrates = ARRAY_SIZE(adm8211_rates);
194
195 for (i = 1; i <= ARRAY_SIZE(adm8211_channels); i++)
196 if (i < chan_range.min || i > chan_range.max)
197 priv->channels[i - 1].flags |= IEEE80211_CHAN_DISABLED;
198
/* Specific BBP variant; unknowns again default by PCI revision. */
199 switch (priv->eeprom->specific_bbptype) {
200 case ADM8211_BBP_RFMD3000:
201 case ADM8211_BBP_RFMD3002:
202 case ADM8211_BBP_ADM8011:
203 priv->specific_bbptype = priv->eeprom->specific_bbptype;
204 break;
205
206 default:
207 if (priv->pdev->revision < ADM8211_REV_CA)
208 priv->specific_bbptype = ADM8211_BBP_RFMD3000;
209 else
210 priv->specific_bbptype = ADM8211_BBP_ADM8011;
211
212 printk(KERN_WARNING "%s (adm8211): Unknown specific BBP: %d\n",
213 pci_name(priv->pdev), priv->eeprom->specific_bbptype);
214 }
215
/* Transceiver (synthesizer) variant.  NOTE(review): in the default arm,
 * a revision matching none of BA/CA/AB leaves transceiver_type unset here
 * — presumably zero-initialized elsewhere; confirm against probe. */
216 switch (priv->eeprom->specific_rftype) {
217 case ADM8211_RFMD2948:
218 case ADM8211_RFMD2958:
219 case ADM8211_RFMD2958_RF3000_CONTROL_POWER:
220 case ADM8211_MAX2820:
221 case ADM8211_AL2210L:
222 priv->transceiver_type = priv->eeprom->specific_rftype;
223 break;
224
225 default:
226 if (priv->pdev->revision == ADM8211_REV_BA)
227 priv->transceiver_type = ADM8211_RFMD2958_RF3000_CONTROL_POWER;
228 else if (priv->pdev->revision == ADM8211_REV_CA)
229 priv->transceiver_type = ADM8211_AL2210L;
230 else if (priv->pdev->revision == ADM8211_REV_AB)
231 priv->transceiver_type = ADM8211_RFMD2948;
232
233 printk(KERN_WARNING "%s (adm8211): Unknown transceiver: %d\n",
234 pci_name(priv->pdev), priv->eeprom->specific_rftype);
235
236 break;
237 }
238
239 printk(KERN_DEBUG "%s (adm8211): RFtype=%d BBPtype=%d Specific BBP=%d "
240 "Transceiver=%d\n", pci_name(priv->pdev), priv->rf_type,
241 priv->bbp_type, priv->specific_bbptype, priv->transceiver_type);
242
243 return 0;
244 }
245
/*
 * Write one word of on-chip SRAM: latch the target address via WEPCTL
 * (BA+ parts need the SEL_WEPTABLE bank-select bit), then push the data
 * through WESK.  Each CSR read flushes the posted write; the msleep(1)
 * calls give the hardware time to commit, so this must not be called
 * from atomic context.
 */
246 static inline void adm8211_write_sram(struct ieee80211_hw *dev,
247 u32 addr, u32 data)
248 {
249 struct adm8211_priv *priv = dev->priv;
250
251 ADM8211_CSR_WRITE(WEPCTL, addr | ADM8211_WEPCTL_TABLE_WR |
252 (priv->pdev->revision < ADM8211_REV_BA ?
253 0 : ADM8211_WEPCTL_SEL_WEPTABLE ));
254 ADM8211_CSR_READ(WEPCTL);
255 msleep(1);
256
257 ADM8211_CSR_WRITE(WESK, data);
258 ADM8211_CSR_READ(WESK);
259 msleep(1);
260 }
261
/*
 * Copy a byte buffer into SRAM, packing little-endian words: 16-bit
 * words on pre-BA silicon, 32-bit words on BA and later.  WEPCTL is
 * saved up front and restored afterwards so the table-write mode does
 * not leak out.
 * NOTE(review): assumes len is a multiple of 2 (pre-BA) or 4 (BA+);
 * an unaligned tail would read past the end of buf — confirm callers.
 */
262 static void adm8211_write_sram_bytes(struct ieee80211_hw *dev,
263 unsigned int addr, u8 *buf,
264 unsigned int len)
265 {
266 struct adm8211_priv *priv = dev->priv;
267 u32 reg = ADM8211_CSR_READ(WEPCTL);
268 unsigned int i;
269
270 if (priv->pdev->revision < ADM8211_REV_BA) {
271 for (i = 0; i < len; i += 2) {
272 u16 val = buf[i] | (buf[i + 1] << 8);
273 adm8211_write_sram(dev, addr + i / 2, val);
274 }
275 } else {
276 for (i = 0; i < len; i += 4) {
277 u32 val = (buf[i + 0] << 0 ) | (buf[i + 1] << 8 ) |
278 (buf[i + 2] << 16) | (buf[i + 3] << 24);
279 adm8211_write_sram(dev, addr + i / 4, val);
280 }
281 }
282
283 ADM8211_CSR_WRITE(WEPCTL, reg);
284 }
285
/*
 * Zero every SRAM word (key/table memory), preserving the caller's
 * WEPCTL value across the wipe.  Slow: one msleep-paced write per word.
 */
286 static void adm8211_clear_sram(struct ieee80211_hw *dev)
287 {
288 struct adm8211_priv *priv = dev->priv;
289 u32 reg = ADM8211_CSR_READ(WEPCTL);
290 unsigned int addr;
291
292 for (addr = 0; addr < ADM8211_SRAM_SIZE; addr++)
293 adm8211_write_sram(dev, addr, 0);
294
295 ADM8211_CSR_WRITE(WEPCTL, reg);
296 }
297
/*
 * mac80211 get_stats callback: hand out a snapshot of the counters the
 * driver accumulates in priv->stats.  Always succeeds.
 */
298 static int adm8211_get_stats(struct ieee80211_hw *dev,
299 struct ieee80211_low_level_stats *stats)
300 {
301 struct adm8211_priv *priv = dev->priv;
302
303 memcpy(stats, &priv->stats, sizeof(*stats));
304
305 return 0;
306 }
307
/*
 * TX-complete interrupt handler: walk the ring from dirty_tx to cur_tx,
 * reclaiming descriptors the hardware has finished with, reporting each
 * frame's status to mac80211, and waking the queue once there is room.
 * Runs under priv->lock; ring indices only ever grow, entry is the
 * index modulo the ring size.
 */
308 static void adm8211_interrupt_tci(struct ieee80211_hw *dev)
309 {
310 struct adm8211_priv *priv = dev->priv;
311 unsigned int dirty_tx;
312
313 spin_lock(&priv->lock);
314
315 for (dirty_tx = priv->dirty_tx; priv->cur_tx - dirty_tx; dirty_tx++) {
316 unsigned int entry = dirty_tx % priv->tx_ring_size;
317 u32 status = le32_to_cpu(priv->tx_ring[entry].status);
318 struct ieee80211_tx_info *txi;
319 struct adm8211_tx_ring_info *info;
320 struct sk_buff *skb;
321
/* Stop at the first descriptor the hardware still owns or hasn't finished. */
322 if (status & TDES0_CONTROL_OWN ||
323 !(status & TDES0_CONTROL_DONE))
324 break;
325
326 info = &priv->tx_buffers[entry];
327 skb = info->skb;
328 txi = IEEE80211_SKB_CB(skb);
329
330 /* TODO: check TDES0_STATUS_TUF and TDES0_STATUS_TRO */
331
332 pci_unmap_single(priv->pdev, info->mapping,
333 info->skb->len, PCI_DMA_TODEVICE);
334
335 ieee80211_tx_info_clear_status(txi);
336
/* Strip the hardware TX header and restore the 802.11 header that the
 * TX path stashed in skb->cb before handing the frame back to mac80211. */
337 skb_pull(skb, sizeof(struct adm8211_tx_hdr));
338 memcpy(skb_push(skb, info->hdrlen), skb->cb, info->hdrlen);
/* No error status on a frame that wanted an ACK means it was ACKed. */
339 if (!(txi->flags & IEEE80211_TX_CTL_NO_ACK) &&
340 !(status & TDES0_STATUS_ES))
341 txi->flags |= IEEE80211_TX_STAT_ACK;
342
343 ieee80211_tx_status_irqsafe(dev, skb);
344
345 info->skb = NULL;
346 }
347
/* Wake queue 0 once at least two descriptors are free again. */
348 if (priv->cur_tx - dirty_tx < priv->tx_ring_size - 2)
349 ieee80211_wake_queue(dev, 0);
350
351 priv->dirty_tx = dirty_tx;
352 spin_unlock(&priv->lock);
353 }
354
355
/* RX interrupt: walk the receive ring while descriptors are host-owned,
 * hand received frames to mac80211, and give buffers back to the NIC.
 * Small frames are copied (copy-break); large ones swap in a fresh skb. */
static void adm8211_interrupt_rci(struct ieee80211_hw *dev)
{
	struct adm8211_priv *priv = dev->priv;
	unsigned int entry = priv->cur_rx % priv->rx_ring_size;
	u32 status;
	unsigned int pktlen;
	struct sk_buff *skb, *newskb;
	unsigned int limit = priv->rx_ring_size;
	u8 rssi, rate;

	/* Process at most one full ring per invocation (limit) to bound
	 * time spent in IRQ context. */
	while (!(priv->rx_ring[entry].status & cpu_to_le32(RDES0_STATUS_OWN))) {
		if (!limit--)
			break;

		status = le32_to_cpu(priv->rx_ring[entry].status);
		rate = (status & RDES0_STATUS_RXDR) >> 12;
		/* RSSI is reported in the low bits of the length word. */
		rssi = le32_to_cpu(priv->rx_ring[entry].length) &
			RDES1_STATUS_RSSI;

		/* Clamp frame length to the buffer size the NIC was given. */
		pktlen = status & RDES0_STATUS_FL;
		if (pktlen > RX_PKT_SIZE) {
			if (net_ratelimit())
				wiphy_debug(dev->wiphy, "frame too long (%d)\n",
					    pktlen);
			pktlen = RX_PKT_SIZE;
		}

		if (!priv->soft_rx_crc && status & RDES0_STATUS_ES) {
			skb = NULL; /* old buffer will be reused */
			/* TODO: update RX error stats */
			/* TODO: check RDES0_STATUS_CRC*E */
		} else if (pktlen < RX_COPY_BREAK) {
			/* Copy-break: small frame, copy out of the DMA
			 * buffer so the original mapping can stay in place. */
			skb = dev_alloc_skb(pktlen);
			if (skb) {
				pci_dma_sync_single_for_cpu(
					priv->pdev,
					priv->rx_buffers[entry].mapping,
					pktlen, PCI_DMA_FROMDEVICE);
				memcpy(skb_put(skb, pktlen),
				       skb_tail_pointer(priv->rx_buffers[entry].skb),
				       pktlen);
				pci_dma_sync_single_for_device(
					priv->pdev,
					priv->rx_buffers[entry].mapping,
					RX_PKT_SIZE, PCI_DMA_FROMDEVICE);
			}
		} else {
			/* Large frame: detach the filled skb and map a fresh
			 * replacement buffer for the descriptor. */
			newskb = dev_alloc_skb(RX_PKT_SIZE);
			if (newskb) {
				skb = priv->rx_buffers[entry].skb;
				skb_put(skb, pktlen);
				pci_unmap_single(
					priv->pdev,
					priv->rx_buffers[entry].mapping,
					RX_PKT_SIZE, PCI_DMA_FROMDEVICE);
				priv->rx_buffers[entry].skb = newskb;
				priv->rx_buffers[entry].mapping =
					pci_map_single(priv->pdev,
						       skb_tail_pointer(newskb),
						       RX_PKT_SIZE,
						       PCI_DMA_FROMDEVICE);
			} else {
				/* Allocation failed: drop frame, reuse the
				 * old buffer. */
				skb = NULL;
				/* TODO: update rx dropped stats */
			}

			priv->rx_ring[entry].buffer1 =
				cpu_to_le32(priv->rx_buffers[entry].mapping);
		}

		/* Hand the descriptor back to the NIC; the last ring entry
		 * carries the end-of-ring flag. */
		priv->rx_ring[entry].status = cpu_to_le32(RDES0_STATUS_OWN |
							  RDES0_STATUS_SQL);
		priv->rx_ring[entry].length =
			cpu_to_le32(RX_PKT_SIZE |
				    (entry == priv->rx_ring_size - 1 ?
				     RDES1_CONTROL_RER : 0));

		if (skb) {
			struct ieee80211_rx_status rx_status = {0};

			/* Pre-CA revisions report signal directly; CA and
			 * later report it inverted. */
			if (priv->pdev->revision < ADM8211_REV_CA)
				rx_status.signal = rssi;
			else
				rx_status.signal = 100 - rssi;

			rx_status.rate_idx = rate;

			rx_status.freq = adm8211_channels[priv->channel - 1].center_freq;
			rx_status.band = NL80211_BAND_2GHZ;

			memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
			ieee80211_rx_irqsafe(dev, skb);
		}

		entry = (++priv->cur_rx) % priv->rx_ring_size;
	}

	/* TODO: check LPC and update stats? */
}
455
456
/* Top-level interrupt handler. Reads and acknowledges the status register,
 * dispatches RX/TX completion work, and logs any other asserted causes.
 * Note the ack (STSR write-back) happens before dispatch, so events arriving
 * during processing raise a new interrupt rather than being lost. */
static irqreturn_t adm8211_interrupt(int irq, void *dev_id)
{
/* Log-only handling for status bits that need no real work yet. */
#define ADM8211_INT(x)						\
do {								\
	if (unlikely(stsr & ADM8211_STSR_ ## x))		\
		wiphy_debug(dev->wiphy, "%s\n", #x);		\
} while (0)

	struct ieee80211_hw *dev = dev_id;
	struct adm8211_priv *priv = dev->priv;
	u32 stsr = ADM8211_CSR_READ(STSR);
	ADM8211_CSR_WRITE(STSR, stsr);
	/* All-ones read: device absent/removed — nothing to do. */
	if (stsr == 0xffffffff)
		return IRQ_HANDLED;

	/* Neither normal nor abnormal summary set: not our interrupt work. */
	if (!(stsr & (ADM8211_STSR_NISS | ADM8211_STSR_AISS)))
		return IRQ_HANDLED;

	if (stsr & ADM8211_STSR_RCI)
		adm8211_interrupt_rci(dev);
	if (stsr & ADM8211_STSR_TCI)
		adm8211_interrupt_tci(dev);

	ADM8211_INT(PCF);
	ADM8211_INT(BCNTC);
	ADM8211_INT(GPINT);
	ADM8211_INT(ATIMTC);
	ADM8211_INT(TSFTF);
	ADM8211_INT(TSCZ);
	ADM8211_INT(SQL);
	ADM8211_INT(WEPTD);
	ADM8211_INT(ATIME);
	ADM8211_INT(TEIS);
	ADM8211_INT(FBE);
	ADM8211_INT(REIS);
	ADM8211_INT(GPTT);
	ADM8211_INT(RPS);
	ADM8211_INT(RDU);
	ADM8211_INT(TUF);
	ADM8211_INT(TPS);

	return IRQ_HANDLED;

#undef ADM8211_INT
}
502
/* Generate a bit-banged write routine for one RF synthesizer chip.
 * The generated function shifts (value | addr), packed per the chip's
 * register format, MSB-first onto the SYNRF data line, toggling the clock
 * for each bit. Parameters:
 *   v_mask/v_shift, a_mask/a_shift - value/address field masks and positions
 *   bits      - index of the most significant bit to shift out (bits+1 total)
 *   prewrite  - drive data low once before clocking, if nonzero
 *   postwrite - latch select pulse after the transfer: 1 = IF_SELECT_0,
 *               2 = IF_SELECT_1 (chip-specific)
 * Each CSR write is followed by a read to flush PCI posting. */
#define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev,	\
					   u16 addr, u32 value) {	\
	struct adm8211_priv *priv = dev->priv;				\
	unsigned int i;							\
	u32 reg, bitbuf;						\
									\
	value &= v_mask;						\
	addr &= a_mask;							\
	bitbuf = (value << v_shift) | (addr << a_shift);		\
									\
	ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1);		\
	ADM8211_CSR_READ(SYNRF);					\
	ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0);		\
	ADM8211_CSR_READ(SYNRF);					\
									\
	if (prewrite) {							\
		ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
		ADM8211_CSR_READ(SYNRF);				\
	}								\
									\
	for (i = 0; i <= bits; i++) {					\
		if (bitbuf & (1 << (bits - i)))				\
			reg = ADM8211_SYNRF_WRITE_SYNDATA_1;		\
		else							\
			reg = ADM8211_SYNRF_WRITE_SYNDATA_0;		\
									\
		ADM8211_CSR_WRITE(SYNRF, reg);				\
		ADM8211_CSR_READ(SYNRF);				\
									\
		ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
		ADM8211_CSR_READ(SYNRF);				\
		ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
		ADM8211_CSR_READ(SYNRF);				\
	}								\
									\
	if (postwrite == 1) {						\
		ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
		ADM8211_CSR_READ(SYNRF);				\
	}								\
	if (postwrite == 2) {						\
		ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
		ADM8211_CSR_READ(SYNRF);				\
	}								\
									\
	ADM8211_CSR_WRITE(SYNRF, 0);					\
	ADM8211_CSR_READ(SYNRF);					\
}

/* Per-chip instantiations: field layout, word length, and latch style. */
WRITE_SYN(max2820, 0x00FFF, 0, 0x0F, 12, 15, 1, 1)
WRITE_SYN(al2210l, 0xFFFFF, 4, 0x0F, 0, 23, 1, 1)
WRITE_SYN(rfmd2958, 0x3FFFF, 0, 0x1F, 18, 23, 0, 1)
WRITE_SYN(rfmd2948, 0x0FFFF, 4, 0x0F, 0, 21, 0, 2)

#undef WRITE_SYN
558
559 static int adm8211_write_bbp(struct ieee80211_hw *dev, u8 addr, u8 data)
560 {
561 struct adm8211_priv *priv = dev->priv;
562 unsigned int timeout;
563 u32 reg;
564
565 timeout = 10;
566 while (timeout > 0) {
567 reg = ADM8211_CSR_READ(BBPCTL);
568 if (!(reg & (ADM8211_BBPCTL_WR | ADM8211_BBPCTL_RD)))
569 break;
570 timeout--;
571 msleep(2);
572 }
573
574 if (timeout == 0) {
575 wiphy_debug(dev->wiphy,
576 "adm8211_write_bbp(%d,%d) failed prewrite (reg=0x%08x)\n",
577 addr, data, reg);
578 return -ETIMEDOUT;
579 }
580
581 switch (priv->bbp_type) {
582 case ADM8211_TYPE_INTERSIL:
583 reg = ADM8211_BBPCTL_MMISEL; /* three wire interface */
584 break;
585 case ADM8211_TYPE_RFMD:
586 reg = (0x20 << 24) | ADM8211_BBPCTL_TXCE | ADM8211_BBPCTL_CCAP |
587 (0x01 << 18);
588 break;
589 case ADM8211_TYPE_ADMTEK:
590 reg = (0x20 << 24) | ADM8211_BBPCTL_TXCE | ADM8211_BBPCTL_CCAP |
591 (0x05 << 18);
592 break;
593 }
594 reg |= ADM8211_BBPCTL_WR | (addr << 8) | data;
595
596 ADM8211_CSR_WRITE(BBPCTL, reg);
597
598 timeout = 10;
599 while (timeout > 0) {
600 reg = ADM8211_CSR_READ(BBPCTL);
601 if (!(reg & ADM8211_BBPCTL_WR))
602 break;
603 timeout--;
604 msleep(2);
605 }
606
607 if (timeout == 0) {
608 ADM8211_CSR_WRITE(BBPCTL, ADM8211_CSR_READ(BBPCTL) &
609 ~ADM8211_BBPCTL_WR);
610 wiphy_debug(dev->wiphy,
611 "adm8211_write_bbp(%d,%d) failed postwrite (reg=0x%08x)\n",
612 addr, data, reg);
613 return -ETIMEDOUT;
614 }
615
616 return 0;
617 }
618
/* Tune the RF synthesizer and BBP to the given 2.4 GHz channel (1..14).
 * Per-channel power/calibration values come from the EEPROM unless the
 * corresponding priv override is set (tx/ant power <= 0x3F, cutoffs != 0xFF).
 * The MAC is idled for the duration and restored afterwards. */
static int adm8211_rf_set_channel(struct ieee80211_hw *dev, unsigned int chan)
{
	/* RFMD2958 synthesizer register 5/6 values, indexed by channel - 1. */
	static const u32 adm8211_rfmd2958_reg5[] =
		{0x22BD, 0x22D2, 0x22E8, 0x22FE, 0x2314, 0x232A, 0x2340,
		 0x2355, 0x236B, 0x2381, 0x2397, 0x23AD, 0x23C2, 0x23F7};
	static const u32 adm8211_rfmd2958_reg6[] =
		{0x05D17, 0x3A2E8, 0x2E8BA, 0x22E8B, 0x1745D, 0x0BA2E, 0x00000,
		 0x345D1, 0x28BA2, 0x1D174, 0x11745, 0x05D17, 0x3A2E8, 0x11745};

	struct adm8211_priv *priv = dev->priv;
	u8 ant_power = priv->ant_power > 0x3F ?
		priv->eeprom->antenna_power[chan - 1] : priv->ant_power;
	u8 tx_power = priv->tx_power > 0x3F ?
		priv->eeprom->tx_power[chan - 1] : priv->tx_power;
	u8 lpf_cutoff = priv->lpf_cutoff == 0xFF ?
		priv->eeprom->lpf_cutoff[chan - 1] : priv->lpf_cutoff;
	u8 lnags_thresh = priv->lnags_threshold == 0xFF ?
		priv->eeprom->lnags_threshold[chan - 1] : priv->lnags_threshold;
	u32 reg;

	ADM8211_IDLE();

	/* Program synthesizer to new channel */
	switch (priv->transceiver_type) {
	case ADM8211_RFMD2958:
	case ADM8211_RFMD2958_RF3000_CONTROL_POWER:
		adm8211_rf_write_syn_rfmd2958(dev, 0x00, 0x04007);
		adm8211_rf_write_syn_rfmd2958(dev, 0x02, 0x00033);

		adm8211_rf_write_syn_rfmd2958(dev, 0x05,
					      adm8211_rfmd2958_reg5[chan - 1]);
		adm8211_rf_write_syn_rfmd2958(dev, 0x06,
					      adm8211_rfmd2958_reg6[chan - 1]);
		break;

	case ADM8211_RFMD2948:
		adm8211_rf_write_syn_rfmd2948(dev, SI4126_MAIN_CONF,
					      SI4126_MAIN_XINDIV2);
		adm8211_rf_write_syn_rfmd2948(dev, SI4126_POWERDOWN,
					      SI4126_POWERDOWN_PDIB |
					      SI4126_POWERDOWN_PDRB);
		adm8211_rf_write_syn_rfmd2948(dev, SI4126_PHASE_DET_GAIN, 0);
		/* Channel 14 is off the linear 5 MHz spacing. */
		adm8211_rf_write_syn_rfmd2948(dev, SI4126_RF2_N_DIV,
					      (chan == 14 ?
					       2110 : (2033 + (chan * 5))));
		adm8211_rf_write_syn_rfmd2948(dev, SI4126_IF_N_DIV, 1496);
		adm8211_rf_write_syn_rfmd2948(dev, SI4126_RF2_R_DIV, 44);
		adm8211_rf_write_syn_rfmd2948(dev, SI4126_IF_R_DIV, 44);
		break;

	case ADM8211_MAX2820:
		adm8211_rf_write_syn_max2820(dev, 0x3,
			(chan == 14 ? 0x054 : (0x7 + (chan * 5))));
		break;

	case ADM8211_AL2210L:
		adm8211_rf_write_syn_al2210l(dev, 0x0,
			(chan == 14 ? 0x229B4 : (0x22967 + (chan * 5))));
		break;

	default:
		wiphy_debug(dev->wiphy, "unsupported transceiver type %d\n",
			    priv->transceiver_type);
		break;
	}

	/* write BBP regs */
	if (priv->bbp_type == ADM8211_TYPE_RFMD) {

		/* SMC 2635W specific? adm8211b doesn't use the 2948 though.. */
		/* TODO: remove if SMC 2635W doesn't need this */
		if (priv->transceiver_type == ADM8211_RFMD2948) {
			reg = ADM8211_CSR_READ(GPIO);
			reg &= 0xfffc0000;
			reg |= ADM8211_CSR_GPIO_EN0;
			if (chan != 14)
				reg |= ADM8211_CSR_GPIO_O0;
			ADM8211_CSR_WRITE(GPIO, reg);
		}

		if (priv->transceiver_type == ADM8211_RFMD2958) {
			/* set PCNT2 */
			adm8211_rf_write_syn_rfmd2958(dev, 0x0B, 0x07100);
			/* set PCNT1 P_DESIRED/MID_BIAS */
			reg = le16_to_cpu(priv->eeprom->cr49);
			reg >>= 13;
			reg <<= 15;
			reg |= ant_power << 9;
			adm8211_rf_write_syn_rfmd2958(dev, 0x0A, reg);
			/* set TXRX TX_GAIN (pre-CA revisions only take the
			 * TX power here; CA+ set it elsewhere) */
			adm8211_rf_write_syn_rfmd2958(dev, 0x09, 0x00050 |
				(priv->pdev->revision < ADM8211_REV_CA ? tx_power : 0));
		} else {
			reg = ADM8211_CSR_READ(PLCPHD);
			reg &= 0xff00ffff;
			reg |= tx_power << 18;
			ADM8211_CSR_WRITE(PLCPHD, reg);
		}

		/* Hold the PHY in reset while rewriting BBP registers. */
		ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_SELRF |
				  ADM8211_SYNRF_PE1 | ADM8211_SYNRF_PHYRST);
		ADM8211_CSR_READ(SYNRF);
		msleep(30);

		/* RF3000 BBP */
		if (priv->transceiver_type != ADM8211_RFMD2958)
			adm8211_write_bbp(dev, RF3000_TX_VAR_GAIN__TX_LEN_EXT,
					  tx_power<<2);
		adm8211_write_bbp(dev, RF3000_LOW_GAIN_CALIB, lpf_cutoff);
		adm8211_write_bbp(dev, RF3000_HIGH_GAIN_CALIB, lnags_thresh);
		adm8211_write_bbp(dev, 0x1c, priv->pdev->revision == ADM8211_REV_BA ?
					     priv->eeprom->cr28 : 0);
		adm8211_write_bbp(dev, 0x1d, priv->eeprom->cr29);

		ADM8211_CSR_WRITE(SYNRF, 0);

		/* Nothing to do for ADMtek BBP */
	} else if (priv->bbp_type != ADM8211_TYPE_ADMTEK)
		wiphy_debug(dev->wiphy, "unsupported BBP type %d\n",
			    priv->bbp_type);

	ADM8211_RESTORE();

	/* update current channel for adhoc (and maybe AP mode) */
	reg = ADM8211_CSR_READ(CAP0);
	reg &= ~0xF;
	reg |= chan;
	ADM8211_CSR_WRITE(CAP0, reg);

	return 0;
}
750
751 static void adm8211_update_mode(struct ieee80211_hw *dev)
752 {
753 struct adm8211_priv *priv = dev->priv;
754
755 ADM8211_IDLE();
756
757 priv->soft_rx_crc = 0;
758 switch (priv->mode) {
759 case NL80211_IFTYPE_STATION:
760 priv->nar &= ~(ADM8211_NAR_PR | ADM8211_NAR_EA);
761 priv->nar |= ADM8211_NAR_ST | ADM8211_NAR_SR;
762 break;
763 case NL80211_IFTYPE_ADHOC:
764 priv->nar &= ~ADM8211_NAR_PR;
765 priv->nar |= ADM8211_NAR_EA | ADM8211_NAR_ST | ADM8211_NAR_SR;
766
767 /* don't trust the error bits on rev 0x20 and up in adhoc */
768 if (priv->pdev->revision >= ADM8211_REV_BA)
769 priv->soft_rx_crc = 1;
770 break;
771 case NL80211_IFTYPE_MONITOR:
772 priv->nar &= ~(ADM8211_NAR_EA | ADM8211_NAR_ST);
773 priv->nar |= ADM8211_NAR_PR | ADM8211_NAR_SR;
774 break;
775 }
776
777 ADM8211_RESTORE();
778 }
779
/* One-time power-on initialization of the RF synthesizer. Register values
 * are fixed tables per transceiver type; the RFMD2958 comments come from
 * the ADMtek vendor driver. RFMD2948 needs no synthesizer init here. */
static void adm8211_hw_init_syn(struct ieee80211_hw *dev)
{
	struct adm8211_priv *priv = dev->priv;

	switch (priv->transceiver_type) {
	case ADM8211_RFMD2958:
	case ADM8211_RFMD2958_RF3000_CONTROL_POWER:
		/* comments taken from ADMtek vendor driver */

		/* Reset RF2958 after power on */
		adm8211_rf_write_syn_rfmd2958(dev, 0x1F, 0x00000);
		/* Initialize RF VCO Core Bias to maximum */
		adm8211_rf_write_syn_rfmd2958(dev, 0x0C, 0x3001F);
		/* Initialize IF PLL */
		adm8211_rf_write_syn_rfmd2958(dev, 0x01, 0x29C03);
		/* Initialize IF PLL Coarse Tuning */
		adm8211_rf_write_syn_rfmd2958(dev, 0x03, 0x1FF6F);
		/* Initialize RF PLL */
		adm8211_rf_write_syn_rfmd2958(dev, 0x04, 0x29403);
		/* Initialize RF PLL Coarse Tuning */
		adm8211_rf_write_syn_rfmd2958(dev, 0x07, 0x1456F);
		/* Initialize TX gain and filter BW (R9) */
		adm8211_rf_write_syn_rfmd2958(dev, 0x09,
			(priv->transceiver_type == ADM8211_RFMD2958 ?
			 0x10050 : 0x00050));
		/* Initialize CAL register */
		adm8211_rf_write_syn_rfmd2958(dev, 0x08, 0x3FFF8);
		break;

	case ADM8211_MAX2820:
		adm8211_rf_write_syn_max2820(dev, 0x1, 0x01E);
		adm8211_rf_write_syn_max2820(dev, 0x2, 0x001);
		adm8211_rf_write_syn_max2820(dev, 0x3, 0x054);
		adm8211_rf_write_syn_max2820(dev, 0x4, 0x310);
		adm8211_rf_write_syn_max2820(dev, 0x5, 0x000);
		break;

	case ADM8211_AL2210L:
		adm8211_rf_write_syn_al2210l(dev, 0x0, 0x0196C);
		adm8211_rf_write_syn_al2210l(dev, 0x1, 0x007CB);
		adm8211_rf_write_syn_al2210l(dev, 0x2, 0x3582F);
		adm8211_rf_write_syn_al2210l(dev, 0x3, 0x010A9);
		adm8211_rf_write_syn_al2210l(dev, 0x4, 0x77280);
		adm8211_rf_write_syn_al2210l(dev, 0x5, 0x45641);
		adm8211_rf_write_syn_al2210l(dev, 0x6, 0xEA130);
		adm8211_rf_write_syn_al2210l(dev, 0x7, 0x80000);
		adm8211_rf_write_syn_al2210l(dev, 0x8, 0x7850F);
		adm8211_rf_write_syn_al2210l(dev, 0x9, 0xF900C);
		adm8211_rf_write_syn_al2210l(dev, 0xA, 0x00000);
		adm8211_rf_write_syn_al2210l(dev, 0xB, 0x00000);
		break;

	case ADM8211_RFMD2948:
	default:
		/* No synthesizer initialization required. */
		break;
	}
}
837
/* One-time initialization of the baseband processor: program the MMI
 * addresses for the detected BBP variant, apply chip-revision fixups,
 * init the synthesizer, then load the BBP register tables for the
 * configured transceiver. Always returns 0. */
static int adm8211_hw_init_bbp(struct ieee80211_hw *dev)
{
	struct adm8211_priv *priv = dev->priv;
	u32 reg;

	/* write addresses */
	if (priv->bbp_type == ADM8211_TYPE_INTERSIL) {
		ADM8211_CSR_WRITE(MMIWA,  0x100E0C0A);
		ADM8211_CSR_WRITE(MMIRD0, 0x00007C7E);
		ADM8211_CSR_WRITE(MMIRD1, 0x00100000);
	} else if (priv->bbp_type == ADM8211_TYPE_RFMD ||
		   priv->bbp_type == ADM8211_TYPE_ADMTEK) {
		/* check specific BBP type */
		switch (priv->specific_bbptype) {
		case ADM8211_BBP_RFMD3000:
		case ADM8211_BBP_RFMD3002:
			ADM8211_CSR_WRITE(MMIWA,  0x00009101);
			ADM8211_CSR_WRITE(MMIRD0, 0x00000301);
			break;

		case ADM8211_BBP_ADM8011:
			ADM8211_CSR_WRITE(MMIWA,  0x00008903);
			ADM8211_CSR_WRITE(MMIRD0, 0x00001716);

			reg = ADM8211_CSR_READ(BBPCTL);
			reg &= ~ADM8211_BBPCTL_TYPE;
			reg |= 0x5 << 18;
			ADM8211_CSR_WRITE(BBPCTL, reg);
			break;
		}

		/* Chip-revision-specific MMI/SYNCTL quirks. */
		switch (priv->pdev->revision) {
		case ADM8211_REV_CA:
			if (priv->transceiver_type == ADM8211_RFMD2958 ||
			    priv->transceiver_type == ADM8211_RFMD2958_RF3000_CONTROL_POWER ||
			    priv->transceiver_type == ADM8211_RFMD2948)
				ADM8211_CSR_WRITE(SYNCTL, 0x1 << 22);
			else if (priv->transceiver_type == ADM8211_MAX2820 ||
				 priv->transceiver_type == ADM8211_AL2210L)
				ADM8211_CSR_WRITE(SYNCTL, 0x3 << 22);
			break;

		case ADM8211_REV_BA:
			reg  = ADM8211_CSR_READ(MMIRD1);
			reg &= 0x0000FFFF;
			reg |= 0x7e100000;
			ADM8211_CSR_WRITE(MMIRD1, reg);
			break;

		case ADM8211_REV_AB:
		case ADM8211_REV_AF:
		default:
			ADM8211_CSR_WRITE(MMIRD1, 0x7e100000);
			break;
		}

		/* For RFMD */
		ADM8211_CSR_WRITE(MACTEST, 0x800);
	}

	adm8211_hw_init_syn(dev);

	/* Set RF Power control IF pin to PE1+PHYRST# */
	ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_SELRF |
			  ADM8211_SYNRF_PE1 | ADM8211_SYNRF_PHYRST);
	ADM8211_CSR_READ(SYNRF);
	msleep(20);

	/* write BBP regs */
	if (priv->bbp_type == ADM8211_TYPE_RFMD) {
		/* RF3000 BBP */
		/* another set:
		 * 11: c8
		 * 14: 14
		 * 15: 50 (chan 1..13; chan 14: d0)
		 * 1c: 00
		 * 1d: 84
		 */
		adm8211_write_bbp(dev, RF3000_CCA_CTRL, 0x80);
		/* antenna selection: diversity */
		adm8211_write_bbp(dev, RF3000_DIVERSITY__RSSI, 0x80);
		adm8211_write_bbp(dev, RF3000_TX_VAR_GAIN__TX_LEN_EXT, 0x74);
		adm8211_write_bbp(dev, RF3000_LOW_GAIN_CALIB, 0x38);
		adm8211_write_bbp(dev, RF3000_HIGH_GAIN_CALIB, 0x40);

		/* Older EEPROMs lack the cr28/cr29 calibration values. */
		if (priv->eeprom->major_version < 2) {
			adm8211_write_bbp(dev, 0x1c, 0x00);
			adm8211_write_bbp(dev, 0x1d, 0x80);
		} else {
			if (priv->pdev->revision == ADM8211_REV_BA)
				adm8211_write_bbp(dev, 0x1c, priv->eeprom->cr28);
			else
				adm8211_write_bbp(dev, 0x1c, 0x00);

			adm8211_write_bbp(dev, 0x1d, priv->eeprom->cr29);
		}
	} else if (priv->bbp_type == ADM8211_TYPE_ADMTEK) {
		/* reset baseband */
		adm8211_write_bbp(dev, 0x00, 0xFF);
		/* antenna selection: diversity */
		adm8211_write_bbp(dev, 0x07, 0x0A);

		/* TODO: find documentation for this */
		switch (priv->transceiver_type) {
		case ADM8211_RFMD2958:
		case ADM8211_RFMD2958_RF3000_CONTROL_POWER:
			adm8211_write_bbp(dev, 0x00, 0x00);
			adm8211_write_bbp(dev, 0x01, 0x00);
			adm8211_write_bbp(dev, 0x02, 0x00);
			adm8211_write_bbp(dev, 0x03, 0x00);
			adm8211_write_bbp(dev, 0x06, 0x0f);
			adm8211_write_bbp(dev, 0x09, 0x00);
			adm8211_write_bbp(dev, 0x0a, 0x00);
			adm8211_write_bbp(dev, 0x0b, 0x00);
			adm8211_write_bbp(dev, 0x0c, 0x00);
			adm8211_write_bbp(dev, 0x0f, 0xAA);
			adm8211_write_bbp(dev, 0x10, 0x8c);
			adm8211_write_bbp(dev, 0x11, 0x43);
			adm8211_write_bbp(dev, 0x18, 0x40);
			adm8211_write_bbp(dev, 0x20, 0x23);
			adm8211_write_bbp(dev, 0x21, 0x02);
			adm8211_write_bbp(dev, 0x22, 0x28);
			adm8211_write_bbp(dev, 0x23, 0x30);
			adm8211_write_bbp(dev, 0x24, 0x2d);
			adm8211_write_bbp(dev, 0x28, 0x35);
			adm8211_write_bbp(dev, 0x2a, 0x8c);
			adm8211_write_bbp(dev, 0x2b, 0x81);
			adm8211_write_bbp(dev, 0x2c, 0x44);
			adm8211_write_bbp(dev, 0x2d, 0x0A);
			adm8211_write_bbp(dev, 0x29, 0x40);
			adm8211_write_bbp(dev, 0x60, 0x08);
			adm8211_write_bbp(dev, 0x64, 0x01);
			break;

		case ADM8211_MAX2820:
			adm8211_write_bbp(dev, 0x00, 0x00);
			adm8211_write_bbp(dev, 0x01, 0x00);
			adm8211_write_bbp(dev, 0x02, 0x00);
			adm8211_write_bbp(dev, 0x03, 0x00);
			adm8211_write_bbp(dev, 0x06, 0x0f);
			adm8211_write_bbp(dev, 0x09, 0x05);
			adm8211_write_bbp(dev, 0x0a, 0x02);
			adm8211_write_bbp(dev, 0x0b, 0x00);
			adm8211_write_bbp(dev, 0x0c, 0x0f);
			adm8211_write_bbp(dev, 0x0f, 0x55);
			adm8211_write_bbp(dev, 0x10, 0x8d);
			adm8211_write_bbp(dev, 0x11, 0x43);
			adm8211_write_bbp(dev, 0x18, 0x4a);
			adm8211_write_bbp(dev, 0x20, 0x20);
			adm8211_write_bbp(dev, 0x21, 0x02);
			adm8211_write_bbp(dev, 0x22, 0x23);
			adm8211_write_bbp(dev, 0x23, 0x30);
			adm8211_write_bbp(dev, 0x24, 0x2d);
			adm8211_write_bbp(dev, 0x2a, 0x8c);
			adm8211_write_bbp(dev, 0x2b, 0x81);
			adm8211_write_bbp(dev, 0x2c, 0x44);
			adm8211_write_bbp(dev, 0x29, 0x4a);
			adm8211_write_bbp(dev, 0x60, 0x2b);
			adm8211_write_bbp(dev, 0x64, 0x01);
			break;

		case ADM8211_AL2210L:
			adm8211_write_bbp(dev, 0x00, 0x00);
			adm8211_write_bbp(dev, 0x01, 0x00);
			adm8211_write_bbp(dev, 0x02, 0x00);
			adm8211_write_bbp(dev, 0x03, 0x00);
			adm8211_write_bbp(dev, 0x06, 0x0f);
			adm8211_write_bbp(dev, 0x07, 0x05);
			adm8211_write_bbp(dev, 0x08, 0x03);
			adm8211_write_bbp(dev, 0x09, 0x00);
			adm8211_write_bbp(dev, 0x0a, 0x00);
			adm8211_write_bbp(dev, 0x0b, 0x00);
			adm8211_write_bbp(dev, 0x0c, 0x10);
			adm8211_write_bbp(dev, 0x0f, 0x55);
			adm8211_write_bbp(dev, 0x10, 0x8d);
			adm8211_write_bbp(dev, 0x11, 0x43);
			adm8211_write_bbp(dev, 0x18, 0x4a);
			adm8211_write_bbp(dev, 0x20, 0x20);
			adm8211_write_bbp(dev, 0x21, 0x02);
			adm8211_write_bbp(dev, 0x22, 0x23);
			adm8211_write_bbp(dev, 0x23, 0x30);
			adm8211_write_bbp(dev, 0x24, 0x2d);
			adm8211_write_bbp(dev, 0x2a, 0xaa);
			adm8211_write_bbp(dev, 0x2b, 0x81);
			adm8211_write_bbp(dev, 0x2c, 0x44);
			adm8211_write_bbp(dev, 0x29, 0xfa);
			adm8211_write_bbp(dev, 0x60, 0x2d);
			adm8211_write_bbp(dev, 0x64, 0x01);
			break;

		case ADM8211_RFMD2948:
			break;

		default:
			wiphy_debug(dev->wiphy, "unsupported transceiver %d\n",
				    priv->transceiver_type);
			break;
		}
	} else
		wiphy_debug(dev->wiphy, "unsupported BBP %d\n", priv->bbp_type);

	ADM8211_CSR_WRITE(SYNRF, 0);

	/* Set RF CAL control source to MAC control */
	reg = ADM8211_CSR_READ(SYNCTL);
	reg |= ADM8211_SYNCTL_SELCAL;
	ADM8211_CSR_WRITE(SYNCTL, reg);

	return 0;
}
1048
1049 /* configures hw beacons/probe responses */
1050 static int adm8211_set_rate(struct ieee80211_hw *dev)
1051 {
1052 struct adm8211_priv *priv = dev->priv;
1053 u32 reg;
1054 int i = 0;
1055 u8 rate_buf[12] = {0};
1056
1057 /* write supported rates */
1058 if (priv->pdev->revision != ADM8211_REV_BA) {
1059 rate_buf[0] = ARRAY_SIZE(adm8211_rates);
1060 for (i = 0; i < ARRAY_SIZE(adm8211_rates); i++)
1061 rate_buf[i + 1] = (adm8211_rates[i].bitrate / 5) | 0x80;
1062 } else {
1063 /* workaround for rev BA specific bug */
1064 rate_buf[0] = 0x04;
1065 rate_buf[1] = 0x82;
1066 rate_buf[2] = 0x04;
1067 rate_buf[3] = 0x0b;
1068 rate_buf[4] = 0x16;
1069 }
1070
1071 adm8211_write_sram_bytes(dev, ADM8211_SRAM_SUPP_RATE, rate_buf,
1072 ARRAY_SIZE(adm8211_rates) + 1);
1073
1074 reg = ADM8211_CSR_READ(PLCPHD) & 0x00FFFFFF; /* keep bits 0-23 */
1075 reg |= 1 << 15; /* short preamble */
1076 reg |= 110 << 24;
1077 ADM8211_CSR_WRITE(PLCPHD, reg);
1078
1079 /* MTMLT = 512 TU (max TX MSDU lifetime)
1080 * BCNTSIG = plcp_signal (beacon, probe resp, and atim TX rate)
1081 * SRTYLIM = 224 (short retry limit, TX header value is default) */
1082 ADM8211_CSR_WRITE(TXLMT, (512 << 16) | (110 << 8) | (224 << 0));
1083
1084 return 0;
1085 }
1086
/* Full MAC initialization: bus parameters (MWI/burst), timing registers,
 * rate table, RF reset, BBP bring-up, and WEP/interrupt quiescing.
 * Called with the device idle; leaves interrupts masked and acked. */
static void adm8211_hw_init(struct ieee80211_hw *dev)
{
	struct adm8211_priv *priv = dev->priv;
	u32 reg;
	u8 cline;

	reg = ADM8211_CSR_READ(PAR);
	reg |= ADM8211_PAR_MRLE | ADM8211_PAR_MRME;
	reg &= ~(ADM8211_PAR_BAR | ADM8211_PAR_CAL);

	/* If memory-write-invalidate is available, encode the PCI cache
	 * line size (in dwords) into the bus-mode register. */
	if (!pci_set_mwi(priv->pdev)) {
		reg |= 0x1 << 24;
		pci_read_config_byte(priv->pdev, PCI_CACHE_LINE_SIZE, &cline);

		switch (cline) {
		case  0x8:
			reg |= (0x1 << 14);
			break;
		case 0x10:
			reg |= (0x2 << 14);
			break;
		case 0x20:
			reg |= (0x3 << 14);
			break;
		default:
			reg |= (0x0 << 14);
			break;
		}
	}

	ADM8211_CSR_WRITE(PAR, reg);

	reg = ADM8211_CSR_READ(CSR_TEST1);
	reg &= ~(0xF << 28);
	reg |= (1 << 28) | (1 << 31);
	ADM8211_CSR_WRITE(CSR_TEST1, reg);

	/* lose link after 4 lost beacons */
	reg = (0x04 << 21) | ADM8211_WCSR_TSFTWE | ADM8211_WCSR_LSOE;
	ADM8211_CSR_WRITE(WCSR, reg);

	/* Disable APM, enable receive FIFO threshold, and set drain receive
	 * threshold to store-and-forward */
	reg = ADM8211_CSR_READ(CMDR);
	reg &= ~(ADM8211_CMDR_APM | ADM8211_CMDR_DRT);
	reg |= ADM8211_CMDR_RTE | ADM8211_CMDR_DRT_SF;
	ADM8211_CSR_WRITE(CMDR, reg);

	adm8211_set_rate(dev);

	/* 4-bit values:
	 * PWR1UP   = 8 * 2 ms
	 * PWR0PAPE = 8 us or 5 us
	 * PWR1PAPE = 1 us or 3 us
	 * PWR0TRSW = 5 us
	 * PWR1TRSW = 12 us
	 * PWR0PE2  = 13 us
	 * PWR1PE2  = 1 us
	 * PWR0TXPE = 8 or 6 */
	if (priv->pdev->revision < ADM8211_REV_CA)
		ADM8211_CSR_WRITE(TOFS2, 0x8815cd18);
	else
		ADM8211_CSR_WRITE(TOFS2, 0x8535cd16);

	/* Enable store and forward for transmit */
	priv->nar = ADM8211_NAR_SF | ADM8211_NAR_PB;
	ADM8211_CSR_WRITE(NAR, priv->nar);

	/* Reset RF */
	ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_RADIO);
	ADM8211_CSR_READ(SYNRF);
	msleep(10);
	ADM8211_CSR_WRITE(SYNRF, 0);
	ADM8211_CSR_READ(SYNRF);
	msleep(5);

	/* Set CFP Max Duration to 0x10 TU */
	reg = ADM8211_CSR_READ(CFPP);
	reg &= ~(0xffff << 8);
	reg |= 0x0010 << 8;
	ADM8211_CSR_WRITE(CFPP, reg);

	/* USCNT = 0x16 (number of system clocks, 22 MHz, in 1us
	 * TUCNT = 0x3ff - Tu counter 1024 us  */
	ADM8211_CSR_WRITE(TOFS0, (0x16 << 24) | 0x3ff);

	/* SLOT=20 us, SIFS=110 cycles of 22 MHz (5 us),
	 * DIFS=50 us, EIFS=100 us */
	if (priv->pdev->revision < ADM8211_REV_CA)
		ADM8211_CSR_WRITE(IFST, (20 << 23) | (110 << 15) |
					(50 << 9) | 100);
	else
		ADM8211_CSR_WRITE(IFST, (20 << 23) | (24 << 15) |
					(50 << 9) | 100);

	/* PCNT = 1 (MAC idle time awake/sleep, unit S)
	 * RMRD = 2346 * 8 + 1 us (max RX duration)  */
	ADM8211_CSR_WRITE(RMD, (1 << 16) | 18769);

	/* MART=65535 us, MIRT=256 us, TSFTOFST=0 us */
	ADM8211_CSR_WRITE(RSPT, 0xffffff00);

	/* Initialize BBP (and SYN) */
	adm8211_hw_init_bbp(dev);

	/* make sure interrupts are off */
	ADM8211_CSR_WRITE(IER, 0);

	/* ACK interrupts */
	ADM8211_CSR_WRITE(STSR, ADM8211_CSR_READ(STSR));

	/* Setup WEP (turns it off for now) */
	reg = ADM8211_CSR_READ(MACTEST);
	reg &= ~(7 << 20);
	ADM8211_CSR_WRITE(MACTEST, reg);

	reg  = ADM8211_CSR_READ(WEPCTL);
	reg &= ~ADM8211_WEPCTL_WEPENABLE;
	reg |= ADM8211_WEPCTL_WEPRXBYP;
	ADM8211_CSR_WRITE(WEPCTL, reg);

	/* Clear the missed-packet counter. */
	ADM8211_CSR_READ(LPC);
}
1211
1212 static int adm8211_hw_reset(struct ieee80211_hw *dev)
1213 {
1214 struct adm8211_priv *priv = dev->priv;
1215 u32 reg, tmp;
1216 int timeout = 100;
1217
1218 /* Power-on issue */
1219 /* TODO: check if this is necessary */
1220 ADM8211_CSR_WRITE(FRCTL, 0);
1221
1222 /* Reset the chip */
1223 tmp = ADM8211_CSR_READ(PAR);
1224 ADM8211_CSR_WRITE(PAR, ADM8211_PAR_SWR);
1225
1226 while ((ADM8211_CSR_READ(PAR) & ADM8211_PAR_SWR) && timeout--)
1227 msleep(50);
1228
1229 if (timeout <= 0)
1230 return -ETIMEDOUT;
1231
1232 ADM8211_CSR_WRITE(PAR, tmp);
1233
1234 if (priv->pdev->revision == ADM8211_REV_BA &&
1235 (priv->transceiver_type == ADM8211_RFMD2958_RF3000_CONTROL_POWER ||
1236 priv->transceiver_type == ADM8211_RFMD2958)) {
1237 reg = ADM8211_CSR_READ(CSR_TEST1);
1238 reg |= (1 << 4) | (1 << 5);
1239 ADM8211_CSR_WRITE(CSR_TEST1, reg);
1240 } else if (priv->pdev->revision == ADM8211_REV_CA) {
1241 reg = ADM8211_CSR_READ(CSR_TEST1);
1242 reg &= ~((1 << 4) | (1 << 5));
1243 ADM8211_CSR_WRITE(CSR_TEST1, reg);
1244 }
1245
1246 ADM8211_CSR_WRITE(FRCTL, 0);
1247
1248 reg = ADM8211_CSR_READ(CSR_TEST0);
1249 reg |= ADM8211_CSR_TEST0_EPRLD; /* EEPROM Recall */
1250 ADM8211_CSR_WRITE(CSR_TEST0, reg);
1251
1252 adm8211_clear_sram(dev);
1253
1254 return 0;
1255 }
1256
1257 static u64 adm8211_get_tsft(struct ieee80211_hw *dev,
1258 struct ieee80211_vif *vif)
1259 {
1260 struct adm8211_priv *priv = dev->priv;
1261 u32 tsftl;
1262 u64 tsft;
1263
1264 tsftl = ADM8211_CSR_READ(TSFTL);
1265 tsft = ADM8211_CSR_READ(TSFTH);
1266 tsft <<= 32;
1267 tsft |= tsftl;
1268
1269 return tsft;
1270 }
1271
1272 static void adm8211_set_interval(struct ieee80211_hw *dev,
1273 unsigned short bi, unsigned short li)
1274 {
1275 struct adm8211_priv *priv = dev->priv;
1276 u32 reg;
1277
1278 /* BP (beacon interval) = data->beacon_interval
1279 * LI (listen interval) = data->listen_interval (in beacon intervals) */
1280 reg = (bi << 16) | li;
1281 ADM8211_CSR_WRITE(BPLI, reg);
1282 }
1283
1284 static void adm8211_set_bssid(struct ieee80211_hw *dev, const u8 *bssid)
1285 {
1286 struct adm8211_priv *priv = dev->priv;
1287 u32 reg;
1288
1289 ADM8211_CSR_WRITE(BSSID0, le32_to_cpu(*(__le32 *)bssid));
1290 reg = ADM8211_CSR_READ(ABDA1);
1291 reg &= 0x0000ffff;
1292 reg |= (bssid[4] << 16) | (bssid[5] << 24);
1293 ADM8211_CSR_WRITE(ABDA1, reg);
1294 }
1295
1296 static int adm8211_config(struct ieee80211_hw *dev, u32 changed)
1297 {
1298 struct adm8211_priv *priv = dev->priv;
1299 struct ieee80211_conf *conf = &dev->conf;
1300 int channel =
1301 ieee80211_frequency_to_channel(conf->chandef.chan->center_freq);
1302
1303 if (channel != priv->channel) {
1304 priv->channel = channel;
1305 adm8211_rf_set_channel(dev, priv->channel);
1306 }
1307
1308 return 0;
1309 }
1310
1311 static void adm8211_bss_info_changed(struct ieee80211_hw *dev,
1312 struct ieee80211_vif *vif,
1313 struct ieee80211_bss_conf *conf,
1314 u32 changes)
1315 {
1316 struct adm8211_priv *priv = dev->priv;
1317
1318 if (!(changes & BSS_CHANGED_BSSID))
1319 return;
1320
1321 if (!ether_addr_equal(conf->bssid, priv->bssid)) {
1322 adm8211_set_bssid(dev, conf->bssid);
1323 memcpy(priv->bssid, conf->bssid, ETH_ALEN);
1324 }
1325 }
1326
1327 static u64 adm8211_prepare_multicast(struct ieee80211_hw *hw,
1328 struct netdev_hw_addr_list *mc_list)
1329 {
1330 unsigned int bit_nr;
1331 u32 mc_filter[2];
1332 struct netdev_hw_addr *ha;
1333
1334 mc_filter[1] = mc_filter[0] = 0;
1335
1336 netdev_hw_addr_list_for_each(ha, mc_list) {
1337 bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
1338
1339 bit_nr &= 0x3F;
1340 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
1341 }
1342
1343 return mc_filter[0] | ((u64)(mc_filter[1]) << 32);
1344 }
1345
/* mac80211 filter callback: program the multicast hash (MAR0/MAR1) and
 * promiscuous/all-multi NAR bits, and retarget the BSSID filter for
 * beacon/probe-response promiscuity. Only FIF_ALLMULTI is honored in
 * *total_flags on return. */
static void adm8211_configure_filter(struct ieee80211_hw *dev,
				     unsigned int changed_flags,
				     unsigned int *total_flags,
				     u64 multicast)
{
	static const u8 bcast[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
	struct adm8211_priv *priv = dev->priv;
	unsigned int new_flags;
	u32 mc_filter[2];

	/* Unpack the 64-bit hash built by adm8211_prepare_multicast(). */
	mc_filter[0] = multicast;
	mc_filter[1] = multicast >> 32;

	new_flags = 0;

	/* An all-ones hash is treated the same as an explicit all-multi
	 * request. */
	if (*total_flags & FIF_ALLMULTI || multicast == ~(0ULL)) {
		new_flags |= FIF_ALLMULTI;
		priv->nar &= ~ADM8211_NAR_PR;
		priv->nar |= ADM8211_NAR_MM;
		mc_filter[1] = mc_filter[0] = ~0;
	} else {
		priv->nar &= ~(ADM8211_NAR_MM | ADM8211_NAR_PR);
	}

	ADM8211_IDLE_RX();

	ADM8211_CSR_WRITE(MAR0, mc_filter[0]);
	ADM8211_CSR_WRITE(MAR1, mc_filter[1]);
	ADM8211_CSR_READ(NAR);

	/* NOTE(review): toggling a hw flag at runtime is unusual — mac80211
	 * generally expects hw->flags to be fixed after registration;
	 * confirm this is intentional. In promiscuous mode the hardware
	 * passes frames with their FCS attached. */
	if (priv->nar & ADM8211_NAR_PR)
		ieee80211_hw_set(dev, RX_INCLUDES_FCS);
	else
		__clear_bit(IEEE80211_HW_RX_INCLUDES_FCS, dev->flags);

	/* Accept beacons/probe responses from any BSS by matching the
	 * broadcast BSSID. */
	if (*total_flags & FIF_BCN_PRBRESP_PROMISC)
		adm8211_set_bssid(dev, bcast);
	else
		adm8211_set_bssid(dev, priv->bssid);

	ADM8211_RESTORE();

	*total_flags = new_flags;
}
1390
1391 static int adm8211_add_interface(struct ieee80211_hw *dev,
1392 struct ieee80211_vif *vif)
1393 {
1394 struct adm8211_priv *priv = dev->priv;
1395 if (priv->mode != NL80211_IFTYPE_MONITOR)
1396 return -EOPNOTSUPP;
1397
1398 switch (vif->type) {
1399 case NL80211_IFTYPE_STATION:
1400 priv->mode = vif->type;
1401 break;
1402 default:
1403 return -EOPNOTSUPP;
1404 }
1405
1406 ADM8211_IDLE();
1407
1408 ADM8211_CSR_WRITE(PAR0, le32_to_cpu(*(__le32 *)vif->addr));
1409 ADM8211_CSR_WRITE(PAR1, le16_to_cpu(*(__le16 *)(vif->addr + 4)));
1410
1411 adm8211_update_mode(dev);
1412
1413 ADM8211_RESTORE();
1414
1415 return 0;
1416 }
1417
1418 static void adm8211_remove_interface(struct ieee80211_hw *dev,
1419 struct ieee80211_vif *vif)
1420 {
1421 struct adm8211_priv *priv = dev->priv;
1422 priv->mode = NL80211_IFTYPE_MONITOR;
1423 }
1424
/* Initialize RX and TX descriptor rings: allocate and DMA-map an RX buffer
 * for every descriptor, clear the TX ring, and point the NIC at both ring
 * base addresses. Always returns 0.
 * NOTE(review): if dev_alloc_skb() fails partway, the loop breaks and the
 * function still returns 0, leaving trailing RX descriptors without the
 * OWN bit — presumably the hardware just never fills them; confirm the
 * caller tolerates a partially populated ring. */
static int adm8211_init_rings(struct ieee80211_hw *dev)
{
	struct adm8211_priv *priv = dev->priv;
	struct adm8211_desc *desc = NULL;
	struct adm8211_rx_ring_info *rx_info;
	struct adm8211_tx_ring_info *tx_info;
	unsigned int i;

	/* First pass: reset every RX descriptor's status/length. */
	for (i = 0; i < priv->rx_ring_size; i++) {
		desc = &priv->rx_ring[i];
		desc->status = 0;
		desc->length = cpu_to_le32(RX_PKT_SIZE);
		priv->rx_buffers[i].skb = NULL;
	}
	/* Mark the end of RX ring; hw returns to base address after this
	 * descriptor */
	desc->length |= cpu_to_le32(RDES1_CONTROL_RER);

	/* Second pass: attach a mapped skb to each descriptor and hand it
	 * to the hardware (OWN bit). */
	for (i = 0; i < priv->rx_ring_size; i++) {
		desc = &priv->rx_ring[i];
		rx_info = &priv->rx_buffers[i];

		rx_info->skb = dev_alloc_skb(RX_PKT_SIZE);
		if (rx_info->skb == NULL)
			break;
		rx_info->mapping = pci_map_single(priv->pdev,
						  skb_tail_pointer(rx_info->skb),
						  RX_PKT_SIZE,
						  PCI_DMA_FROMDEVICE);
		desc->buffer1 = cpu_to_le32(rx_info->mapping);
		desc->status = cpu_to_le32(RDES0_STATUS_OWN | RDES0_STATUS_SQL);
	}

	/* Setup TX ring. TX buffers descriptors will be filled in as needed */
	for (i = 0; i < priv->tx_ring_size; i++) {
		desc = &priv->tx_ring[i];
		tx_info = &priv->tx_buffers[i];

		tx_info->skb = NULL;
		tx_info->mapping = 0;
		desc->status = 0;
	}
	/* Last TX descriptor carries the end-of-ring flag. */
	desc->length = cpu_to_le32(TDES1_CONTROL_TER);

	priv->cur_rx = priv->cur_tx = priv->dirty_tx = 0;
	ADM8211_CSR_WRITE(RDB, priv->rx_ring_dma);
	ADM8211_CSR_WRITE(TDBD, priv->tx_ring_dma);

	return 0;
}
1475
1476 static void adm8211_free_rings(struct ieee80211_hw *dev)
1477 {
1478 struct adm8211_priv *priv = dev->priv;
1479 unsigned int i;
1480
1481 for (i = 0; i < priv->rx_ring_size; i++) {
1482 if (!priv->rx_buffers[i].skb)
1483 continue;
1484
1485 pci_unmap_single(
1486 priv->pdev,
1487 priv->rx_buffers[i].mapping,
1488 RX_PKT_SIZE, PCI_DMA_FROMDEVICE);
1489
1490 dev_kfree_skb(priv->rx_buffers[i].skb);
1491 }
1492
1493 for (i = 0; i < priv->tx_ring_size; i++) {
1494 if (!priv->tx_buffers[i].skb)
1495 continue;
1496
1497 pci_unmap_single(priv->pdev,
1498 priv->tx_buffers[i].mapping,
1499 priv->tx_buffers[i].skb->len,
1500 PCI_DMA_TODEVICE);
1501
1502 dev_kfree_skb(priv->tx_buffers[i].skb);
1503 }
1504 }
1505
1506 static int adm8211_start(struct ieee80211_hw *dev)
1507 {
1508 struct adm8211_priv *priv = dev->priv;
1509 int retval;
1510
1511 /* Power up MAC and RF chips */
1512 retval = adm8211_hw_reset(dev);
1513 if (retval) {
1514 wiphy_err(dev->wiphy, "hardware reset failed\n");
1515 goto fail;
1516 }
1517
1518 retval = adm8211_init_rings(dev);
1519 if (retval) {
1520 wiphy_err(dev->wiphy, "failed to initialize rings\n");
1521 goto fail;
1522 }
1523
1524 /* Init hardware */
1525 adm8211_hw_init(dev);
1526 adm8211_rf_set_channel(dev, priv->channel);
1527
1528 retval = request_irq(priv->pdev->irq, adm8211_interrupt,
1529 IRQF_SHARED, "adm8211", dev);
1530 if (retval) {
1531 wiphy_err(dev->wiphy, "failed to register IRQ handler\n");
1532 goto fail;
1533 }
1534
1535 ADM8211_CSR_WRITE(IER, ADM8211_IER_NIE | ADM8211_IER_AIE |
1536 ADM8211_IER_RCIE | ADM8211_IER_TCIE |
1537 ADM8211_IER_TDUIE | ADM8211_IER_GPTIE);
1538 priv->mode = NL80211_IFTYPE_MONITOR;
1539 adm8211_update_mode(dev);
1540 ADM8211_CSR_WRITE(RDR, 0);
1541
1542 adm8211_set_interval(dev, 100, 10);
1543 return 0;
1544
1545 fail:
1546 return retval;
1547 }
1548
/* mac80211 .stop callback: quiesce the hardware, then release the IRQ and
 * ring buffers. The ordering matters: DMA (NAR) and interrupts (IER) are
 * disabled and the write flushed before free_irq()/adm8211_free_rings(),
 * so the device cannot raise interrupts or touch ring memory afterwards. */
static void adm8211_stop(struct ieee80211_hw *dev)
{
	struct adm8211_priv *priv = dev->priv;

	priv->mode = NL80211_IFTYPE_UNSPECIFIED;
	priv->nar = 0;
	ADM8211_CSR_WRITE(NAR, 0);	/* stop RX/TX DMA */
	ADM8211_CSR_WRITE(IER, 0);	/* mask all interrupts */
	ADM8211_CSR_READ(NAR);		/* flush posted writes */

	free_irq(priv->pdev->irq, dev);

	adm8211_free_rings(dev);
}
1563
1564 static void adm8211_calc_durations(int *dur, int *plcp, size_t payload_len, int len,
1565 int plcp_signal, int short_preamble)
1566 {
1567 /* Alternative calculation from NetBSD: */
1568
1569 /* IEEE 802.11b durations for DSSS PHY in microseconds */
1570 #define IEEE80211_DUR_DS_LONG_PREAMBLE 144
1571 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
1572 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24
1573 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
1574 #define IEEE80211_DUR_DS_SLOW_ACK 112
1575 #define IEEE80211_DUR_DS_FAST_ACK 56
1576 #define IEEE80211_DUR_DS_SLOW_CTS 112
1577 #define IEEE80211_DUR_DS_FAST_CTS 56
1578 #define IEEE80211_DUR_DS_SLOT 20
1579 #define IEEE80211_DUR_DS_SIFS 10
1580
1581 int remainder;
1582
1583 *dur = (80 * (24 + payload_len) + plcp_signal - 1)
1584 / plcp_signal;
1585
1586 if (plcp_signal <= PLCP_SIGNAL_2M)
1587 /* 1-2Mbps WLAN: send ACK/CTS at 1Mbps */
1588 *dur += 3 * (IEEE80211_DUR_DS_SIFS +
1589 IEEE80211_DUR_DS_SHORT_PREAMBLE +
1590 IEEE80211_DUR_DS_FAST_PLCPHDR) +
1591 IEEE80211_DUR_DS_SLOW_CTS + IEEE80211_DUR_DS_SLOW_ACK;
1592 else
1593 /* 5-11Mbps WLAN: send ACK/CTS at 2Mbps */
1594 *dur += 3 * (IEEE80211_DUR_DS_SIFS +
1595 IEEE80211_DUR_DS_SHORT_PREAMBLE +
1596 IEEE80211_DUR_DS_FAST_PLCPHDR) +
1597 IEEE80211_DUR_DS_FAST_CTS + IEEE80211_DUR_DS_FAST_ACK;
1598
1599 /* lengthen duration if long preamble */
1600 if (!short_preamble)
1601 *dur += 3 * (IEEE80211_DUR_DS_LONG_PREAMBLE -
1602 IEEE80211_DUR_DS_SHORT_PREAMBLE) +
1603 3 * (IEEE80211_DUR_DS_SLOW_PLCPHDR -
1604 IEEE80211_DUR_DS_FAST_PLCPHDR);
1605
1606
1607 *plcp = (80 * len) / plcp_signal;
1608 remainder = (80 * len) % plcp_signal;
1609 if (plcp_signal == PLCP_SIGNAL_11M &&
1610 remainder <= 30 && remainder > 0)
1611 *plcp = (*plcp | 0x8000) + 1;
1612 else if (remainder)
1613 (*plcp)++;
1614 }
1615
1616 /* Transmit skb w/adm8211_tx_hdr (802.11 header created by hardware) */
/* Transmit skb w/adm8211_tx_hdr (802.11 header created by hardware) */
/* Map the frame for DMA, fill the next TX descriptor under priv->lock, and
 * kick the transmitter. The OWN bit is written last (in ->status) so the
 * hardware never sees a half-initialized descriptor. */
static void adm8211_tx_raw(struct ieee80211_hw *dev, struct sk_buff *skb,
			   u16 plcp_signal,
			   size_t hdrlen)
{
	struct adm8211_priv *priv = dev->priv;
	unsigned long flags;
	dma_addr_t mapping;
	unsigned int entry;
	u32 flag;

	mapping = pci_map_single(priv->pdev, skb->data, skb->len,
				 PCI_DMA_TODEVICE);

	spin_lock_irqsave(&priv->lock, flags);

	/* Request a TX-complete interrupt (IC) once the ring is half full;
	 * otherwise just mark first/last segment of a single-buffer frame. */
	if (priv->cur_tx - priv->dirty_tx == priv->tx_ring_size / 2)
		flag = TDES1_CONTROL_IC | TDES1_CONTROL_LS | TDES1_CONTROL_FS;
	else
		flag = TDES1_CONTROL_LS | TDES1_CONTROL_FS;

	/* Nearly full: stop mac80211's queue until dirty_tx catches up. */
	if (priv->cur_tx - priv->dirty_tx == priv->tx_ring_size - 2)
		ieee80211_stop_queue(dev, 0);

	entry = priv->cur_tx % priv->tx_ring_size;

	priv->tx_buffers[entry].skb = skb;
	priv->tx_buffers[entry].mapping = mapping;
	priv->tx_buffers[entry].hdrlen = hdrlen;
	priv->tx_ring[entry].buffer1 = cpu_to_le32(mapping);

	/* Last descriptor in the ring: tell the hw to wrap (TER). */
	if (entry == priv->tx_ring_size - 1)
		flag |= TDES1_CONTROL_TER;
	priv->tx_ring[entry].length = cpu_to_le32(flag | skb->len);

	/* Set TX rate (SIGNAL field in PLCP PPDU format) */
	flag = TDES0_CONTROL_OWN | (plcp_signal << 20) | 8 /* ? */;
	priv->tx_ring[entry].status = cpu_to_le32(flag);

	priv->cur_tx++;

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Trigger transmit poll */
	ADM8211_CSR_WRITE(TDR, 0);
}
1662
1663 /* Put adm8211_tx_hdr on skb and transmit */
1664 static void adm8211_tx(struct ieee80211_hw *dev,
1665 struct ieee80211_tx_control *control,
1666 struct sk_buff *skb)
1667 {
1668 struct adm8211_tx_hdr *txhdr;
1669 size_t payload_len, hdrlen;
1670 int plcp, dur, len, plcp_signal, short_preamble;
1671 struct ieee80211_hdr *hdr;
1672 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1673 struct ieee80211_rate *txrate = ieee80211_get_tx_rate(dev, info);
1674 u8 rc_flags;
1675
1676 rc_flags = info->control.rates[0].flags;
1677 short_preamble = !!(rc_flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
1678 plcp_signal = txrate->bitrate;
1679
1680 hdr = (struct ieee80211_hdr *)skb->data;
1681 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1682 memcpy(skb->cb, skb->data, hdrlen);
1683 hdr = (struct ieee80211_hdr *)skb->cb;
1684 skb_pull(skb, hdrlen);
1685 payload_len = skb->len;
1686
1687 txhdr = (struct adm8211_tx_hdr *) skb_push(skb, sizeof(*txhdr));
1688 memset(txhdr, 0, sizeof(*txhdr));
1689 memcpy(txhdr->da, ieee80211_get_DA(hdr), ETH_ALEN);
1690 txhdr->signal = plcp_signal;
1691 txhdr->frame_body_size = cpu_to_le16(payload_len);
1692 txhdr->frame_control = hdr->frame_control;
1693
1694 len = hdrlen + payload_len + FCS_LEN;
1695
1696 txhdr->frag = cpu_to_le16(0x0FFF);
1697 adm8211_calc_durations(&dur, &plcp, payload_len,
1698 len, plcp_signal, short_preamble);
1699 txhdr->plcp_frag_head_len = cpu_to_le16(plcp);
1700 txhdr->plcp_frag_tail_len = cpu_to_le16(plcp);
1701 txhdr->dur_frag_head = cpu_to_le16(dur);
1702 txhdr->dur_frag_tail = cpu_to_le16(dur);
1703
1704 txhdr->header_control = cpu_to_le16(ADM8211_TXHDRCTL_ENABLE_EXTEND_HEADER);
1705
1706 if (short_preamble)
1707 txhdr->header_control |= cpu_to_le16(ADM8211_TXHDRCTL_SHORT_PREAMBLE);
1708
1709 if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS)
1710 txhdr->header_control |= cpu_to_le16(ADM8211_TXHDRCTL_ENABLE_RTS);
1711
1712 txhdr->retry_limit = info->control.rates[0].count;
1713
1714 adm8211_tx_raw(dev, skb, plcp_signal, hdrlen);
1715 }
1716
1717 static int adm8211_alloc_rings(struct ieee80211_hw *dev)
1718 {
1719 struct adm8211_priv *priv = dev->priv;
1720 unsigned int ring_size;
1721
1722 priv->rx_buffers = kmalloc(sizeof(*priv->rx_buffers) * priv->rx_ring_size +
1723 sizeof(*priv->tx_buffers) * priv->tx_ring_size, GFP_KERNEL);
1724 if (!priv->rx_buffers)
1725 return -ENOMEM;
1726
1727 priv->tx_buffers = (void *)priv->rx_buffers +
1728 sizeof(*priv->rx_buffers) * priv->rx_ring_size;
1729
1730 /* Allocate TX/RX descriptors */
1731 ring_size = sizeof(struct adm8211_desc) * priv->rx_ring_size +
1732 sizeof(struct adm8211_desc) * priv->tx_ring_size;
1733 priv->rx_ring = pci_alloc_consistent(priv->pdev, ring_size,
1734 &priv->rx_ring_dma);
1735
1736 if (!priv->rx_ring) {
1737 kfree(priv->rx_buffers);
1738 priv->rx_buffers = NULL;
1739 priv->tx_buffers = NULL;
1740 return -ENOMEM;
1741 }
1742
1743 priv->tx_ring = priv->rx_ring + priv->rx_ring_size;
1744 priv->tx_ring_dma = priv->rx_ring_dma +
1745 sizeof(struct adm8211_desc) * priv->rx_ring_size;
1746
1747 return 0;
1748 }
1749
/* mac80211 driver callbacks implemented by this driver. */
static const struct ieee80211_ops adm8211_ops = {
	.tx			= adm8211_tx,
	.start			= adm8211_start,
	.stop			= adm8211_stop,
	.add_interface		= adm8211_add_interface,
	.remove_interface	= adm8211_remove_interface,
	.config			= adm8211_config,
	.bss_info_changed	= adm8211_bss_info_changed,
	.prepare_multicast	= adm8211_prepare_multicast,
	.configure_filter	= adm8211_configure_filter,
	.get_stats		= adm8211_get_stats,
	.get_tsf		= adm8211_get_tsft
};
1763
1764 static int adm8211_probe(struct pci_dev *pdev,
1765 const struct pci_device_id *id)
1766 {
1767 struct ieee80211_hw *dev;
1768 struct adm8211_priv *priv;
1769 unsigned long mem_addr, mem_len;
1770 unsigned int io_addr, io_len;
1771 int err;
1772 u32 reg;
1773 u8 perm_addr[ETH_ALEN];
1774
1775 err = pci_enable_device(pdev);
1776 if (err) {
1777 printk(KERN_ERR "%s (adm8211): Cannot enable new PCI device\n",
1778 pci_name(pdev));
1779 return err;
1780 }
1781
1782 io_addr = pci_resource_start(pdev, 0);
1783 io_len = pci_resource_len(pdev, 0);
1784 mem_addr = pci_resource_start(pdev, 1);
1785 mem_len = pci_resource_len(pdev, 1);
1786 if (io_len < 256 || mem_len < 1024) {
1787 printk(KERN_ERR "%s (adm8211): Too short PCI resources\n",
1788 pci_name(pdev));
1789 goto err_disable_pdev;
1790 }
1791
1792
1793 /* check signature */
1794 pci_read_config_dword(pdev, 0x80 /* CR32 */, ®);
1795 if (reg != ADM8211_SIG1 && reg != ADM8211_SIG2) {
1796 printk(KERN_ERR "%s (adm8211): Invalid signature (0x%x)\n",
1797 pci_name(pdev), reg);
1798 goto err_disable_pdev;
1799 }
1800
1801 err = pci_request_regions(pdev, "adm8211");
1802 if (err) {
1803 printk(KERN_ERR "%s (adm8211): Cannot obtain PCI resources\n",
1804 pci_name(pdev));
1805 return err; /* someone else grabbed it? don't disable it */
1806 }
1807
1808 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) ||
1809 pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
1810 printk(KERN_ERR "%s (adm8211): No suitable DMA available\n",
1811 pci_name(pdev));
1812 goto err_free_reg;
1813 }
1814
1815 pci_set_master(pdev);
1816
1817 dev = ieee80211_alloc_hw(sizeof(*priv), &adm8211_ops);
1818 if (!dev) {
1819 printk(KERN_ERR "%s (adm8211): ieee80211 alloc failed\n",
1820 pci_name(pdev));
1821 err = -ENOMEM;
1822 goto err_free_reg;
1823 }
1824 priv = dev->priv;
1825 priv->pdev = pdev;
1826
1827 spin_lock_init(&priv->lock);
1828
1829 SET_IEEE80211_DEV(dev, &pdev->dev);
1830
1831 pci_set_drvdata(pdev, dev);
1832
1833 priv->map = pci_iomap(pdev, 1, mem_len);
1834 if (!priv->map)
1835 priv->map = pci_iomap(pdev, 0, io_len);
1836
1837 if (!priv->map) {
1838 printk(KERN_ERR "%s (adm8211): Cannot map device memory\n",
1839 pci_name(pdev));
1840 err = -ENOMEM;
1841 goto err_free_dev;
1842 }
1843
1844 priv->rx_ring_size = rx_ring_size;
1845 priv->tx_ring_size = tx_ring_size;
1846
1847 if (adm8211_alloc_rings(dev)) {
1848 printk(KERN_ERR "%s (adm8211): Cannot allocate TX/RX ring\n",
1849 pci_name(pdev));
1850 goto err_iounmap;
1851 }
1852
1853 *(__le32 *)perm_addr = cpu_to_le32(ADM8211_CSR_READ(PAR0));
1854 *(__le16 *)&perm_addr[4] =
1855 cpu_to_le16(ADM8211_CSR_READ(PAR1) & 0xFFFF);
1856
1857 if (!is_valid_ether_addr(perm_addr)) {
1858 printk(KERN_WARNING "%s (adm8211): Invalid hwaddr in EEPROM!\n",
1859 pci_name(pdev));
1860 eth_random_addr(perm_addr);
1861 }
1862 SET_IEEE80211_PERM_ADDR(dev, perm_addr);
1863
1864 dev->extra_tx_headroom = sizeof(struct adm8211_tx_hdr);
1865 /* dev->flags = RX_INCLUDES_FCS in promisc mode */
1866 ieee80211_hw_set(dev, SIGNAL_UNSPEC);
1867 dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
1868
1869 dev->max_signal = 100; /* FIXME: find better value */
1870
1871 dev->queues = 1; /* ADM8211C supports more, maybe ADM8211B too */
1872
1873 priv->retry_limit = 3;
1874 priv->ant_power = 0x40;
1875 priv->tx_power = 0x40;
1876 priv->lpf_cutoff = 0xFF;
1877 priv->lnags_threshold = 0xFF;
1878 priv->mode = NL80211_IFTYPE_UNSPECIFIED;
1879
1880 /* Power-on issue. EEPROM won't read correctly without */
1881 if (pdev->revision >= ADM8211_REV_BA) {
1882 ADM8211_CSR_WRITE(FRCTL, 0);
1883 ADM8211_CSR_READ(FRCTL);
1884 ADM8211_CSR_WRITE(FRCTL, 1);
1885 ADM8211_CSR_READ(FRCTL);
1886 msleep(100);
1887 }
1888
1889 err = adm8211_read_eeprom(dev);
1890 if (err) {
1891 printk(KERN_ERR "%s (adm8211): Can't alloc eeprom buffer\n",
1892 pci_name(pdev));
1893 goto err_free_desc;
1894 }
1895
1896 priv->channel = 1;
1897
1898 dev->wiphy->bands[NL80211_BAND_2GHZ] = &priv->band;
1899
1900 err = ieee80211_register_hw(dev);
1901 if (err) {
1902 printk(KERN_ERR "%s (adm8211): Cannot register device\n",
1903 pci_name(pdev));
1904 goto err_free_eeprom;
1905 }
1906
1907 wiphy_info(dev->wiphy, "hwaddr %pM, Rev 0x%02x\n",
1908 dev->wiphy->perm_addr, pdev->revision);
1909
1910 return 0;
1911
1912 err_free_eeprom:
1913 kfree(priv->eeprom);
1914
1915 err_free_desc:
1916 pci_free_consistent(pdev,
1917 sizeof(struct adm8211_desc) * priv->rx_ring_size +
1918 sizeof(struct adm8211_desc) * priv->tx_ring_size,
1919 priv->rx_ring, priv->rx_ring_dma);
1920 kfree(priv->rx_buffers);
1921
1922 err_iounmap:
1923 pci_iounmap(pdev, priv->map);
1924
1925 err_free_dev:
1926 ieee80211_free_hw(dev);
1927
1928 err_free_reg:
1929 pci_release_regions(pdev);
1930
1931 err_disable_pdev:
1932 pci_disable_device(pdev);
1933 return err;
1934 }
1935
1936
1937 static void adm8211_remove(struct pci_dev *pdev)
1938 {
1939 struct ieee80211_hw *dev = pci_get_drvdata(pdev);
1940 struct adm8211_priv *priv;
1941
1942 if (!dev)
1943 return;
1944
1945 ieee80211_unregister_hw(dev);
1946
1947 priv = dev->priv;
1948
1949 pci_free_consistent(pdev,
1950 sizeof(struct adm8211_desc) * priv->rx_ring_size +
1951 sizeof(struct adm8211_desc) * priv->tx_ring_size,
1952 priv->rx_ring, priv->rx_ring_dma);
1953
1954 kfree(priv->rx_buffers);
1955 kfree(priv->eeprom);
1956 pci_iounmap(pdev, priv->map);
1957 pci_release_regions(pdev);
1958 pci_disable_device(pdev);
1959 ieee80211_free_hw(dev);
1960 }
1961
1962
1963 #ifdef CONFIG_PM
/* Legacy PCI suspend: save config space and enter the target power state.
 * mac80211 has already stopped the interface via adm8211_stop() by now. */
static int adm8211_suspend(struct pci_dev *pdev, pm_message_t state)
{
	pci_save_state(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
1970
/* Legacy PCI resume: return to full power (D0) and restore config space. */
static int adm8211_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	return 0;
}
1977 #endif /* CONFIG_PM */
1978
1979
MODULE_DEVICE_TABLE(pci, adm8211_pci_id_table);

/* TODO: implement enable_wake */
/* PCI driver glue; uses the legacy suspend/resume callbacks when PM is on. */
static struct pci_driver adm8211_driver = {
	.name		= "adm8211",
	.id_table	= adm8211_pci_id_table,
	.probe		= adm8211_probe,
	.remove		= adm8211_remove,
#ifdef CONFIG_PM
	.suspend	= adm8211_suspend,
	.resume		= adm8211_resume,
#endif /* CONFIG_PM */
};

/* Generates module init/exit that register/unregister the PCI driver. */
module_pci_driver(adm8211_driver);
1995
1996
1997
1998
1999
2000 /* LDV_COMMENT_BEGIN_MAIN */
2001 #ifdef LDV_MAIN0_sequence_infinite_withcheck_stateful
2002
2003 /*###########################################################################*/
2004
2005 /*############## Driver Environment Generator 0.2 output ####################*/
2006
2007 /*###########################################################################*/
2008
2009
2010
2011 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test if all kernel resources are correctly released by driver before driver will be unloaded. */
2012 void ldv_check_final_state(void);
2013
2014 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result. */
2015 void ldv_check_return_value(int res);
2016
2017 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result of probe() function. */
2018 void ldv_check_return_value_probe(int res);
2019
2020 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Initializes the model. */
2021 void ldv_initialize(void);
2022
2023 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Reinitializes the model between distinct model function calls. */
2024 void ldv_handler_precall(void);
2025
2026 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Returns arbitrary interger value. */
2027 int nondet_int(void);
2028
2029 /* LDV_COMMENT_VAR_DECLARE_LDV Special variable for LDV verifier. */
2030 int LDV_IN_INTERRUPT;
2031
2032 /* LDV_COMMENT_FUNCTION_MAIN Main function for LDV verifier. */
2033 void ldv_main0_sequence_infinite_withcheck_stateful(void) {
2034
2035
2036
2037 /* LDV_COMMENT_BEGIN_VARIABLE_DECLARATION_PART */
2038 /*============================= VARIABLE DECLARATION PART =============================*/
2039 /** STRUCT: struct type: eeprom_93cx6, struct name: eeprom **/
2040 /* content: static void adm8211_eeprom_register_read(struct eeprom_93cx6 *eeprom)*/
2041 /* LDV_COMMENT_END_PREP */
2042 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "adm8211_eeprom_register_read" */
2043 struct eeprom_93cx6 * var_group1;
2044 /* LDV_COMMENT_BEGIN_PREP */
2045 #define ADM8211_INT(x) \
2046 do { \
2047 if (unlikely(stsr & ADM8211_STSR_ ## x)) \
2048 wiphy_debug(dev->wiphy, "%s\n", #x); \
2049 } while (0)
2050 #undef ADM8211_INT
2051 #define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
2052 static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \
2053 u16 addr, u32 value) { \
2054 struct adm8211_priv *priv = dev->priv; \
2055 unsigned int i; \
2056 u32 reg, bitbuf; \
2057 \
2058 value &= v_mask; \
2059 addr &= a_mask; \
2060 bitbuf = (value << v_shift) | (addr << a_shift); \
2061 \
2062 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \
2063 ADM8211_CSR_READ(SYNRF); \
2064 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \
2065 ADM8211_CSR_READ(SYNRF); \
2066 \
2067 if (prewrite) { \
2068 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
2069 ADM8211_CSR_READ(SYNRF); \
2070 } \
2071 \
2072 for (i = 0; i <= bits; i++) { \
2073 if (bitbuf & (1 << (bits - i))) \
2074 reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \
2075 else \
2076 reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \
2077 \
2078 ADM8211_CSR_WRITE(SYNRF, reg); \
2079 ADM8211_CSR_READ(SYNRF); \
2080 \
2081 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
2082 ADM8211_CSR_READ(SYNRF); \
2083 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
2084 ADM8211_CSR_READ(SYNRF); \
2085 } \
2086 \
2087 if (postwrite == 1) { \
2088 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
2089 ADM8211_CSR_READ(SYNRF); \
2090 } \
2091 if (postwrite == 2) { \
2092 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
2093 ADM8211_CSR_READ(SYNRF); \
2094 } \
2095 \
2096 ADM8211_CSR_WRITE(SYNRF, 0); \
2097 ADM8211_CSR_READ(SYNRF); \
2098 }
2099 #undef WRITE_SYN
2100 #define IEEE80211_DUR_DS_LONG_PREAMBLE 144
2101 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
2102 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24
2103 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
2104 #define IEEE80211_DUR_DS_SLOW_ACK 112
2105 #define IEEE80211_DUR_DS_FAST_ACK 56
2106 #define IEEE80211_DUR_DS_SLOW_CTS 112
2107 #define IEEE80211_DUR_DS_FAST_CTS 56
2108 #define IEEE80211_DUR_DS_SLOT 20
2109 #define IEEE80211_DUR_DS_SIFS 10
2110 #ifdef CONFIG_PM
2111 #endif
2112 #ifdef CONFIG_PM
2113 #endif
2114 /* LDV_COMMENT_END_PREP */
2115 /* content: static void adm8211_eeprom_register_write(struct eeprom_93cx6 *eeprom)*/
2116 /* LDV_COMMENT_END_PREP */
2117 /* LDV_COMMENT_BEGIN_PREP */
2118 #define ADM8211_INT(x) \
2119 do { \
2120 if (unlikely(stsr & ADM8211_STSR_ ## x)) \
2121 wiphy_debug(dev->wiphy, "%s\n", #x); \
2122 } while (0)
2123 #undef ADM8211_INT
2124 #define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
2125 static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \
2126 u16 addr, u32 value) { \
2127 struct adm8211_priv *priv = dev->priv; \
2128 unsigned int i; \
2129 u32 reg, bitbuf; \
2130 \
2131 value &= v_mask; \
2132 addr &= a_mask; \
2133 bitbuf = (value << v_shift) | (addr << a_shift); \
2134 \
2135 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \
2136 ADM8211_CSR_READ(SYNRF); \
2137 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \
2138 ADM8211_CSR_READ(SYNRF); \
2139 \
2140 if (prewrite) { \
2141 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
2142 ADM8211_CSR_READ(SYNRF); \
2143 } \
2144 \
2145 for (i = 0; i <= bits; i++) { \
2146 if (bitbuf & (1 << (bits - i))) \
2147 reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \
2148 else \
2149 reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \
2150 \
2151 ADM8211_CSR_WRITE(SYNRF, reg); \
2152 ADM8211_CSR_READ(SYNRF); \
2153 \
2154 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
2155 ADM8211_CSR_READ(SYNRF); \
2156 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
2157 ADM8211_CSR_READ(SYNRF); \
2158 } \
2159 \
2160 if (postwrite == 1) { \
2161 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
2162 ADM8211_CSR_READ(SYNRF); \
2163 } \
2164 if (postwrite == 2) { \
2165 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
2166 ADM8211_CSR_READ(SYNRF); \
2167 } \
2168 \
2169 ADM8211_CSR_WRITE(SYNRF, 0); \
2170 ADM8211_CSR_READ(SYNRF); \
2171 }
2172 #undef WRITE_SYN
2173 #define IEEE80211_DUR_DS_LONG_PREAMBLE 144
2174 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
2175 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24
2176 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
2177 #define IEEE80211_DUR_DS_SLOW_ACK 112
2178 #define IEEE80211_DUR_DS_FAST_ACK 56
2179 #define IEEE80211_DUR_DS_SLOW_CTS 112
2180 #define IEEE80211_DUR_DS_FAST_CTS 56
2181 #define IEEE80211_DUR_DS_SLOT 20
2182 #define IEEE80211_DUR_DS_SIFS 10
2183 #ifdef CONFIG_PM
2184 #endif
2185 #ifdef CONFIG_PM
2186 #endif
2187 /* LDV_COMMENT_END_PREP */
2188
2189 /** STRUCT: struct type: ieee80211_ops, struct name: adm8211_ops **/
2190 /* content: static void adm8211_tx(struct ieee80211_hw *dev, struct ieee80211_tx_control *control, struct sk_buff *skb)*/
2191 /* LDV_COMMENT_BEGIN_PREP */
2192 #define ADM8211_INT(x) \
2193 do { \
2194 if (unlikely(stsr & ADM8211_STSR_ ## x)) \
2195 wiphy_debug(dev->wiphy, "%s\n", #x); \
2196 } while (0)
2197 #undef ADM8211_INT
2198 #define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
2199 static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \
2200 u16 addr, u32 value) { \
2201 struct adm8211_priv *priv = dev->priv; \
2202 unsigned int i; \
2203 u32 reg, bitbuf; \
2204 \
2205 value &= v_mask; \
2206 addr &= a_mask; \
2207 bitbuf = (value << v_shift) | (addr << a_shift); \
2208 \
2209 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \
2210 ADM8211_CSR_READ(SYNRF); \
2211 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \
2212 ADM8211_CSR_READ(SYNRF); \
2213 \
2214 if (prewrite) { \
2215 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
2216 ADM8211_CSR_READ(SYNRF); \
2217 } \
2218 \
2219 for (i = 0; i <= bits; i++) { \
2220 if (bitbuf & (1 << (bits - i))) \
2221 reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \
2222 else \
2223 reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \
2224 \
2225 ADM8211_CSR_WRITE(SYNRF, reg); \
2226 ADM8211_CSR_READ(SYNRF); \
2227 \
2228 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
2229 ADM8211_CSR_READ(SYNRF); \
2230 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
2231 ADM8211_CSR_READ(SYNRF); \
2232 } \
2233 \
2234 if (postwrite == 1) { \
2235 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
2236 ADM8211_CSR_READ(SYNRF); \
2237 } \
2238 if (postwrite == 2) { \
2239 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
2240 ADM8211_CSR_READ(SYNRF); \
2241 } \
2242 \
2243 ADM8211_CSR_WRITE(SYNRF, 0); \
2244 ADM8211_CSR_READ(SYNRF); \
2245 }
2246 #undef WRITE_SYN
2247 #define IEEE80211_DUR_DS_LONG_PREAMBLE 144
2248 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
2249 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24
2250 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
2251 #define IEEE80211_DUR_DS_SLOW_ACK 112
2252 #define IEEE80211_DUR_DS_FAST_ACK 56
2253 #define IEEE80211_DUR_DS_SLOW_CTS 112
2254 #define IEEE80211_DUR_DS_FAST_CTS 56
2255 #define IEEE80211_DUR_DS_SLOT 20
2256 #define IEEE80211_DUR_DS_SIFS 10
2257 /* LDV_COMMENT_END_PREP */
2258 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "adm8211_tx" */
2259 struct ieee80211_hw * var_group2;
2260 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "adm8211_tx" */
2261 struct ieee80211_tx_control * var_group3;
2262 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "adm8211_tx" */
2263 struct sk_buff * var_adm8211_tx_33_p2;
2264 /* LDV_COMMENT_BEGIN_PREP */
2265 #ifdef CONFIG_PM
2266 #endif
2267 #ifdef CONFIG_PM
2268 #endif
2269 /* LDV_COMMENT_END_PREP */
2270 /* content: static int adm8211_start(struct ieee80211_hw *dev)*/
2271 /* LDV_COMMENT_BEGIN_PREP */
2272 #define ADM8211_INT(x) \
2273 do { \
2274 if (unlikely(stsr & ADM8211_STSR_ ## x)) \
2275 wiphy_debug(dev->wiphy, "%s\n", #x); \
2276 } while (0)
2277 #undef ADM8211_INT
2278 #define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
2279 static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \
2280 u16 addr, u32 value) { \
2281 struct adm8211_priv *priv = dev->priv; \
2282 unsigned int i; \
2283 u32 reg, bitbuf; \
2284 \
2285 value &= v_mask; \
2286 addr &= a_mask; \
2287 bitbuf = (value << v_shift) | (addr << a_shift); \
2288 \
2289 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \
2290 ADM8211_CSR_READ(SYNRF); \
2291 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \
2292 ADM8211_CSR_READ(SYNRF); \
2293 \
2294 if (prewrite) { \
2295 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
2296 ADM8211_CSR_READ(SYNRF); \
2297 } \
2298 \
2299 for (i = 0; i <= bits; i++) { \
2300 if (bitbuf & (1 << (bits - i))) \
2301 reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \
2302 else \
2303 reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \
2304 \
2305 ADM8211_CSR_WRITE(SYNRF, reg); \
2306 ADM8211_CSR_READ(SYNRF); \
2307 \
2308 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
2309 ADM8211_CSR_READ(SYNRF); \
2310 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
2311 ADM8211_CSR_READ(SYNRF); \
2312 } \
2313 \
2314 if (postwrite == 1) { \
2315 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
2316 ADM8211_CSR_READ(SYNRF); \
2317 } \
2318 if (postwrite == 2) { \
2319 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
2320 ADM8211_CSR_READ(SYNRF); \
2321 } \
2322 \
2323 ADM8211_CSR_WRITE(SYNRF, 0); \
2324 ADM8211_CSR_READ(SYNRF); \
2325 }
2326 #undef WRITE_SYN
2327 /* LDV_COMMENT_END_PREP */
2328 /* LDV_COMMENT_BEGIN_PREP */
2329 #define IEEE80211_DUR_DS_LONG_PREAMBLE 144
2330 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
2331 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24
2332 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
2333 #define IEEE80211_DUR_DS_SLOW_ACK 112
2334 #define IEEE80211_DUR_DS_FAST_ACK 56
2335 #define IEEE80211_DUR_DS_SLOW_CTS 112
2336 #define IEEE80211_DUR_DS_FAST_CTS 56
2337 #define IEEE80211_DUR_DS_SLOT 20
2338 #define IEEE80211_DUR_DS_SIFS 10
2339 #ifdef CONFIG_PM
2340 #endif
2341 #ifdef CONFIG_PM
2342 #endif
2343 /* LDV_COMMENT_END_PREP */
2344 /* content: static void adm8211_stop(struct ieee80211_hw *dev)*/
2345 /* LDV_COMMENT_BEGIN_PREP */
2346 #define ADM8211_INT(x) \
2347 do { \
2348 if (unlikely(stsr & ADM8211_STSR_ ## x)) \
2349 wiphy_debug(dev->wiphy, "%s\n", #x); \
2350 } while (0)
2351 #undef ADM8211_INT
2352 #define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
2353 static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \
2354 u16 addr, u32 value) { \
2355 struct adm8211_priv *priv = dev->priv; \
2356 unsigned int i; \
2357 u32 reg, bitbuf; \
2358 \
2359 value &= v_mask; \
2360 addr &= a_mask; \
2361 bitbuf = (value << v_shift) | (addr << a_shift); \
2362 \
2363 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \
2364 ADM8211_CSR_READ(SYNRF); \
2365 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \
2366 ADM8211_CSR_READ(SYNRF); \
2367 \
2368 if (prewrite) { \
2369 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
2370 ADM8211_CSR_READ(SYNRF); \
2371 } \
2372 \
2373 for (i = 0; i <= bits; i++) { \
2374 if (bitbuf & (1 << (bits - i))) \
2375 reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \
2376 else \
2377 reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \
2378 \
2379 ADM8211_CSR_WRITE(SYNRF, reg); \
2380 ADM8211_CSR_READ(SYNRF); \
2381 \
2382 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
2383 ADM8211_CSR_READ(SYNRF); \
2384 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
2385 ADM8211_CSR_READ(SYNRF); \
2386 } \
2387 \
2388 if (postwrite == 1) { \
2389 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
2390 ADM8211_CSR_READ(SYNRF); \
2391 } \
2392 if (postwrite == 2) { \
2393 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
2394 ADM8211_CSR_READ(SYNRF); \
2395 } \
2396 \
2397 ADM8211_CSR_WRITE(SYNRF, 0); \
2398 ADM8211_CSR_READ(SYNRF); \
2399 }
2400 #undef WRITE_SYN
2401 /* LDV_COMMENT_END_PREP */
2402 /* LDV_COMMENT_BEGIN_PREP */
2403 #define IEEE80211_DUR_DS_LONG_PREAMBLE 144
2404 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
2405 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24
2406 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
2407 #define IEEE80211_DUR_DS_SLOW_ACK 112
2408 #define IEEE80211_DUR_DS_FAST_ACK 56
2409 #define IEEE80211_DUR_DS_SLOW_CTS 112
2410 #define IEEE80211_DUR_DS_FAST_CTS 56
2411 #define IEEE80211_DUR_DS_SLOT 20
2412 #define IEEE80211_DUR_DS_SIFS 10
2413 #ifdef CONFIG_PM
2414 #endif
2415 #ifdef CONFIG_PM
2416 #endif
2417 /* LDV_COMMENT_END_PREP */
2418 /* content: static int adm8211_add_interface(struct ieee80211_hw *dev, struct ieee80211_vif *vif)*/
2419 /* LDV_COMMENT_BEGIN_PREP */
2420 #define ADM8211_INT(x) \
2421 do { \
2422 if (unlikely(stsr & ADM8211_STSR_ ## x)) \
2423 wiphy_debug(dev->wiphy, "%s\n", #x); \
2424 } while (0)
2425 #undef ADM8211_INT
2426 #define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
2427 static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \
2428 u16 addr, u32 value) { \
2429 struct adm8211_priv *priv = dev->priv; \
2430 unsigned int i; \
2431 u32 reg, bitbuf; \
2432 \
2433 value &= v_mask; \
2434 addr &= a_mask; \
2435 bitbuf = (value << v_shift) | (addr << a_shift); \
2436 \
2437 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \
2438 ADM8211_CSR_READ(SYNRF); \
2439 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \
2440 ADM8211_CSR_READ(SYNRF); \
2441 \
2442 if (prewrite) { \
2443 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
2444 ADM8211_CSR_READ(SYNRF); \
2445 } \
2446 \
2447 for (i = 0; i <= bits; i++) { \
2448 if (bitbuf & (1 << (bits - i))) \
2449 reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \
2450 else \
2451 reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \
2452 \
2453 ADM8211_CSR_WRITE(SYNRF, reg); \
2454 ADM8211_CSR_READ(SYNRF); \
2455 \
2456 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
2457 ADM8211_CSR_READ(SYNRF); \
2458 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
2459 ADM8211_CSR_READ(SYNRF); \
2460 } \
2461 \
2462 if (postwrite == 1) { \
2463 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
2464 ADM8211_CSR_READ(SYNRF); \
2465 } \
2466 if (postwrite == 2) { \
2467 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
2468 ADM8211_CSR_READ(SYNRF); \
2469 } \
2470 \
2471 ADM8211_CSR_WRITE(SYNRF, 0); \
2472 ADM8211_CSR_READ(SYNRF); \
2473 }
2474 #undef WRITE_SYN
2475 /* LDV_COMMENT_END_PREP */
2476 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "adm8211_add_interface" */
2477 struct ieee80211_vif * var_group4;
2478 /* LDV_COMMENT_BEGIN_PREP */
2479 #define IEEE80211_DUR_DS_LONG_PREAMBLE 144
2480 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
2481 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24
2482 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
2483 #define IEEE80211_DUR_DS_SLOW_ACK 112
2484 #define IEEE80211_DUR_DS_FAST_ACK 56
2485 #define IEEE80211_DUR_DS_SLOW_CTS 112
2486 #define IEEE80211_DUR_DS_FAST_CTS 56
2487 #define IEEE80211_DUR_DS_SLOT 20
2488 #define IEEE80211_DUR_DS_SIFS 10
2489 #ifdef CONFIG_PM
2490 #endif
2491 #ifdef CONFIG_PM
2492 #endif
2493 /* LDV_COMMENT_END_PREP */
2494 /* content: static void adm8211_remove_interface(struct ieee80211_hw *dev, struct ieee80211_vif *vif)*/
2495 /* LDV_COMMENT_BEGIN_PREP */
2496 #define ADM8211_INT(x) \
2497 do { \
2498 if (unlikely(stsr & ADM8211_STSR_ ## x)) \
2499 wiphy_debug(dev->wiphy, "%s\n", #x); \
2500 } while (0)
2501 #undef ADM8211_INT
2502 #define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
2503 static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \
2504 u16 addr, u32 value) { \
2505 struct adm8211_priv *priv = dev->priv; \
2506 unsigned int i; \
2507 u32 reg, bitbuf; \
2508 \
2509 value &= v_mask; \
2510 addr &= a_mask; \
2511 bitbuf = (value << v_shift) | (addr << a_shift); \
2512 \
2513 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \
2514 ADM8211_CSR_READ(SYNRF); \
2515 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \
2516 ADM8211_CSR_READ(SYNRF); \
2517 \
2518 if (prewrite) { \
2519 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
2520 ADM8211_CSR_READ(SYNRF); \
2521 } \
2522 \
2523 for (i = 0; i <= bits; i++) { \
2524 if (bitbuf & (1 << (bits - i))) \
2525 reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \
2526 else \
2527 reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \
2528 \
2529 ADM8211_CSR_WRITE(SYNRF, reg); \
2530 ADM8211_CSR_READ(SYNRF); \
2531 \
2532 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
2533 ADM8211_CSR_READ(SYNRF); \
2534 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
2535 ADM8211_CSR_READ(SYNRF); \
2536 } \
2537 \
2538 if (postwrite == 1) { \
2539 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
2540 ADM8211_CSR_READ(SYNRF); \
2541 } \
2542 if (postwrite == 2) { \
2543 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
2544 ADM8211_CSR_READ(SYNRF); \
2545 } \
2546 \
2547 ADM8211_CSR_WRITE(SYNRF, 0); \
2548 ADM8211_CSR_READ(SYNRF); \
2549 }
2550 #undef WRITE_SYN
2551 /* LDV_COMMENT_END_PREP */
2552 /* LDV_COMMENT_BEGIN_PREP */
2553 #define IEEE80211_DUR_DS_LONG_PREAMBLE 144
2554 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
2555 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24
2556 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
2557 #define IEEE80211_DUR_DS_SLOW_ACK 112
2558 #define IEEE80211_DUR_DS_FAST_ACK 56
2559 #define IEEE80211_DUR_DS_SLOW_CTS 112
2560 #define IEEE80211_DUR_DS_FAST_CTS 56
2561 #define IEEE80211_DUR_DS_SLOT 20
2562 #define IEEE80211_DUR_DS_SIFS 10
2563 #ifdef CONFIG_PM
2564 #endif
2565 #ifdef CONFIG_PM
2566 #endif
2567 /* LDV_COMMENT_END_PREP */
2568 /* content: static int adm8211_config(struct ieee80211_hw *dev, u32 changed)*/
2569 /* LDV_COMMENT_BEGIN_PREP */
2570 #define ADM8211_INT(x) \
2571 do { \
2572 if (unlikely(stsr & ADM8211_STSR_ ## x)) \
2573 wiphy_debug(dev->wiphy, "%s\n", #x); \
2574 } while (0)
2575 #undef ADM8211_INT
2576 #define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
2577 static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \
2578 u16 addr, u32 value) { \
2579 struct adm8211_priv *priv = dev->priv; \
2580 unsigned int i; \
2581 u32 reg, bitbuf; \
2582 \
2583 value &= v_mask; \
2584 addr &= a_mask; \
2585 bitbuf = (value << v_shift) | (addr << a_shift); \
2586 \
2587 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \
2588 ADM8211_CSR_READ(SYNRF); \
2589 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \
2590 ADM8211_CSR_READ(SYNRF); \
2591 \
2592 if (prewrite) { \
2593 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
2594 ADM8211_CSR_READ(SYNRF); \
2595 } \
2596 \
2597 for (i = 0; i <= bits; i++) { \
2598 if (bitbuf & (1 << (bits - i))) \
2599 reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \
2600 else \
2601 reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \
2602 \
2603 ADM8211_CSR_WRITE(SYNRF, reg); \
2604 ADM8211_CSR_READ(SYNRF); \
2605 \
2606 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
2607 ADM8211_CSR_READ(SYNRF); \
2608 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
2609 ADM8211_CSR_READ(SYNRF); \
2610 } \
2611 \
2612 if (postwrite == 1) { \
2613 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
2614 ADM8211_CSR_READ(SYNRF); \
2615 } \
2616 if (postwrite == 2) { \
2617 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
2618 ADM8211_CSR_READ(SYNRF); \
2619 } \
2620 \
2621 ADM8211_CSR_WRITE(SYNRF, 0); \
2622 ADM8211_CSR_READ(SYNRF); \
2623 }
2624 #undef WRITE_SYN
2625 /* LDV_COMMENT_END_PREP */
2626 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "adm8211_config" */
2627 u32 var_adm8211_config_21_p1;
2628 /* LDV_COMMENT_BEGIN_PREP */
2629 #define IEEE80211_DUR_DS_LONG_PREAMBLE 144
2630 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
2631 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24
2632 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
2633 #define IEEE80211_DUR_DS_SLOW_ACK 112
2634 #define IEEE80211_DUR_DS_FAST_ACK 56
2635 #define IEEE80211_DUR_DS_SLOW_CTS 112
2636 #define IEEE80211_DUR_DS_FAST_CTS 56
2637 #define IEEE80211_DUR_DS_SLOT 20
2638 #define IEEE80211_DUR_DS_SIFS 10
2639 #ifdef CONFIG_PM
2640 #endif
2641 #ifdef CONFIG_PM
2642 #endif
2643 /* LDV_COMMENT_END_PREP */
2644 /* content: static void adm8211_bss_info_changed(struct ieee80211_hw *dev, struct ieee80211_vif *vif, struct ieee80211_bss_conf *conf, u32 changes)*/
2645 /* LDV_COMMENT_BEGIN_PREP */
2646 #define ADM8211_INT(x) \
2647 do { \
2648 if (unlikely(stsr & ADM8211_STSR_ ## x)) \
2649 wiphy_debug(dev->wiphy, "%s\n", #x); \
2650 } while (0)
2651 #undef ADM8211_INT
2652 #define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
2653 static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \
2654 u16 addr, u32 value) { \
2655 struct adm8211_priv *priv = dev->priv; \
2656 unsigned int i; \
2657 u32 reg, bitbuf; \
2658 \
2659 value &= v_mask; \
2660 addr &= a_mask; \
2661 bitbuf = (value << v_shift) | (addr << a_shift); \
2662 \
2663 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \
2664 ADM8211_CSR_READ(SYNRF); \
2665 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \
2666 ADM8211_CSR_READ(SYNRF); \
2667 \
2668 if (prewrite) { \
2669 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
2670 ADM8211_CSR_READ(SYNRF); \
2671 } \
2672 \
2673 for (i = 0; i <= bits; i++) { \
2674 if (bitbuf & (1 << (bits - i))) \
2675 reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \
2676 else \
2677 reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \
2678 \
2679 ADM8211_CSR_WRITE(SYNRF, reg); \
2680 ADM8211_CSR_READ(SYNRF); \
2681 \
2682 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
2683 ADM8211_CSR_READ(SYNRF); \
2684 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
2685 ADM8211_CSR_READ(SYNRF); \
2686 } \
2687 \
2688 if (postwrite == 1) { \
2689 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
2690 ADM8211_CSR_READ(SYNRF); \
2691 } \
2692 if (postwrite == 2) { \
2693 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
2694 ADM8211_CSR_READ(SYNRF); \
2695 } \
2696 \
2697 ADM8211_CSR_WRITE(SYNRF, 0); \
2698 ADM8211_CSR_READ(SYNRF); \
2699 }
2700 #undef WRITE_SYN
2701 /* LDV_COMMENT_END_PREP */
2702 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "adm8211_bss_info_changed" */
2703 struct ieee80211_bss_conf * var_adm8211_bss_info_changed_22_p2;
2704 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "adm8211_bss_info_changed" */
2705 u32 var_adm8211_bss_info_changed_22_p3;
2706 /* LDV_COMMENT_BEGIN_PREP */
2707 #define IEEE80211_DUR_DS_LONG_PREAMBLE 144
2708 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
2709 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24
2710 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
2711 #define IEEE80211_DUR_DS_SLOW_ACK 112
2712 #define IEEE80211_DUR_DS_FAST_ACK 56
2713 #define IEEE80211_DUR_DS_SLOW_CTS 112
2714 #define IEEE80211_DUR_DS_FAST_CTS 56
2715 #define IEEE80211_DUR_DS_SLOT 20
2716 #define IEEE80211_DUR_DS_SIFS 10
2717 #ifdef CONFIG_PM
2718 #endif
2719 #ifdef CONFIG_PM
2720 #endif
2721 /* LDV_COMMENT_END_PREP */
2722 /* content: static u64 adm8211_prepare_multicast(struct ieee80211_hw *hw, struct netdev_hw_addr_list *mc_list)*/
2723 /* LDV_COMMENT_BEGIN_PREP */
2724 #define ADM8211_INT(x) \
2725 do { \
2726 if (unlikely(stsr & ADM8211_STSR_ ## x)) \
2727 wiphy_debug(dev->wiphy, "%s\n", #x); \
2728 } while (0)
2729 #undef ADM8211_INT
2730 #define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
2731 static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \
2732 u16 addr, u32 value) { \
2733 struct adm8211_priv *priv = dev->priv; \
2734 unsigned int i; \
2735 u32 reg, bitbuf; \
2736 \
2737 value &= v_mask; \
2738 addr &= a_mask; \
2739 bitbuf = (value << v_shift) | (addr << a_shift); \
2740 \
2741 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \
2742 ADM8211_CSR_READ(SYNRF); \
2743 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \
2744 ADM8211_CSR_READ(SYNRF); \
2745 \
2746 if (prewrite) { \
2747 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
2748 ADM8211_CSR_READ(SYNRF); \
2749 } \
2750 \
2751 for (i = 0; i <= bits; i++) { \
2752 if (bitbuf & (1 << (bits - i))) \
2753 reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \
2754 else \
2755 reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \
2756 \
2757 ADM8211_CSR_WRITE(SYNRF, reg); \
2758 ADM8211_CSR_READ(SYNRF); \
2759 \
2760 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
2761 ADM8211_CSR_READ(SYNRF); \
2762 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
2763 ADM8211_CSR_READ(SYNRF); \
2764 } \
2765 \
2766 if (postwrite == 1) { \
2767 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
2768 ADM8211_CSR_READ(SYNRF); \
2769 } \
2770 if (postwrite == 2) { \
2771 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
2772 ADM8211_CSR_READ(SYNRF); \
2773 } \
2774 \
2775 ADM8211_CSR_WRITE(SYNRF, 0); \
2776 ADM8211_CSR_READ(SYNRF); \
2777 }
2778 #undef WRITE_SYN
2779 /* LDV_COMMENT_END_PREP */
2780 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "adm8211_prepare_multicast" */
2781 struct netdev_hw_addr_list * var_group5;
2782 /* LDV_COMMENT_BEGIN_PREP */
2783 #define IEEE80211_DUR_DS_LONG_PREAMBLE 144
2784 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
2785 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24
2786 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
2787 #define IEEE80211_DUR_DS_SLOW_ACK 112
2788 #define IEEE80211_DUR_DS_FAST_ACK 56
2789 #define IEEE80211_DUR_DS_SLOW_CTS 112
2790 #define IEEE80211_DUR_DS_FAST_CTS 56
2791 #define IEEE80211_DUR_DS_SLOT 20
2792 #define IEEE80211_DUR_DS_SIFS 10
2793 #ifdef CONFIG_PM
2794 #endif
2795 #ifdef CONFIG_PM
2796 #endif
2797 /* LDV_COMMENT_END_PREP */
2798 /* content: static void adm8211_configure_filter(struct ieee80211_hw *dev, unsigned int changed_flags, unsigned int *total_flags, u64 multicast)*/
2799 /* LDV_COMMENT_BEGIN_PREP */
2800 #define ADM8211_INT(x) \
2801 do { \
2802 if (unlikely(stsr & ADM8211_STSR_ ## x)) \
2803 wiphy_debug(dev->wiphy, "%s\n", #x); \
2804 } while (0)
2805 #undef ADM8211_INT
2806 #define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
2807 static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \
2808 u16 addr, u32 value) { \
2809 struct adm8211_priv *priv = dev->priv; \
2810 unsigned int i; \
2811 u32 reg, bitbuf; \
2812 \
2813 value &= v_mask; \
2814 addr &= a_mask; \
2815 bitbuf = (value << v_shift) | (addr << a_shift); \
2816 \
2817 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \
2818 ADM8211_CSR_READ(SYNRF); \
2819 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \
2820 ADM8211_CSR_READ(SYNRF); \
2821 \
2822 if (prewrite) { \
2823 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
2824 ADM8211_CSR_READ(SYNRF); \
2825 } \
2826 \
2827 for (i = 0; i <= bits; i++) { \
2828 if (bitbuf & (1 << (bits - i))) \
2829 reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \
2830 else \
2831 reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \
2832 \
2833 ADM8211_CSR_WRITE(SYNRF, reg); \
2834 ADM8211_CSR_READ(SYNRF); \
2835 \
2836 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
2837 ADM8211_CSR_READ(SYNRF); \
2838 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
2839 ADM8211_CSR_READ(SYNRF); \
2840 } \
2841 \
2842 if (postwrite == 1) { \
2843 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
2844 ADM8211_CSR_READ(SYNRF); \
2845 } \
2846 if (postwrite == 2) { \
2847 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
2848 ADM8211_CSR_READ(SYNRF); \
2849 } \
2850 \
2851 ADM8211_CSR_WRITE(SYNRF, 0); \
2852 ADM8211_CSR_READ(SYNRF); \
2853 }
2854 #undef WRITE_SYN
2855 /* LDV_COMMENT_END_PREP */
2856 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "adm8211_configure_filter" */
2857 unsigned int var_adm8211_configure_filter_24_p1;
2858 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "adm8211_configure_filter" */
2859 unsigned int * var_adm8211_configure_filter_24_p2;
2860 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "adm8211_configure_filter" */
2861 u64 var_adm8211_configure_filter_24_p3;
2862 /* LDV_COMMENT_BEGIN_PREP */
2863 #define IEEE80211_DUR_DS_LONG_PREAMBLE 144
2864 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
2865 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24
2866 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
2867 #define IEEE80211_DUR_DS_SLOW_ACK 112
2868 #define IEEE80211_DUR_DS_FAST_ACK 56
2869 #define IEEE80211_DUR_DS_SLOW_CTS 112
2870 #define IEEE80211_DUR_DS_FAST_CTS 56
2871 #define IEEE80211_DUR_DS_SLOT 20
2872 #define IEEE80211_DUR_DS_SIFS 10
2873 #ifdef CONFIG_PM
2874 #endif
2875 #ifdef CONFIG_PM
2876 #endif
2877 /* LDV_COMMENT_END_PREP */
2878 /* content: static int adm8211_get_stats(struct ieee80211_hw *dev, struct ieee80211_low_level_stats *stats)*/
2879 /* LDV_COMMENT_END_PREP */
2880 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "adm8211_get_stats" */
2881 struct ieee80211_low_level_stats * var_group6;
2882 /* LDV_COMMENT_BEGIN_PREP */
2883 #define ADM8211_INT(x) \
2884 do { \
2885 if (unlikely(stsr & ADM8211_STSR_ ## x)) \
2886 wiphy_debug(dev->wiphy, "%s\n", #x); \
2887 } while (0)
2888 #undef ADM8211_INT
2889 #define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
2890 static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \
2891 u16 addr, u32 value) { \
2892 struct adm8211_priv *priv = dev->priv; \
2893 unsigned int i; \
2894 u32 reg, bitbuf; \
2895 \
2896 value &= v_mask; \
2897 addr &= a_mask; \
2898 bitbuf = (value << v_shift) | (addr << a_shift); \
2899 \
2900 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \
2901 ADM8211_CSR_READ(SYNRF); \
2902 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \
2903 ADM8211_CSR_READ(SYNRF); \
2904 \
2905 if (prewrite) { \
2906 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
2907 ADM8211_CSR_READ(SYNRF); \
2908 } \
2909 \
2910 for (i = 0; i <= bits; i++) { \
2911 if (bitbuf & (1 << (bits - i))) \
2912 reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \
2913 else \
2914 reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \
2915 \
2916 ADM8211_CSR_WRITE(SYNRF, reg); \
2917 ADM8211_CSR_READ(SYNRF); \
2918 \
2919 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
2920 ADM8211_CSR_READ(SYNRF); \
2921 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
2922 ADM8211_CSR_READ(SYNRF); \
2923 } \
2924 \
2925 if (postwrite == 1) { \
2926 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
2927 ADM8211_CSR_READ(SYNRF); \
2928 } \
2929 if (postwrite == 2) { \
2930 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
2931 ADM8211_CSR_READ(SYNRF); \
2932 } \
2933 \
2934 ADM8211_CSR_WRITE(SYNRF, 0); \
2935 ADM8211_CSR_READ(SYNRF); \
2936 }
2937 #undef WRITE_SYN
2938 #define IEEE80211_DUR_DS_LONG_PREAMBLE 144
2939 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
2940 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24
2941 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
2942 #define IEEE80211_DUR_DS_SLOW_ACK 112
2943 #define IEEE80211_DUR_DS_FAST_ACK 56
2944 #define IEEE80211_DUR_DS_SLOW_CTS 112
2945 #define IEEE80211_DUR_DS_FAST_CTS 56
2946 #define IEEE80211_DUR_DS_SLOT 20
2947 #define IEEE80211_DUR_DS_SIFS 10
2948 #ifdef CONFIG_PM
2949 #endif
2950 #ifdef CONFIG_PM
2951 #endif
2952 /* LDV_COMMENT_END_PREP */
2953 /* content: static u64 adm8211_get_tsft(struct ieee80211_hw *dev, struct ieee80211_vif *vif)*/
2954 /* LDV_COMMENT_BEGIN_PREP */
2955 #define ADM8211_INT(x) \
2956 do { \
2957 if (unlikely(stsr & ADM8211_STSR_ ## x)) \
2958 wiphy_debug(dev->wiphy, "%s\n", #x); \
2959 } while (0)
2960 #undef ADM8211_INT
2961 #define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
2962 static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \
2963 u16 addr, u32 value) { \
2964 struct adm8211_priv *priv = dev->priv; \
2965 unsigned int i; \
2966 u32 reg, bitbuf; \
2967 \
2968 value &= v_mask; \
2969 addr &= a_mask; \
2970 bitbuf = (value << v_shift) | (addr << a_shift); \
2971 \
2972 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \
2973 ADM8211_CSR_READ(SYNRF); \
2974 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \
2975 ADM8211_CSR_READ(SYNRF); \
2976 \
2977 if (prewrite) { \
2978 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
2979 ADM8211_CSR_READ(SYNRF); \
2980 } \
2981 \
2982 for (i = 0; i <= bits; i++) { \
2983 if (bitbuf & (1 << (bits - i))) \
2984 reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \
2985 else \
2986 reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \
2987 \
2988 ADM8211_CSR_WRITE(SYNRF, reg); \
2989 ADM8211_CSR_READ(SYNRF); \
2990 \
2991 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
2992 ADM8211_CSR_READ(SYNRF); \
2993 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
2994 ADM8211_CSR_READ(SYNRF); \
2995 } \
2996 \
2997 if (postwrite == 1) { \
2998 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
2999 ADM8211_CSR_READ(SYNRF); \
3000 } \
3001 if (postwrite == 2) { \
3002 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
3003 ADM8211_CSR_READ(SYNRF); \
3004 } \
3005 \
3006 ADM8211_CSR_WRITE(SYNRF, 0); \
3007 ADM8211_CSR_READ(SYNRF); \
3008 }
3009 #undef WRITE_SYN
3010 /* LDV_COMMENT_END_PREP */
3011 /* LDV_COMMENT_BEGIN_PREP */
3012 #define IEEE80211_DUR_DS_LONG_PREAMBLE 144
3013 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
3014 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24
3015 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
3016 #define IEEE80211_DUR_DS_SLOW_ACK 112
3017 #define IEEE80211_DUR_DS_FAST_ACK 56
3018 #define IEEE80211_DUR_DS_SLOW_CTS 112
3019 #define IEEE80211_DUR_DS_FAST_CTS 56
3020 #define IEEE80211_DUR_DS_SLOT 20
3021 #define IEEE80211_DUR_DS_SIFS 10
3022 #ifdef CONFIG_PM
3023 #endif
3024 #ifdef CONFIG_PM
3025 #endif
3026 /* LDV_COMMENT_END_PREP */
3027
3028 /** STRUCT: struct type: pci_driver, struct name: adm8211_driver **/
3029 /* content: static int adm8211_probe(struct pci_dev *pdev, const struct pci_device_id *id)*/
3030 /* LDV_COMMENT_BEGIN_PREP */
3031 #define ADM8211_INT(x) \
3032 do { \
3033 if (unlikely(stsr & ADM8211_STSR_ ## x)) \
3034 wiphy_debug(dev->wiphy, "%s\n", #x); \
3035 } while (0)
3036 #undef ADM8211_INT
3037 #define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
3038 static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \
3039 u16 addr, u32 value) { \
3040 struct adm8211_priv *priv = dev->priv; \
3041 unsigned int i; \
3042 u32 reg, bitbuf; \
3043 \
3044 value &= v_mask; \
3045 addr &= a_mask; \
3046 bitbuf = (value << v_shift) | (addr << a_shift); \
3047 \
3048 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \
3049 ADM8211_CSR_READ(SYNRF); \
3050 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \
3051 ADM8211_CSR_READ(SYNRF); \
3052 \
3053 if (prewrite) { \
3054 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
3055 ADM8211_CSR_READ(SYNRF); \
3056 } \
3057 \
3058 for (i = 0; i <= bits; i++) { \
3059 if (bitbuf & (1 << (bits - i))) \
3060 reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \
3061 else \
3062 reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \
3063 \
3064 ADM8211_CSR_WRITE(SYNRF, reg); \
3065 ADM8211_CSR_READ(SYNRF); \
3066 \
3067 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
3068 ADM8211_CSR_READ(SYNRF); \
3069 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
3070 ADM8211_CSR_READ(SYNRF); \
3071 } \
3072 \
3073 if (postwrite == 1) { \
3074 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
3075 ADM8211_CSR_READ(SYNRF); \
3076 } \
3077 if (postwrite == 2) { \
3078 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
3079 ADM8211_CSR_READ(SYNRF); \
3080 } \
3081 \
3082 ADM8211_CSR_WRITE(SYNRF, 0); \
3083 ADM8211_CSR_READ(SYNRF); \
3084 }
3085 #undef WRITE_SYN
3086 #define IEEE80211_DUR_DS_LONG_PREAMBLE 144
3087 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
3088 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24
3089 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
3090 #define IEEE80211_DUR_DS_SLOW_ACK 112
3091 #define IEEE80211_DUR_DS_FAST_ACK 56
3092 #define IEEE80211_DUR_DS_SLOW_CTS 112
3093 #define IEEE80211_DUR_DS_FAST_CTS 56
3094 #define IEEE80211_DUR_DS_SLOT 20
3095 #define IEEE80211_DUR_DS_SIFS 10
3096 /* LDV_COMMENT_END_PREP */
3097 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "adm8211_probe" */
3098 struct pci_dev * var_group7;
3099 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "adm8211_probe" */
3100 const struct pci_device_id * var_adm8211_probe_35_p1;
3101 /* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "adm8211_probe" */
3102 static int res_adm8211_probe_35;
3103 /* LDV_COMMENT_BEGIN_PREP */
3104 #ifdef CONFIG_PM
3105 #endif
3106 #ifdef CONFIG_PM
3107 #endif
3108 /* LDV_COMMENT_END_PREP */
3109 /* content: static void adm8211_remove(struct pci_dev *pdev)*/
3110 /* LDV_COMMENT_BEGIN_PREP */
3111 #define ADM8211_INT(x) \
3112 do { \
3113 if (unlikely(stsr & ADM8211_STSR_ ## x)) \
3114 wiphy_debug(dev->wiphy, "%s\n", #x); \
3115 } while (0)
3116 #undef ADM8211_INT
3117 #define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
3118 static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \
3119 u16 addr, u32 value) { \
3120 struct adm8211_priv *priv = dev->priv; \
3121 unsigned int i; \
3122 u32 reg, bitbuf; \
3123 \
3124 value &= v_mask; \
3125 addr &= a_mask; \
3126 bitbuf = (value << v_shift) | (addr << a_shift); \
3127 \
3128 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \
3129 ADM8211_CSR_READ(SYNRF); \
3130 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \
3131 ADM8211_CSR_READ(SYNRF); \
3132 \
3133 if (prewrite) { \
3134 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
3135 ADM8211_CSR_READ(SYNRF); \
3136 } \
3137 \
3138 for (i = 0; i <= bits; i++) { \
3139 if (bitbuf & (1 << (bits - i))) \
3140 reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \
3141 else \
3142 reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \
3143 \
3144 ADM8211_CSR_WRITE(SYNRF, reg); \
3145 ADM8211_CSR_READ(SYNRF); \
3146 \
3147 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
3148 ADM8211_CSR_READ(SYNRF); \
3149 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
3150 ADM8211_CSR_READ(SYNRF); \
3151 } \
3152 \
3153 if (postwrite == 1) { \
3154 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
3155 ADM8211_CSR_READ(SYNRF); \
3156 } \
3157 if (postwrite == 2) { \
3158 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
3159 ADM8211_CSR_READ(SYNRF); \
3160 } \
3161 \
3162 ADM8211_CSR_WRITE(SYNRF, 0); \
3163 ADM8211_CSR_READ(SYNRF); \
3164 }
3165 #undef WRITE_SYN
3166 #define IEEE80211_DUR_DS_LONG_PREAMBLE 144
3167 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
3168 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24
3169 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
3170 #define IEEE80211_DUR_DS_SLOW_ACK 112
3171 #define IEEE80211_DUR_DS_FAST_ACK 56
3172 #define IEEE80211_DUR_DS_SLOW_CTS 112
3173 #define IEEE80211_DUR_DS_FAST_CTS 56
3174 #define IEEE80211_DUR_DS_SLOT 20
3175 #define IEEE80211_DUR_DS_SIFS 10
3176 /* LDV_COMMENT_END_PREP */
3177 /* LDV_COMMENT_BEGIN_PREP */
3178 #ifdef CONFIG_PM
3179 #endif
3180 #ifdef CONFIG_PM
3181 #endif
3182 /* LDV_COMMENT_END_PREP */
3183 /* content: static int adm8211_suspend(struct pci_dev *pdev, pm_message_t state)*/
3184 /* LDV_COMMENT_BEGIN_PREP */
3185 #define ADM8211_INT(x) \
3186 do { \
3187 if (unlikely(stsr & ADM8211_STSR_ ## x)) \
3188 wiphy_debug(dev->wiphy, "%s\n", #x); \
3189 } while (0)
3190 #undef ADM8211_INT
3191 #define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
3192 static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \
3193 u16 addr, u32 value) { \
3194 struct adm8211_priv *priv = dev->priv; \
3195 unsigned int i; \
3196 u32 reg, bitbuf; \
3197 \
3198 value &= v_mask; \
3199 addr &= a_mask; \
3200 bitbuf = (value << v_shift) | (addr << a_shift); \
3201 \
3202 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \
3203 ADM8211_CSR_READ(SYNRF); \
3204 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \
3205 ADM8211_CSR_READ(SYNRF); \
3206 \
3207 if (prewrite) { \
3208 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
3209 ADM8211_CSR_READ(SYNRF); \
3210 } \
3211 \
3212 for (i = 0; i <= bits; i++) { \
3213 if (bitbuf & (1 << (bits - i))) \
3214 reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \
3215 else \
3216 reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \
3217 \
3218 ADM8211_CSR_WRITE(SYNRF, reg); \
3219 ADM8211_CSR_READ(SYNRF); \
3220 \
3221 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
3222 ADM8211_CSR_READ(SYNRF); \
3223 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
3224 ADM8211_CSR_READ(SYNRF); \
3225 } \
3226 \
3227 if (postwrite == 1) { \
3228 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
3229 ADM8211_CSR_READ(SYNRF); \
3230 } \
3231 if (postwrite == 2) { \
3232 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
3233 ADM8211_CSR_READ(SYNRF); \
3234 } \
3235 \
3236 ADM8211_CSR_WRITE(SYNRF, 0); \
3237 ADM8211_CSR_READ(SYNRF); \
3238 }
3239 #undef WRITE_SYN
3240 #define IEEE80211_DUR_DS_LONG_PREAMBLE 144
3241 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
3242 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24
3243 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
3244 #define IEEE80211_DUR_DS_SLOW_ACK 112
3245 #define IEEE80211_DUR_DS_FAST_ACK 56
3246 #define IEEE80211_DUR_DS_SLOW_CTS 112
3247 #define IEEE80211_DUR_DS_FAST_CTS 56
3248 #define IEEE80211_DUR_DS_SLOT 20
3249 #define IEEE80211_DUR_DS_SIFS 10
3250 #ifdef CONFIG_PM
3251 /* LDV_COMMENT_END_PREP */
3252 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "adm8211_suspend" */
3253 pm_message_t var_adm8211_suspend_37_p1;
3254 /* LDV_COMMENT_BEGIN_PREP */
3255 #endif
3256 #ifdef CONFIG_PM
3257 #endif
3258 /* LDV_COMMENT_END_PREP */
3259 /* content: static int adm8211_resume(struct pci_dev *pdev)*/
3260 /* LDV_COMMENT_BEGIN_PREP */
3261 #define ADM8211_INT(x) \
3262 do { \
3263 if (unlikely(stsr & ADM8211_STSR_ ## x)) \
3264 wiphy_debug(dev->wiphy, "%s\n", #x); \
3265 } while (0)
3266 #undef ADM8211_INT
3267 #define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
3268 static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \
3269 u16 addr, u32 value) { \
3270 struct adm8211_priv *priv = dev->priv; \
3271 unsigned int i; \
3272 u32 reg, bitbuf; \
3273 \
3274 value &= v_mask; \
3275 addr &= a_mask; \
3276 bitbuf = (value << v_shift) | (addr << a_shift); \
3277 \
3278 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \
3279 ADM8211_CSR_READ(SYNRF); \
3280 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \
3281 ADM8211_CSR_READ(SYNRF); \
3282 \
3283 if (prewrite) { \
3284 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
3285 ADM8211_CSR_READ(SYNRF); \
3286 } \
3287 \
3288 for (i = 0; i <= bits; i++) { \
3289 if (bitbuf & (1 << (bits - i))) \
3290 reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \
3291 else \
3292 reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \
3293 \
3294 ADM8211_CSR_WRITE(SYNRF, reg); \
3295 ADM8211_CSR_READ(SYNRF); \
3296 \
3297 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
3298 ADM8211_CSR_READ(SYNRF); \
3299 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
3300 ADM8211_CSR_READ(SYNRF); \
3301 } \
3302 \
3303 if (postwrite == 1) { \
3304 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
3305 ADM8211_CSR_READ(SYNRF); \
3306 } \
3307 if (postwrite == 2) { \
3308 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
3309 ADM8211_CSR_READ(SYNRF); \
3310 } \
3311 \
3312 ADM8211_CSR_WRITE(SYNRF, 0); \
3313 ADM8211_CSR_READ(SYNRF); \
3314 }
3315 #undef WRITE_SYN
3316 #define IEEE80211_DUR_DS_LONG_PREAMBLE 144
3317 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
3318 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24
3319 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
3320 #define IEEE80211_DUR_DS_SLOW_ACK 112
3321 #define IEEE80211_DUR_DS_FAST_ACK 56
3322 #define IEEE80211_DUR_DS_SLOW_CTS 112
3323 #define IEEE80211_DUR_DS_FAST_CTS 56
3324 #define IEEE80211_DUR_DS_SLOT 20
3325 #define IEEE80211_DUR_DS_SIFS 10
3326 #ifdef CONFIG_PM
3327 /* LDV_COMMENT_END_PREP */
3328 /* LDV_COMMENT_BEGIN_PREP */
3329 #endif
3330 #ifdef CONFIG_PM
3331 #endif
3332 /* LDV_COMMENT_END_PREP */
3333
3334 /** CALLBACK SECTION request_irq **/
3335 /* content: static irqreturn_t adm8211_interrupt(int irq, void *dev_id)*/
3336 /* LDV_COMMENT_END_PREP */
3337 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "adm8211_interrupt" */
3338 int var_adm8211_interrupt_9_p0;
3339 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "adm8211_interrupt" */
3340 void * var_adm8211_interrupt_9_p1;
3341 /* LDV_COMMENT_BEGIN_PREP */
3342 #define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
3343 static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \
3344 u16 addr, u32 value) { \
3345 struct adm8211_priv *priv = dev->priv; \
3346 unsigned int i; \
3347 u32 reg, bitbuf; \
3348 \
3349 value &= v_mask; \
3350 addr &= a_mask; \
3351 bitbuf = (value << v_shift) | (addr << a_shift); \
3352 \
3353 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \
3354 ADM8211_CSR_READ(SYNRF); \
3355 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \
3356 ADM8211_CSR_READ(SYNRF); \
3357 \
3358 if (prewrite) { \
3359 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
3360 ADM8211_CSR_READ(SYNRF); \
3361 } \
3362 \
3363 for (i = 0; i <= bits; i++) { \
3364 if (bitbuf & (1 << (bits - i))) \
3365 reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \
3366 else \
3367 reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \
3368 \
3369 ADM8211_CSR_WRITE(SYNRF, reg); \
3370 ADM8211_CSR_READ(SYNRF); \
3371 \
3372 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
3373 ADM8211_CSR_READ(SYNRF); \
3374 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
3375 ADM8211_CSR_READ(SYNRF); \
3376 } \
3377 \
3378 if (postwrite == 1) { \
3379 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
3380 ADM8211_CSR_READ(SYNRF); \
3381 } \
3382 if (postwrite == 2) { \
3383 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
3384 ADM8211_CSR_READ(SYNRF); \
3385 } \
3386 \
3387 ADM8211_CSR_WRITE(SYNRF, 0); \
3388 ADM8211_CSR_READ(SYNRF); \
3389 }
3390 #undef WRITE_SYN
3391 #define IEEE80211_DUR_DS_LONG_PREAMBLE 144
3392 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
3393 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24
3394 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
3395 #define IEEE80211_DUR_DS_SLOW_ACK 112
3396 #define IEEE80211_DUR_DS_FAST_ACK 56
3397 #define IEEE80211_DUR_DS_SLOW_CTS 112
3398 #define IEEE80211_DUR_DS_FAST_CTS 56
3399 #define IEEE80211_DUR_DS_SLOT 20
3400 #define IEEE80211_DUR_DS_SIFS 10
3401 #ifdef CONFIG_PM
3402 #endif
3403 #ifdef CONFIG_PM
3404 #endif
3405 /* LDV_COMMENT_END_PREP */
3406
3407
3408
3409
3410 /* LDV_COMMENT_END_VARIABLE_DECLARATION_PART */
3411 /* LDV_COMMENT_BEGIN_VARIABLE_INITIALIZING_PART */
3412 /*============================= VARIABLE INITIALIZING PART =============================*/
3413 LDV_IN_INTERRUPT=1;
3414
3415
3416
3417
3418 /* LDV_COMMENT_END_VARIABLE_INITIALIZING_PART */
3419 /* LDV_COMMENT_BEGIN_FUNCTION_CALL_SECTION */
3420 /*============================= FUNCTION CALL SECTION =============================*/
3421 /* LDV_COMMENT_FUNCTION_CALL Initialize LDV model. */
3422 ldv_initialize();
3423
3424
3425
3426
3427 int ldv_s_adm8211_driver_pci_driver = 0;
3428
3429
3430
3431
3432
3433 while( nondet_int()
3434 || !(ldv_s_adm8211_driver_pci_driver == 0)
3435 ) {
3436
3437 switch(nondet_int()) {
3438
3439 case 0: {
3440
3441 /** STRUCT: struct type: eeprom_93cx6, struct name: eeprom **/
3442
3443
3444 /* content: static void adm8211_eeprom_register_read(struct eeprom_93cx6 *eeprom)*/
3445 /* LDV_COMMENT_END_PREP */
3446 /* LDV_COMMENT_FUNCTION_CALL Function from field "register_read" from driver structure with callbacks "eeprom" */
3447 ldv_handler_precall();
3448 adm8211_eeprom_register_read( var_group1);
3449 /* LDV_COMMENT_BEGIN_PREP */
3450 #define ADM8211_INT(x) \
3451 do { \
3452 if (unlikely(stsr & ADM8211_STSR_ ## x)) \
3453 wiphy_debug(dev->wiphy, "%s\n", #x); \
3454 } while (0)
3455 #undef ADM8211_INT
3456 #define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
3457 static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \
3458 u16 addr, u32 value) { \
3459 struct adm8211_priv *priv = dev->priv; \
3460 unsigned int i; \
3461 u32 reg, bitbuf; \
3462 \
3463 value &= v_mask; \
3464 addr &= a_mask; \
3465 bitbuf = (value << v_shift) | (addr << a_shift); \
3466 \
3467 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \
3468 ADM8211_CSR_READ(SYNRF); \
3469 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \
3470 ADM8211_CSR_READ(SYNRF); \
3471 \
3472 if (prewrite) { \
3473 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
3474 ADM8211_CSR_READ(SYNRF); \
3475 } \
3476 \
3477 for (i = 0; i <= bits; i++) { \
3478 if (bitbuf & (1 << (bits - i))) \
3479 reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \
3480 else \
3481 reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \
3482 \
3483 ADM8211_CSR_WRITE(SYNRF, reg); \
3484 ADM8211_CSR_READ(SYNRF); \
3485 \
3486 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
3487 ADM8211_CSR_READ(SYNRF); \
3488 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
3489 ADM8211_CSR_READ(SYNRF); \
3490 } \
3491 \
3492 if (postwrite == 1) { \
3493 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
3494 ADM8211_CSR_READ(SYNRF); \
3495 } \
3496 if (postwrite == 2) { \
3497 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
3498 ADM8211_CSR_READ(SYNRF); \
3499 } \
3500 \
3501 ADM8211_CSR_WRITE(SYNRF, 0); \
3502 ADM8211_CSR_READ(SYNRF); \
3503 }
3504 #undef WRITE_SYN
3505 #define IEEE80211_DUR_DS_LONG_PREAMBLE 144
3506 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
3507 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24
3508 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
3509 #define IEEE80211_DUR_DS_SLOW_ACK 112
3510 #define IEEE80211_DUR_DS_FAST_ACK 56
3511 #define IEEE80211_DUR_DS_SLOW_CTS 112
3512 #define IEEE80211_DUR_DS_FAST_CTS 56
3513 #define IEEE80211_DUR_DS_SLOT 20
3514 #define IEEE80211_DUR_DS_SIFS 10
3515 #ifdef CONFIG_PM
3516 #endif
3517 #ifdef CONFIG_PM
3518 #endif
3519 /* LDV_COMMENT_END_PREP */
3520
3521
3522
3523
3524 }
3525
3526 break;
3527 case 1: {
3528
3529 /** STRUCT: struct type: eeprom_93cx6, struct name: eeprom **/
3530
3531
3532 /* content: static void adm8211_eeprom_register_write(struct eeprom_93cx6 *eeprom)*/
3533 /* LDV_COMMENT_END_PREP */
3534 /* LDV_COMMENT_FUNCTION_CALL Function from field "register_write" from driver structure with callbacks "eeprom" */
3535 ldv_handler_precall();
3536 adm8211_eeprom_register_write( var_group1);
3537 /* LDV_COMMENT_BEGIN_PREP */
3538 #define ADM8211_INT(x) \
3539 do { \
3540 if (unlikely(stsr & ADM8211_STSR_ ## x)) \
3541 wiphy_debug(dev->wiphy, "%s\n", #x); \
3542 } while (0)
3543 #undef ADM8211_INT
3544 #define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
3545 static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \
3546 u16 addr, u32 value) { \
3547 struct adm8211_priv *priv = dev->priv; \
3548 unsigned int i; \
3549 u32 reg, bitbuf; \
3550 \
3551 value &= v_mask; \
3552 addr &= a_mask; \
3553 bitbuf = (value << v_shift) | (addr << a_shift); \
3554 \
3555 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \
3556 ADM8211_CSR_READ(SYNRF); \
3557 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \
3558 ADM8211_CSR_READ(SYNRF); \
3559 \
3560 if (prewrite) { \
3561 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
3562 ADM8211_CSR_READ(SYNRF); \
3563 } \
3564 \
3565 for (i = 0; i <= bits; i++) { \
3566 if (bitbuf & (1 << (bits - i))) \
3567 reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \
3568 else \
3569 reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \
3570 \
3571 ADM8211_CSR_WRITE(SYNRF, reg); \
3572 ADM8211_CSR_READ(SYNRF); \
3573 \
3574 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
3575 ADM8211_CSR_READ(SYNRF); \
3576 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
3577 ADM8211_CSR_READ(SYNRF); \
3578 } \
3579 \
3580 if (postwrite == 1) { \
3581 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
3582 ADM8211_CSR_READ(SYNRF); \
3583 } \
3584 if (postwrite == 2) { \
3585 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
3586 ADM8211_CSR_READ(SYNRF); \
3587 } \
3588 \
3589 ADM8211_CSR_WRITE(SYNRF, 0); \
3590 ADM8211_CSR_READ(SYNRF); \
3591 }
3592 #undef WRITE_SYN
3593 #define IEEE80211_DUR_DS_LONG_PREAMBLE 144
3594 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
3595 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24
3596 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
3597 #define IEEE80211_DUR_DS_SLOW_ACK 112
3598 #define IEEE80211_DUR_DS_FAST_ACK 56
3599 #define IEEE80211_DUR_DS_SLOW_CTS 112
3600 #define IEEE80211_DUR_DS_FAST_CTS 56
3601 #define IEEE80211_DUR_DS_SLOT 20
3602 #define IEEE80211_DUR_DS_SIFS 10
3603 #ifdef CONFIG_PM
3604 #endif
3605 #ifdef CONFIG_PM
3606 #endif
3607 /* LDV_COMMENT_END_PREP */
3608
3609
3610
3611
3612 }
3613
3614 break;
3615 case 2: {
3616
3617 /** STRUCT: struct type: ieee80211_ops, struct name: adm8211_ops **/
3618
3619
3620 /* content: static void adm8211_tx(struct ieee80211_hw *dev, struct ieee80211_tx_control *control, struct sk_buff *skb)*/
3621 /* LDV_COMMENT_BEGIN_PREP */
3622 #define ADM8211_INT(x) \
3623 do { \
3624 if (unlikely(stsr & ADM8211_STSR_ ## x)) \
3625 wiphy_debug(dev->wiphy, "%s\n", #x); \
3626 } while (0)
3627 #undef ADM8211_INT
3628 #define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
3629 static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \
3630 u16 addr, u32 value) { \
3631 struct adm8211_priv *priv = dev->priv; \
3632 unsigned int i; \
3633 u32 reg, bitbuf; \
3634 \
3635 value &= v_mask; \
3636 addr &= a_mask; \
3637 bitbuf = (value << v_shift) | (addr << a_shift); \
3638 \
3639 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \
3640 ADM8211_CSR_READ(SYNRF); \
3641 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \
3642 ADM8211_CSR_READ(SYNRF); \
3643 \
3644 if (prewrite) { \
3645 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
3646 ADM8211_CSR_READ(SYNRF); \
3647 } \
3648 \
3649 for (i = 0; i <= bits; i++) { \
3650 if (bitbuf & (1 << (bits - i))) \
3651 reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \
3652 else \
3653 reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \
3654 \
3655 ADM8211_CSR_WRITE(SYNRF, reg); \
3656 ADM8211_CSR_READ(SYNRF); \
3657 \
3658 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
3659 ADM8211_CSR_READ(SYNRF); \
3660 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
3661 ADM8211_CSR_READ(SYNRF); \
3662 } \
3663 \
3664 if (postwrite == 1) { \
3665 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
3666 ADM8211_CSR_READ(SYNRF); \
3667 } \
3668 if (postwrite == 2) { \
3669 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
3670 ADM8211_CSR_READ(SYNRF); \
3671 } \
3672 \
3673 ADM8211_CSR_WRITE(SYNRF, 0); \
3674 ADM8211_CSR_READ(SYNRF); \
3675 }
3676 #undef WRITE_SYN
3677 #define IEEE80211_DUR_DS_LONG_PREAMBLE 144
3678 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
3679 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24
3680 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
3681 #define IEEE80211_DUR_DS_SLOW_ACK 112
3682 #define IEEE80211_DUR_DS_FAST_ACK 56
3683 #define IEEE80211_DUR_DS_SLOW_CTS 112
3684 #define IEEE80211_DUR_DS_FAST_CTS 56
3685 #define IEEE80211_DUR_DS_SLOT 20
3686 #define IEEE80211_DUR_DS_SIFS 10
3687 /* LDV_COMMENT_END_PREP */
3688 /* LDV_COMMENT_FUNCTION_CALL Function from field "tx" from driver structure with callbacks "adm8211_ops" */
3689 ldv_handler_precall();
3690 adm8211_tx( var_group2, var_group3, var_adm8211_tx_33_p2);
3691 /* LDV_COMMENT_BEGIN_PREP */
3692 #ifdef CONFIG_PM
3693 #endif
3694 #ifdef CONFIG_PM
3695 #endif
3696 /* LDV_COMMENT_END_PREP */
3697
3698
3699
3700
3701 }
3702
3703 break;
3704 case 3: {
3705
3706 /** STRUCT: struct type: ieee80211_ops, struct name: adm8211_ops **/
3707
3708
3709 /* content: static int adm8211_start(struct ieee80211_hw *dev)*/
3710 /* LDV_COMMENT_BEGIN_PREP */
3711 #define ADM8211_INT(x) \
3712 do { \
3713 if (unlikely(stsr & ADM8211_STSR_ ## x)) \
3714 wiphy_debug(dev->wiphy, "%s\n", #x); \
3715 } while (0)
3716 #undef ADM8211_INT
3717 #define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
3718 static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \
3719 u16 addr, u32 value) { \
3720 struct adm8211_priv *priv = dev->priv; \
3721 unsigned int i; \
3722 u32 reg, bitbuf; \
3723 \
3724 value &= v_mask; \
3725 addr &= a_mask; \
3726 bitbuf = (value << v_shift) | (addr << a_shift); \
3727 \
3728 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \
3729 ADM8211_CSR_READ(SYNRF); \
3730 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \
3731 ADM8211_CSR_READ(SYNRF); \
3732 \
3733 if (prewrite) { \
3734 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
3735 ADM8211_CSR_READ(SYNRF); \
3736 } \
3737 \
3738 for (i = 0; i <= bits; i++) { \
3739 if (bitbuf & (1 << (bits - i))) \
3740 reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \
3741 else \
3742 reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \
3743 \
3744 ADM8211_CSR_WRITE(SYNRF, reg); \
3745 ADM8211_CSR_READ(SYNRF); \
3746 \
3747 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
3748 ADM8211_CSR_READ(SYNRF); \
3749 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
3750 ADM8211_CSR_READ(SYNRF); \
3751 } \
3752 \
3753 if (postwrite == 1) { \
3754 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
3755 ADM8211_CSR_READ(SYNRF); \
3756 } \
3757 if (postwrite == 2) { \
3758 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
3759 ADM8211_CSR_READ(SYNRF); \
3760 } \
3761 \
3762 ADM8211_CSR_WRITE(SYNRF, 0); \
3763 ADM8211_CSR_READ(SYNRF); \
3764 }
3765 #undef WRITE_SYN
3766 /* LDV_COMMENT_END_PREP */
3767 /* LDV_COMMENT_FUNCTION_CALL Function from field "start" from driver structure with callbacks "adm8211_ops" */
3768 ldv_handler_precall();
3769 adm8211_start( var_group2);
3770 /* LDV_COMMENT_BEGIN_PREP */
3771 #define IEEE80211_DUR_DS_LONG_PREAMBLE 144
3772 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
3773 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24
3774 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
3775 #define IEEE80211_DUR_DS_SLOW_ACK 112
3776 #define IEEE80211_DUR_DS_FAST_ACK 56
3777 #define IEEE80211_DUR_DS_SLOW_CTS 112
3778 #define IEEE80211_DUR_DS_FAST_CTS 56
3779 #define IEEE80211_DUR_DS_SLOT 20
3780 #define IEEE80211_DUR_DS_SIFS 10
3781 #ifdef CONFIG_PM
3782 #endif
3783 #ifdef CONFIG_PM
3784 #endif
3785 /* LDV_COMMENT_END_PREP */
3786
3787
3788
3789
3790 }
3791
3792 break;
3793 case 4: {
3794
3795 /** STRUCT: struct type: ieee80211_ops, struct name: adm8211_ops **/
3796
3797
3798 /* content: static void adm8211_stop(struct ieee80211_hw *dev)*/
3799 /* LDV_COMMENT_BEGIN_PREP */
3800 #define ADM8211_INT(x) \
3801 do { \
3802 if (unlikely(stsr & ADM8211_STSR_ ## x)) \
3803 wiphy_debug(dev->wiphy, "%s\n", #x); \
3804 } while (0)
3805 #undef ADM8211_INT
3806 #define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
3807 static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \
3808 u16 addr, u32 value) { \
3809 struct adm8211_priv *priv = dev->priv; \
3810 unsigned int i; \
3811 u32 reg, bitbuf; \
3812 \
3813 value &= v_mask; \
3814 addr &= a_mask; \
3815 bitbuf = (value << v_shift) | (addr << a_shift); \
3816 \
3817 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \
3818 ADM8211_CSR_READ(SYNRF); \
3819 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \
3820 ADM8211_CSR_READ(SYNRF); \
3821 \
3822 if (prewrite) { \
3823 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
3824 ADM8211_CSR_READ(SYNRF); \
3825 } \
3826 \
3827 for (i = 0; i <= bits; i++) { \
3828 if (bitbuf & (1 << (bits - i))) \
3829 reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \
3830 else \
3831 reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \
3832 \
3833 ADM8211_CSR_WRITE(SYNRF, reg); \
3834 ADM8211_CSR_READ(SYNRF); \
3835 \
3836 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
3837 ADM8211_CSR_READ(SYNRF); \
3838 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
3839 ADM8211_CSR_READ(SYNRF); \
3840 } \
3841 \
3842 if (postwrite == 1) { \
3843 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
3844 ADM8211_CSR_READ(SYNRF); \
3845 } \
3846 if (postwrite == 2) { \
3847 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
3848 ADM8211_CSR_READ(SYNRF); \
3849 } \
3850 \
3851 ADM8211_CSR_WRITE(SYNRF, 0); \
3852 ADM8211_CSR_READ(SYNRF); \
3853 }
3854 #undef WRITE_SYN
3855 /* LDV_COMMENT_END_PREP */
3856 /* LDV_COMMENT_FUNCTION_CALL Function from field "stop" from driver structure with callbacks "adm8211_ops" */
3857 ldv_handler_precall();
3858 adm8211_stop( var_group2);
3859 /* LDV_COMMENT_BEGIN_PREP */
3860 #define IEEE80211_DUR_DS_LONG_PREAMBLE 144
3861 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
3862 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24
3863 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
3864 #define IEEE80211_DUR_DS_SLOW_ACK 112
3865 #define IEEE80211_DUR_DS_FAST_ACK 56
3866 #define IEEE80211_DUR_DS_SLOW_CTS 112
3867 #define IEEE80211_DUR_DS_FAST_CTS 56
3868 #define IEEE80211_DUR_DS_SLOT 20
3869 #define IEEE80211_DUR_DS_SIFS 10
3870 #ifdef CONFIG_PM
3871 #endif
3872 #ifdef CONFIG_PM
3873 #endif
3874 /* LDV_COMMENT_END_PREP */
3875
3876
3877
3878
3879 }
3880
3881 break;
3882 case 5: {
3883
3884 /** STRUCT: struct type: ieee80211_ops, struct name: adm8211_ops **/
3885
3886
3887 /* content: static int adm8211_add_interface(struct ieee80211_hw *dev, struct ieee80211_vif *vif)*/
3888 /* LDV_COMMENT_BEGIN_PREP */
3889 #define ADM8211_INT(x) \
3890 do { \
3891 if (unlikely(stsr & ADM8211_STSR_ ## x)) \
3892 wiphy_debug(dev->wiphy, "%s\n", #x); \
3893 } while (0)
3894 #undef ADM8211_INT
3895 #define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
3896 static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \
3897 u16 addr, u32 value) { \
3898 struct adm8211_priv *priv = dev->priv; \
3899 unsigned int i; \
3900 u32 reg, bitbuf; \
3901 \
3902 value &= v_mask; \
3903 addr &= a_mask; \
3904 bitbuf = (value << v_shift) | (addr << a_shift); \
3905 \
3906 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \
3907 ADM8211_CSR_READ(SYNRF); \
3908 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \
3909 ADM8211_CSR_READ(SYNRF); \
3910 \
3911 if (prewrite) { \
3912 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
3913 ADM8211_CSR_READ(SYNRF); \
3914 } \
3915 \
3916 for (i = 0; i <= bits; i++) { \
3917 if (bitbuf & (1 << (bits - i))) \
3918 reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \
3919 else \
3920 reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \
3921 \
3922 ADM8211_CSR_WRITE(SYNRF, reg); \
3923 ADM8211_CSR_READ(SYNRF); \
3924 \
3925 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
3926 ADM8211_CSR_READ(SYNRF); \
3927 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
3928 ADM8211_CSR_READ(SYNRF); \
3929 } \
3930 \
3931 if (postwrite == 1) { \
3932 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
3933 ADM8211_CSR_READ(SYNRF); \
3934 } \
3935 if (postwrite == 2) { \
3936 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
3937 ADM8211_CSR_READ(SYNRF); \
3938 } \
3939 \
3940 ADM8211_CSR_WRITE(SYNRF, 0); \
3941 ADM8211_CSR_READ(SYNRF); \
3942 }
3943 #undef WRITE_SYN
3944 /* LDV_COMMENT_END_PREP */
3945 /* LDV_COMMENT_FUNCTION_CALL Function from field "add_interface" from driver structure with callbacks "adm8211_ops" */
3946 ldv_handler_precall();
3947 adm8211_add_interface( var_group2, var_group4);
3948 /* LDV_COMMENT_BEGIN_PREP */
3949 #define IEEE80211_DUR_DS_LONG_PREAMBLE 144
3950 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
3951 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24
3952 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
3953 #define IEEE80211_DUR_DS_SLOW_ACK 112
3954 #define IEEE80211_DUR_DS_FAST_ACK 56
3955 #define IEEE80211_DUR_DS_SLOW_CTS 112
3956 #define IEEE80211_DUR_DS_FAST_CTS 56
3957 #define IEEE80211_DUR_DS_SLOT 20
3958 #define IEEE80211_DUR_DS_SIFS 10
3959 #ifdef CONFIG_PM
3960 #endif
3961 #ifdef CONFIG_PM
3962 #endif
3963 /* LDV_COMMENT_END_PREP */
3964
3965
3966
3967
3968 }
3969
3970 break;
3971 case 6: {
3972
3973 /** STRUCT: struct type: ieee80211_ops, struct name: adm8211_ops **/
3974
3975
3976 /* content: static void adm8211_remove_interface(struct ieee80211_hw *dev, struct ieee80211_vif *vif)*/
3977 /* LDV_COMMENT_BEGIN_PREP */
3978 #define ADM8211_INT(x) \
3979 do { \
3980 if (unlikely(stsr & ADM8211_STSR_ ## x)) \
3981 wiphy_debug(dev->wiphy, "%s\n", #x); \
3982 } while (0)
3983 #undef ADM8211_INT
3984 #define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
3985 static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \
3986 u16 addr, u32 value) { \
3987 struct adm8211_priv *priv = dev->priv; \
3988 unsigned int i; \
3989 u32 reg, bitbuf; \
3990 \
3991 value &= v_mask; \
3992 addr &= a_mask; \
3993 bitbuf = (value << v_shift) | (addr << a_shift); \
3994 \
3995 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \
3996 ADM8211_CSR_READ(SYNRF); \
3997 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \
3998 ADM8211_CSR_READ(SYNRF); \
3999 \
4000 if (prewrite) { \
4001 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
4002 ADM8211_CSR_READ(SYNRF); \
4003 } \
4004 \
4005 for (i = 0; i <= bits; i++) { \
4006 if (bitbuf & (1 << (bits - i))) \
4007 reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \
4008 else \
4009 reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \
4010 \
4011 ADM8211_CSR_WRITE(SYNRF, reg); \
4012 ADM8211_CSR_READ(SYNRF); \
4013 \
4014 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
4015 ADM8211_CSR_READ(SYNRF); \
4016 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
4017 ADM8211_CSR_READ(SYNRF); \
4018 } \
4019 \
4020 if (postwrite == 1) { \
4021 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
4022 ADM8211_CSR_READ(SYNRF); \
4023 } \
4024 if (postwrite == 2) { \
4025 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
4026 ADM8211_CSR_READ(SYNRF); \
4027 } \
4028 \
4029 ADM8211_CSR_WRITE(SYNRF, 0); \
4030 ADM8211_CSR_READ(SYNRF); \
4031 }
4032 #undef WRITE_SYN
4033 /* LDV_COMMENT_END_PREP */
4034 /* LDV_COMMENT_FUNCTION_CALL Function from field "remove_interface" from driver structure with callbacks "adm8211_ops" */
4035 ldv_handler_precall();
4036 adm8211_remove_interface( var_group2, var_group4);
4037 /* LDV_COMMENT_BEGIN_PREP */
4038 #define IEEE80211_DUR_DS_LONG_PREAMBLE 144
4039 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
4040 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24
4041 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
4042 #define IEEE80211_DUR_DS_SLOW_ACK 112
4043 #define IEEE80211_DUR_DS_FAST_ACK 56
4044 #define IEEE80211_DUR_DS_SLOW_CTS 112
4045 #define IEEE80211_DUR_DS_FAST_CTS 56
4046 #define IEEE80211_DUR_DS_SLOT 20
4047 #define IEEE80211_DUR_DS_SIFS 10
4048 #ifdef CONFIG_PM
4049 #endif
4050 #ifdef CONFIG_PM
4051 #endif
4052 /* LDV_COMMENT_END_PREP */
4053
4054
4055
4056
4057 }
4058
4059 break;
4060 case 7: {
4061
4062 /** STRUCT: struct type: ieee80211_ops, struct name: adm8211_ops **/
4063
4064
4065 /* content: static int adm8211_config(struct ieee80211_hw *dev, u32 changed)*/
4066 /* LDV_COMMENT_BEGIN_PREP */
4067 #define ADM8211_INT(x) \
4068 do { \
4069 if (unlikely(stsr & ADM8211_STSR_ ## x)) \
4070 wiphy_debug(dev->wiphy, "%s\n", #x); \
4071 } while (0)
4072 #undef ADM8211_INT
4073 #define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
4074 static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \
4075 u16 addr, u32 value) { \
4076 struct adm8211_priv *priv = dev->priv; \
4077 unsigned int i; \
4078 u32 reg, bitbuf; \
4079 \
4080 value &= v_mask; \
4081 addr &= a_mask; \
4082 bitbuf = (value << v_shift) | (addr << a_shift); \
4083 \
4084 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \
4085 ADM8211_CSR_READ(SYNRF); \
4086 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \
4087 ADM8211_CSR_READ(SYNRF); \
4088 \
4089 if (prewrite) { \
4090 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
4091 ADM8211_CSR_READ(SYNRF); \
4092 } \
4093 \
4094 for (i = 0; i <= bits; i++) { \
4095 if (bitbuf & (1 << (bits - i))) \
4096 reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \
4097 else \
4098 reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \
4099 \
4100 ADM8211_CSR_WRITE(SYNRF, reg); \
4101 ADM8211_CSR_READ(SYNRF); \
4102 \
4103 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
4104 ADM8211_CSR_READ(SYNRF); \
4105 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
4106 ADM8211_CSR_READ(SYNRF); \
4107 } \
4108 \
4109 if (postwrite == 1) { \
4110 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
4111 ADM8211_CSR_READ(SYNRF); \
4112 } \
4113 if (postwrite == 2) { \
4114 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
4115 ADM8211_CSR_READ(SYNRF); \
4116 } \
4117 \
4118 ADM8211_CSR_WRITE(SYNRF, 0); \
4119 ADM8211_CSR_READ(SYNRF); \
4120 }
4121 #undef WRITE_SYN
4122 /* LDV_COMMENT_END_PREP */
4123 /* LDV_COMMENT_FUNCTION_CALL Function from field "config" from driver structure with callbacks "adm8211_ops" */
4124 ldv_handler_precall();
4125 adm8211_config( var_group2, var_adm8211_config_21_p1);
4126 /* LDV_COMMENT_BEGIN_PREP */
4127 #define IEEE80211_DUR_DS_LONG_PREAMBLE 144
4128 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
4129 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24
4130 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
4131 #define IEEE80211_DUR_DS_SLOW_ACK 112
4132 #define IEEE80211_DUR_DS_FAST_ACK 56
4133 #define IEEE80211_DUR_DS_SLOW_CTS 112
4134 #define IEEE80211_DUR_DS_FAST_CTS 56
4135 #define IEEE80211_DUR_DS_SLOT 20
4136 #define IEEE80211_DUR_DS_SIFS 10
4137 #ifdef CONFIG_PM
4138 #endif
4139 #ifdef CONFIG_PM
4140 #endif
4141 /* LDV_COMMENT_END_PREP */
4142
4143
4144
4145
4146 }
4147
4148 break;
4149 case 8: {
4150
4151 /** STRUCT: struct type: ieee80211_ops, struct name: adm8211_ops **/
4152
4153
4154 /* content: static void adm8211_bss_info_changed(struct ieee80211_hw *dev, struct ieee80211_vif *vif, struct ieee80211_bss_conf *conf, u32 changes)*/
4155 /* LDV_COMMENT_BEGIN_PREP */
4156 #define ADM8211_INT(x) \
4157 do { \
4158 if (unlikely(stsr & ADM8211_STSR_ ## x)) \
4159 wiphy_debug(dev->wiphy, "%s\n", #x); \
4160 } while (0)
4161 #undef ADM8211_INT
4162 #define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
4163 static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \
4164 u16 addr, u32 value) { \
4165 struct adm8211_priv *priv = dev->priv; \
4166 unsigned int i; \
4167 u32 reg, bitbuf; \
4168 \
4169 value &= v_mask; \
4170 addr &= a_mask; \
4171 bitbuf = (value << v_shift) | (addr << a_shift); \
4172 \
4173 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \
4174 ADM8211_CSR_READ(SYNRF); \
4175 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \
4176 ADM8211_CSR_READ(SYNRF); \
4177 \
4178 if (prewrite) { \
4179 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
4180 ADM8211_CSR_READ(SYNRF); \
4181 } \
4182 \
4183 for (i = 0; i <= bits; i++) { \
4184 if (bitbuf & (1 << (bits - i))) \
4185 reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \
4186 else \
4187 reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \
4188 \
4189 ADM8211_CSR_WRITE(SYNRF, reg); \
4190 ADM8211_CSR_READ(SYNRF); \
4191 \
4192 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
4193 ADM8211_CSR_READ(SYNRF); \
4194 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
4195 ADM8211_CSR_READ(SYNRF); \
4196 } \
4197 \
4198 if (postwrite == 1) { \
4199 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
4200 ADM8211_CSR_READ(SYNRF); \
4201 } \
4202 if (postwrite == 2) { \
4203 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
4204 ADM8211_CSR_READ(SYNRF); \
4205 } \
4206 \
4207 ADM8211_CSR_WRITE(SYNRF, 0); \
4208 ADM8211_CSR_READ(SYNRF); \
4209 }
4210 #undef WRITE_SYN
4211 /* LDV_COMMENT_END_PREP */
4212 /* LDV_COMMENT_FUNCTION_CALL Function from field "bss_info_changed" from driver structure with callbacks "adm8211_ops" */
4213 ldv_handler_precall();
4214 adm8211_bss_info_changed( var_group2, var_group4, var_adm8211_bss_info_changed_22_p2, var_adm8211_bss_info_changed_22_p3);
4215 /* LDV_COMMENT_BEGIN_PREP */
4216 #define IEEE80211_DUR_DS_LONG_PREAMBLE 144
4217 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
4218 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24
4219 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
4220 #define IEEE80211_DUR_DS_SLOW_ACK 112
4221 #define IEEE80211_DUR_DS_FAST_ACK 56
4222 #define IEEE80211_DUR_DS_SLOW_CTS 112
4223 #define IEEE80211_DUR_DS_FAST_CTS 56
4224 #define IEEE80211_DUR_DS_SLOT 20
4225 #define IEEE80211_DUR_DS_SIFS 10
4226 #ifdef CONFIG_PM
4227 #endif
4228 #ifdef CONFIG_PM
4229 #endif
4230 /* LDV_COMMENT_END_PREP */
4231
4232
4233
4234
4235 }
4236
4237 break;
4238 case 9: {
4239
4240 /** STRUCT: struct type: ieee80211_ops, struct name: adm8211_ops **/
4241
4242
4243 /* content: static u64 adm8211_prepare_multicast(struct ieee80211_hw *hw, struct netdev_hw_addr_list *mc_list)*/
4244 /* LDV_COMMENT_BEGIN_PREP */
4245 #define ADM8211_INT(x) \
4246 do { \
4247 if (unlikely(stsr & ADM8211_STSR_ ## x)) \
4248 wiphy_debug(dev->wiphy, "%s\n", #x); \
4249 } while (0)
4250 #undef ADM8211_INT
4251 #define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
4252 static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \
4253 u16 addr, u32 value) { \
4254 struct adm8211_priv *priv = dev->priv; \
4255 unsigned int i; \
4256 u32 reg, bitbuf; \
4257 \
4258 value &= v_mask; \
4259 addr &= a_mask; \
4260 bitbuf = (value << v_shift) | (addr << a_shift); \
4261 \
4262 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \
4263 ADM8211_CSR_READ(SYNRF); \
4264 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \
4265 ADM8211_CSR_READ(SYNRF); \
4266 \
4267 if (prewrite) { \
4268 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
4269 ADM8211_CSR_READ(SYNRF); \
4270 } \
4271 \
4272 for (i = 0; i <= bits; i++) { \
4273 if (bitbuf & (1 << (bits - i))) \
4274 reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \
4275 else \
4276 reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \
4277 \
4278 ADM8211_CSR_WRITE(SYNRF, reg); \
4279 ADM8211_CSR_READ(SYNRF); \
4280 \
4281 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
4282 ADM8211_CSR_READ(SYNRF); \
4283 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
4284 ADM8211_CSR_READ(SYNRF); \
4285 } \
4286 \
4287 if (postwrite == 1) { \
4288 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
4289 ADM8211_CSR_READ(SYNRF); \
4290 } \
4291 if (postwrite == 2) { \
4292 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
4293 ADM8211_CSR_READ(SYNRF); \
4294 } \
4295 \
4296 ADM8211_CSR_WRITE(SYNRF, 0); \
4297 ADM8211_CSR_READ(SYNRF); \
4298 }
4299 #undef WRITE_SYN
4300 /* LDV_COMMENT_END_PREP */
4301 /* LDV_COMMENT_FUNCTION_CALL Function from field "prepare_multicast" from driver structure with callbacks "adm8211_ops" */
4302 ldv_handler_precall();
4303 adm8211_prepare_multicast( var_group2, var_group5);
4304 /* LDV_COMMENT_BEGIN_PREP */
4305 #define IEEE80211_DUR_DS_LONG_PREAMBLE 144
4306 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
4307 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24
4308 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
4309 #define IEEE80211_DUR_DS_SLOW_ACK 112
4310 #define IEEE80211_DUR_DS_FAST_ACK 56
4311 #define IEEE80211_DUR_DS_SLOW_CTS 112
4312 #define IEEE80211_DUR_DS_FAST_CTS 56
4313 #define IEEE80211_DUR_DS_SLOT 20
4314 #define IEEE80211_DUR_DS_SIFS 10
4315 #ifdef CONFIG_PM
4316 #endif
4317 #ifdef CONFIG_PM
4318 #endif
4319 /* LDV_COMMENT_END_PREP */
4320
4321
4322
4323
4324 }
4325
4326 break;
4327 case 10: {
4328
4329 /** STRUCT: struct type: ieee80211_ops, struct name: adm8211_ops **/
4330
4331
4332 /* content: static void adm8211_configure_filter(struct ieee80211_hw *dev, unsigned int changed_flags, unsigned int *total_flags, u64 multicast)*/
4333 /* LDV_COMMENT_BEGIN_PREP */
4334 #define ADM8211_INT(x) \
4335 do { \
4336 if (unlikely(stsr & ADM8211_STSR_ ## x)) \
4337 wiphy_debug(dev->wiphy, "%s\n", #x); \
4338 } while (0)
4339 #undef ADM8211_INT
4340 #define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
4341 static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \
4342 u16 addr, u32 value) { \
4343 struct adm8211_priv *priv = dev->priv; \
4344 unsigned int i; \
4345 u32 reg, bitbuf; \
4346 \
4347 value &= v_mask; \
4348 addr &= a_mask; \
4349 bitbuf = (value << v_shift) | (addr << a_shift); \
4350 \
4351 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \
4352 ADM8211_CSR_READ(SYNRF); \
4353 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \
4354 ADM8211_CSR_READ(SYNRF); \
4355 \
4356 if (prewrite) { \
4357 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
4358 ADM8211_CSR_READ(SYNRF); \
4359 } \
4360 \
4361 for (i = 0; i <= bits; i++) { \
4362 if (bitbuf & (1 << (bits - i))) \
4363 reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \
4364 else \
4365 reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \
4366 \
4367 ADM8211_CSR_WRITE(SYNRF, reg); \
4368 ADM8211_CSR_READ(SYNRF); \
4369 \
4370 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
4371 ADM8211_CSR_READ(SYNRF); \
4372 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
4373 ADM8211_CSR_READ(SYNRF); \
4374 } \
4375 \
4376 if (postwrite == 1) { \
4377 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
4378 ADM8211_CSR_READ(SYNRF); \
4379 } \
4380 if (postwrite == 2) { \
4381 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
4382 ADM8211_CSR_READ(SYNRF); \
4383 } \
4384 \
4385 ADM8211_CSR_WRITE(SYNRF, 0); \
4386 ADM8211_CSR_READ(SYNRF); \
4387 }
4388 #undef WRITE_SYN
4389 /* LDV_COMMENT_END_PREP */
4390 /* LDV_COMMENT_FUNCTION_CALL Function from field "configure_filter" from driver structure with callbacks "adm8211_ops" */
4391 ldv_handler_precall();
4392 adm8211_configure_filter( var_group2, var_adm8211_configure_filter_24_p1, var_adm8211_configure_filter_24_p2, var_adm8211_configure_filter_24_p3);
4393 /* LDV_COMMENT_BEGIN_PREP */
4394 #define IEEE80211_DUR_DS_LONG_PREAMBLE 144
4395 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
4396 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24
4397 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
4398 #define IEEE80211_DUR_DS_SLOW_ACK 112
4399 #define IEEE80211_DUR_DS_FAST_ACK 56
4400 #define IEEE80211_DUR_DS_SLOW_CTS 112
4401 #define IEEE80211_DUR_DS_FAST_CTS 56
4402 #define IEEE80211_DUR_DS_SLOT 20
4403 #define IEEE80211_DUR_DS_SIFS 10
4404 #ifdef CONFIG_PM
4405 #endif
4406 #ifdef CONFIG_PM
4407 #endif
4408 /* LDV_COMMENT_END_PREP */
4409
4410
4411
4412
4413 }
4414
4415 break;
4416 case 11: {
4417
4418 /** STRUCT: struct type: ieee80211_ops, struct name: adm8211_ops **/
4419
4420
4421 /* content: static int adm8211_get_stats(struct ieee80211_hw *dev, struct ieee80211_low_level_stats *stats)*/
4422 /* LDV_COMMENT_END_PREP */
4423 /* LDV_COMMENT_FUNCTION_CALL Function from field "get_stats" from driver structure with callbacks "adm8211_ops" */
4424 ldv_handler_precall();
4425 adm8211_get_stats( var_group2, var_group6);
4426 /* LDV_COMMENT_BEGIN_PREP */
4427 #define ADM8211_INT(x) \
4428 do { \
4429 if (unlikely(stsr & ADM8211_STSR_ ## x)) \
4430 wiphy_debug(dev->wiphy, "%s\n", #x); \
4431 } while (0)
4432 #undef ADM8211_INT
4433 #define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
4434 static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \
4435 u16 addr, u32 value) { \
4436 struct adm8211_priv *priv = dev->priv; \
4437 unsigned int i; \
4438 u32 reg, bitbuf; \
4439 \
4440 value &= v_mask; \
4441 addr &= a_mask; \
4442 bitbuf = (value << v_shift) | (addr << a_shift); \
4443 \
4444 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \
4445 ADM8211_CSR_READ(SYNRF); \
4446 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \
4447 ADM8211_CSR_READ(SYNRF); \
4448 \
4449 if (prewrite) { \
4450 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
4451 ADM8211_CSR_READ(SYNRF); \
4452 } \
4453 \
4454 for (i = 0; i <= bits; i++) { \
4455 if (bitbuf & (1 << (bits - i))) \
4456 reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \
4457 else \
4458 reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \
4459 \
4460 ADM8211_CSR_WRITE(SYNRF, reg); \
4461 ADM8211_CSR_READ(SYNRF); \
4462 \
4463 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
4464 ADM8211_CSR_READ(SYNRF); \
4465 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
4466 ADM8211_CSR_READ(SYNRF); \
4467 } \
4468 \
4469 if (postwrite == 1) { \
4470 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
4471 ADM8211_CSR_READ(SYNRF); \
4472 } \
4473 if (postwrite == 2) { \
4474 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
4475 ADM8211_CSR_READ(SYNRF); \
4476 } \
4477 \
4478 ADM8211_CSR_WRITE(SYNRF, 0); \
4479 ADM8211_CSR_READ(SYNRF); \
4480 }
4481 #undef WRITE_SYN
4482 #define IEEE80211_DUR_DS_LONG_PREAMBLE 144
4483 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
4484 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24
4485 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
4486 #define IEEE80211_DUR_DS_SLOW_ACK 112
4487 #define IEEE80211_DUR_DS_FAST_ACK 56
4488 #define IEEE80211_DUR_DS_SLOW_CTS 112
4489 #define IEEE80211_DUR_DS_FAST_CTS 56
4490 #define IEEE80211_DUR_DS_SLOT 20
4491 #define IEEE80211_DUR_DS_SIFS 10
4492 #ifdef CONFIG_PM
4493 #endif
4494 #ifdef CONFIG_PM
4495 #endif
4496 /* LDV_COMMENT_END_PREP */
4497
4498
4499
4500
4501 }
4502
4503 break;
4504 case 12: {
4505
4506 /** STRUCT: struct type: ieee80211_ops, struct name: adm8211_ops **/
4507
4508
4509 /* content: static u64 adm8211_get_tsft(struct ieee80211_hw *dev, struct ieee80211_vif *vif)*/
4510 /* LDV_COMMENT_BEGIN_PREP */
4511 #define ADM8211_INT(x) \
4512 do { \
4513 if (unlikely(stsr & ADM8211_STSR_ ## x)) \
4514 wiphy_debug(dev->wiphy, "%s\n", #x); \
4515 } while (0)
4516 #undef ADM8211_INT
4517 #define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
4518 static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \
4519 u16 addr, u32 value) { \
4520 struct adm8211_priv *priv = dev->priv; \
4521 unsigned int i; \
4522 u32 reg, bitbuf; \
4523 \
4524 value &= v_mask; \
4525 addr &= a_mask; \
4526 bitbuf = (value << v_shift) | (addr << a_shift); \
4527 \
4528 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \
4529 ADM8211_CSR_READ(SYNRF); \
4530 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \
4531 ADM8211_CSR_READ(SYNRF); \
4532 \
4533 if (prewrite) { \
4534 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
4535 ADM8211_CSR_READ(SYNRF); \
4536 } \
4537 \
4538 for (i = 0; i <= bits; i++) { \
4539 if (bitbuf & (1 << (bits - i))) \
4540 reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \
4541 else \
4542 reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \
4543 \
4544 ADM8211_CSR_WRITE(SYNRF, reg); \
4545 ADM8211_CSR_READ(SYNRF); \
4546 \
4547 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
4548 ADM8211_CSR_READ(SYNRF); \
4549 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
4550 ADM8211_CSR_READ(SYNRF); \
4551 } \
4552 \
4553 if (postwrite == 1) { \
4554 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
4555 ADM8211_CSR_READ(SYNRF); \
4556 } \
4557 if (postwrite == 2) { \
4558 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
4559 ADM8211_CSR_READ(SYNRF); \
4560 } \
4561 \
4562 ADM8211_CSR_WRITE(SYNRF, 0); \
4563 ADM8211_CSR_READ(SYNRF); \
4564 }
4565 #undef WRITE_SYN
4566 /* LDV_COMMENT_END_PREP */
4567 /* LDV_COMMENT_FUNCTION_CALL Function from field "get_tsf" from driver structure with callbacks "adm8211_ops" */
4568 ldv_handler_precall();
4569 adm8211_get_tsft( var_group2, var_group4);
4570 /* LDV_COMMENT_BEGIN_PREP */
4571 #define IEEE80211_DUR_DS_LONG_PREAMBLE 144
4572 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
4573 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24
4574 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
4575 #define IEEE80211_DUR_DS_SLOW_ACK 112
4576 #define IEEE80211_DUR_DS_FAST_ACK 56
4577 #define IEEE80211_DUR_DS_SLOW_CTS 112
4578 #define IEEE80211_DUR_DS_FAST_CTS 56
4579 #define IEEE80211_DUR_DS_SLOT 20
4580 #define IEEE80211_DUR_DS_SIFS 10
4581 #ifdef CONFIG_PM
4582 #endif
4583 #ifdef CONFIG_PM
4584 #endif
4585 /* LDV_COMMENT_END_PREP */
4586
4587
4588
4589
4590 }
4591
4592 break;
4593 case 13: {
4594
4595 /** STRUCT: struct type: pci_driver, struct name: adm8211_driver **/
4596 if(ldv_s_adm8211_driver_pci_driver==0) {
4597
4598 /* content: static int adm8211_probe(struct pci_dev *pdev, const struct pci_device_id *id)*/
4599 /* LDV_COMMENT_BEGIN_PREP */
4600 #define ADM8211_INT(x) \
4601 do { \
4602 if (unlikely(stsr & ADM8211_STSR_ ## x)) \
4603 wiphy_debug(dev->wiphy, "%s\n", #x); \
4604 } while (0)
4605 #undef ADM8211_INT
4606 #define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
4607 static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \
4608 u16 addr, u32 value) { \
4609 struct adm8211_priv *priv = dev->priv; \
4610 unsigned int i; \
4611 u32 reg, bitbuf; \
4612 \
4613 value &= v_mask; \
4614 addr &= a_mask; \
4615 bitbuf = (value << v_shift) | (addr << a_shift); \
4616 \
4617 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \
4618 ADM8211_CSR_READ(SYNRF); \
4619 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \
4620 ADM8211_CSR_READ(SYNRF); \
4621 \
4622 if (prewrite) { \
4623 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
4624 ADM8211_CSR_READ(SYNRF); \
4625 } \
4626 \
4627 for (i = 0; i <= bits; i++) { \
4628 if (bitbuf & (1 << (bits - i))) \
4629 reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \
4630 else \
4631 reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \
4632 \
4633 ADM8211_CSR_WRITE(SYNRF, reg); \
4634 ADM8211_CSR_READ(SYNRF); \
4635 \
4636 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
4637 ADM8211_CSR_READ(SYNRF); \
4638 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
4639 ADM8211_CSR_READ(SYNRF); \
4640 } \
4641 \
4642 if (postwrite == 1) { \
4643 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
4644 ADM8211_CSR_READ(SYNRF); \
4645 } \
4646 if (postwrite == 2) { \
4647 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
4648 ADM8211_CSR_READ(SYNRF); \
4649 } \
4650 \
4651 ADM8211_CSR_WRITE(SYNRF, 0); \
4652 ADM8211_CSR_READ(SYNRF); \
4653 }
4654 #undef WRITE_SYN
4655 #define IEEE80211_DUR_DS_LONG_PREAMBLE 144
4656 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
4657 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24
4658 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
4659 #define IEEE80211_DUR_DS_SLOW_ACK 112
4660 #define IEEE80211_DUR_DS_FAST_ACK 56
4661 #define IEEE80211_DUR_DS_SLOW_CTS 112
4662 #define IEEE80211_DUR_DS_FAST_CTS 56
4663 #define IEEE80211_DUR_DS_SLOT 20
4664 #define IEEE80211_DUR_DS_SIFS 10
4665 /* LDV_COMMENT_END_PREP */
4666 /* LDV_COMMENT_FUNCTION_CALL Function from field "probe" from driver structure with callbacks "adm8211_driver". Standart function test for correct return result. */
4667 res_adm8211_probe_35 = adm8211_probe( var_group7, var_adm8211_probe_35_p1);
4668 ldv_check_return_value(res_adm8211_probe_35);
4669 ldv_check_return_value_probe(res_adm8211_probe_35);
4670 if(res_adm8211_probe_35)
4671 goto ldv_module_exit;
4672 /* LDV_COMMENT_BEGIN_PREP */
4673 #ifdef CONFIG_PM
4674 #endif
4675 #ifdef CONFIG_PM
4676 #endif
4677 /* LDV_COMMENT_END_PREP */
4678 ldv_s_adm8211_driver_pci_driver++;
4679
4680 }
4681
4682 }
4683
4684 break;
4685 case 14: {
4686
4687 /** STRUCT: struct type: pci_driver, struct name: adm8211_driver **/
4688 if(ldv_s_adm8211_driver_pci_driver==1) {
4689
4690 /* content: static void adm8211_remove(struct pci_dev *pdev)*/
4691 /* LDV_COMMENT_BEGIN_PREP */
4692 #define ADM8211_INT(x) \
4693 do { \
4694 if (unlikely(stsr & ADM8211_STSR_ ## x)) \
4695 wiphy_debug(dev->wiphy, "%s\n", #x); \
4696 } while (0)
4697 #undef ADM8211_INT
4698 #define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
4699 static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \
4700 u16 addr, u32 value) { \
4701 struct adm8211_priv *priv = dev->priv; \
4702 unsigned int i; \
4703 u32 reg, bitbuf; \
4704 \
4705 value &= v_mask; \
4706 addr &= a_mask; \
4707 bitbuf = (value << v_shift) | (addr << a_shift); \
4708 \
4709 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \
4710 ADM8211_CSR_READ(SYNRF); \
4711 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \
4712 ADM8211_CSR_READ(SYNRF); \
4713 \
4714 if (prewrite) { \
4715 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
4716 ADM8211_CSR_READ(SYNRF); \
4717 } \
4718 \
4719 for (i = 0; i <= bits; i++) { \
4720 if (bitbuf & (1 << (bits - i))) \
4721 reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \
4722 else \
4723 reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \
4724 \
4725 ADM8211_CSR_WRITE(SYNRF, reg); \
4726 ADM8211_CSR_READ(SYNRF); \
4727 \
4728 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
4729 ADM8211_CSR_READ(SYNRF); \
4730 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
4731 ADM8211_CSR_READ(SYNRF); \
4732 } \
4733 \
4734 if (postwrite == 1) { \
4735 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
4736 ADM8211_CSR_READ(SYNRF); \
4737 } \
4738 if (postwrite == 2) { \
4739 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
4740 ADM8211_CSR_READ(SYNRF); \
4741 } \
4742 \
4743 ADM8211_CSR_WRITE(SYNRF, 0); \
4744 ADM8211_CSR_READ(SYNRF); \
4745 }
4746 #undef WRITE_SYN
4747 #define IEEE80211_DUR_DS_LONG_PREAMBLE 144
4748 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
4749 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24
4750 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
4751 #define IEEE80211_DUR_DS_SLOW_ACK 112
4752 #define IEEE80211_DUR_DS_FAST_ACK 56
4753 #define IEEE80211_DUR_DS_SLOW_CTS 112
4754 #define IEEE80211_DUR_DS_FAST_CTS 56
4755 #define IEEE80211_DUR_DS_SLOT 20
4756 #define IEEE80211_DUR_DS_SIFS 10
4757 /* LDV_COMMENT_END_PREP */
4758 /* LDV_COMMENT_FUNCTION_CALL Function from field "remove" from driver structure with callbacks "adm8211_driver" */
4759 ldv_handler_precall();
4760 adm8211_remove( var_group7);
4761 /* LDV_COMMENT_BEGIN_PREP */
4762 #ifdef CONFIG_PM
4763 #endif
4764 #ifdef CONFIG_PM
4765 #endif
4766 /* LDV_COMMENT_END_PREP */
4767 ldv_s_adm8211_driver_pci_driver=0;
4768
4769 }
4770
4771 }
4772
4773 break;
4774 case 15: {
4775
4776 /** STRUCT: struct type: pci_driver, struct name: adm8211_driver **/
4777
4778
4779 /* content: static int adm8211_suspend(struct pci_dev *pdev, pm_message_t state)*/
4780 /* LDV_COMMENT_BEGIN_PREP */
4781 #define ADM8211_INT(x) \
4782 do { \
4783 if (unlikely(stsr & ADM8211_STSR_ ## x)) \
4784 wiphy_debug(dev->wiphy, "%s\n", #x); \
4785 } while (0)
4786 #undef ADM8211_INT
4787 #define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
4788 static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \
4789 u16 addr, u32 value) { \
4790 struct adm8211_priv *priv = dev->priv; \
4791 unsigned int i; \
4792 u32 reg, bitbuf; \
4793 \
4794 value &= v_mask; \
4795 addr &= a_mask; \
4796 bitbuf = (value << v_shift) | (addr << a_shift); \
4797 \
4798 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \
4799 ADM8211_CSR_READ(SYNRF); \
4800 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \
4801 ADM8211_CSR_READ(SYNRF); \
4802 \
4803 if (prewrite) { \
4804 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
4805 ADM8211_CSR_READ(SYNRF); \
4806 } \
4807 \
4808 for (i = 0; i <= bits; i++) { \
4809 if (bitbuf & (1 << (bits - i))) \
4810 reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \
4811 else \
4812 reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \
4813 \
4814 ADM8211_CSR_WRITE(SYNRF, reg); \
4815 ADM8211_CSR_READ(SYNRF); \
4816 \
4817 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
4818 ADM8211_CSR_READ(SYNRF); \
4819 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
4820 ADM8211_CSR_READ(SYNRF); \
4821 } \
4822 \
4823 if (postwrite == 1) { \
4824 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
4825 ADM8211_CSR_READ(SYNRF); \
4826 } \
4827 if (postwrite == 2) { \
4828 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
4829 ADM8211_CSR_READ(SYNRF); \
4830 } \
4831 \
4832 ADM8211_CSR_WRITE(SYNRF, 0); \
4833 ADM8211_CSR_READ(SYNRF); \
4834 }
4835 #undef WRITE_SYN
4836 #define IEEE80211_DUR_DS_LONG_PREAMBLE 144
4837 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
4838 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24
4839 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
4840 #define IEEE80211_DUR_DS_SLOW_ACK 112
4841 #define IEEE80211_DUR_DS_FAST_ACK 56
4842 #define IEEE80211_DUR_DS_SLOW_CTS 112
4843 #define IEEE80211_DUR_DS_FAST_CTS 56
4844 #define IEEE80211_DUR_DS_SLOT 20
4845 #define IEEE80211_DUR_DS_SIFS 10
4846 #ifdef CONFIG_PM
4847 /* LDV_COMMENT_END_PREP */
4848 /* LDV_COMMENT_FUNCTION_CALL Function from field "suspend" from driver structure with callbacks "adm8211_driver" */
4849 ldv_handler_precall();
4850 adm8211_suspend( var_group7, var_adm8211_suspend_37_p1);
4851 /* LDV_COMMENT_BEGIN_PREP */
4852 #endif
4853 #ifdef CONFIG_PM
4854 #endif
4855 /* LDV_COMMENT_END_PREP */
4856
4857
4858
4859
4860 }
4861
4862 break;
4863 case 16: {
4864
4865 /** STRUCT: struct type: pci_driver, struct name: adm8211_driver **/
4866
4867
4868 /* content: static int adm8211_resume(struct pci_dev *pdev)*/
4869 /* LDV_COMMENT_BEGIN_PREP */
4870 #define ADM8211_INT(x) \
4871 do { \
4872 if (unlikely(stsr & ADM8211_STSR_ ## x)) \
4873 wiphy_debug(dev->wiphy, "%s\n", #x); \
4874 } while (0)
4875 #undef ADM8211_INT
4876 #define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
4877 static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \
4878 u16 addr, u32 value) { \
4879 struct adm8211_priv *priv = dev->priv; \
4880 unsigned int i; \
4881 u32 reg, bitbuf; \
4882 \
4883 value &= v_mask; \
4884 addr &= a_mask; \
4885 bitbuf = (value << v_shift) | (addr << a_shift); \
4886 \
4887 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \
4888 ADM8211_CSR_READ(SYNRF); \
4889 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \
4890 ADM8211_CSR_READ(SYNRF); \
4891 \
4892 if (prewrite) { \
4893 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
4894 ADM8211_CSR_READ(SYNRF); \
4895 } \
4896 \
4897 for (i = 0; i <= bits; i++) { \
4898 if (bitbuf & (1 << (bits - i))) \
4899 reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \
4900 else \
4901 reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \
4902 \
4903 ADM8211_CSR_WRITE(SYNRF, reg); \
4904 ADM8211_CSR_READ(SYNRF); \
4905 \
4906 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
4907 ADM8211_CSR_READ(SYNRF); \
4908 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
4909 ADM8211_CSR_READ(SYNRF); \
4910 } \
4911 \
4912 if (postwrite == 1) { \
4913 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
4914 ADM8211_CSR_READ(SYNRF); \
4915 } \
4916 if (postwrite == 2) { \
4917 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
4918 ADM8211_CSR_READ(SYNRF); \
4919 } \
4920 \
4921 ADM8211_CSR_WRITE(SYNRF, 0); \
4922 ADM8211_CSR_READ(SYNRF); \
4923 }
4924 #undef WRITE_SYN
4925 #define IEEE80211_DUR_DS_LONG_PREAMBLE 144
4926 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
4927 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24
4928 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
4929 #define IEEE80211_DUR_DS_SLOW_ACK 112
4930 #define IEEE80211_DUR_DS_FAST_ACK 56
4931 #define IEEE80211_DUR_DS_SLOW_CTS 112
4932 #define IEEE80211_DUR_DS_FAST_CTS 56
4933 #define IEEE80211_DUR_DS_SLOT 20
4934 #define IEEE80211_DUR_DS_SIFS 10
4935 #ifdef CONFIG_PM
4936 /* LDV_COMMENT_END_PREP */
4937 /* LDV_COMMENT_FUNCTION_CALL Function from field "resume" from driver structure with callbacks "adm8211_driver" */
4938 ldv_handler_precall();
4939 adm8211_resume( var_group7);
4940 /* LDV_COMMENT_BEGIN_PREP */
4941 #endif
4942 #ifdef CONFIG_PM
4943 #endif
4944 /* LDV_COMMENT_END_PREP */
4945
4946
4947
4948
4949 }
4950
4951 break;
4952 case 17: {
4953
4954 /** CALLBACK SECTION request_irq **/
4955 LDV_IN_INTERRUPT=2;
4956
4957 /* content: static irqreturn_t adm8211_interrupt(int irq, void *dev_id)*/
4958 /* LDV_COMMENT_END_PREP */
4959 /* LDV_COMMENT_FUNCTION_CALL */
4960 ldv_handler_precall();
4961 adm8211_interrupt( var_adm8211_interrupt_9_p0, var_adm8211_interrupt_9_p1);
4962 /* LDV_COMMENT_BEGIN_PREP */
4963 #define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
4964 static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \
4965 u16 addr, u32 value) { \
4966 struct adm8211_priv *priv = dev->priv; \
4967 unsigned int i; \
4968 u32 reg, bitbuf; \
4969 \
4970 value &= v_mask; \
4971 addr &= a_mask; \
4972 bitbuf = (value << v_shift) | (addr << a_shift); \
4973 \
4974 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \
4975 ADM8211_CSR_READ(SYNRF); \
4976 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \
4977 ADM8211_CSR_READ(SYNRF); \
4978 \
4979 if (prewrite) { \
4980 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
4981 ADM8211_CSR_READ(SYNRF); \
4982 } \
4983 \
4984 for (i = 0; i <= bits; i++) { \
4985 if (bitbuf & (1 << (bits - i))) \
4986 reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \
4987 else \
4988 reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \
4989 \
4990 ADM8211_CSR_WRITE(SYNRF, reg); \
4991 ADM8211_CSR_READ(SYNRF); \
4992 \
4993 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
4994 ADM8211_CSR_READ(SYNRF); \
4995 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
4996 ADM8211_CSR_READ(SYNRF); \
4997 } \
4998 \
4999 if (postwrite == 1) { \
5000 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
5001 ADM8211_CSR_READ(SYNRF); \
5002 } \
5003 if (postwrite == 2) { \
5004 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
5005 ADM8211_CSR_READ(SYNRF); \
5006 } \
5007 \
5008 ADM8211_CSR_WRITE(SYNRF, 0); \
5009 ADM8211_CSR_READ(SYNRF); \
5010 }
5011 #undef WRITE_SYN
5012 #define IEEE80211_DUR_DS_LONG_PREAMBLE 144
5013 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
5014 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24
5015 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
5016 #define IEEE80211_DUR_DS_SLOW_ACK 112
5017 #define IEEE80211_DUR_DS_FAST_ACK 56
5018 #define IEEE80211_DUR_DS_SLOW_CTS 112
5019 #define IEEE80211_DUR_DS_FAST_CTS 56
5020 #define IEEE80211_DUR_DS_SLOT 20
5021 #define IEEE80211_DUR_DS_SIFS 10
5022 #ifdef CONFIG_PM
5023 #endif
5024 #ifdef CONFIG_PM
5025 #endif
5026 /* LDV_COMMENT_END_PREP */
5027 LDV_IN_INTERRUPT=1;
5028
5029
5030
5031 }
5032
5033 break;
5034 default: break;
5035
5036 }
5037
5038 }
5039
5040 ldv_module_exit:
5041
5042 /* LDV_COMMENT_FUNCTION_CALL Checks that all resources and locks are correctly released before the driver will be unloaded. */
5043 ldv_final: ldv_check_final_state();
5044
5045 /* LDV_COMMENT_END_FUNCTION_CALL_SECTION */
5046 return;
5047
5048 }
5049 #endif
5050
5051 /* LDV_COMMENT_END_MAIN */ 1
2 #include <linux/kernel.h>
3 bool ldv_is_err(const void *ptr);
4 bool ldv_is_err_or_null(const void *ptr);
5 void* ldv_err_ptr(long error);
6 long ldv_ptr_err(const void *ptr);
7
8 extern void ldv_dma_map_page(void);
9 extern void ldv_dma_mapping_error(void);
10 #line 1 "/home/vitaly/ldv-launches/work/current--X--drivers--X--defaultlinux-4.9-rc1.tar.xz--X--331_1a--X--cpachecker/linux-4.9-rc1.tar.xz/csd_deg_dscv/3548/dscv_tempdir/dscv/ri/331_1a/drivers/net/wireless/admtek/adm8211.c"
11
12
13 /*
14 * Linux device driver for ADMtek ADM8211 (IEEE 802.11b MAC/BBP)
15 *
16 * Copyright (c) 2003, Jouni Malinen <j@w1.fi>
17 * Copyright (c) 2004-2007, Michael Wu <flamingice@sourmilk.net>
18 * Some parts copyright (c) 2003 by David Young <dyoung@pobox.com>
19 * and used with permission.
20 *
21 * Much thanks to Infineon-ADMtek for their support of this driver.
22 *
23 * This program is free software; you can redistribute it and/or modify
24 * it under the terms of the GNU General Public License version 2 as
25 * published by the Free Software Foundation. See README and COPYING for
26 * more details.
27 */
28
29 #include <linux/interrupt.h>
30 #include <linux/if.h>
31 #include <linux/skbuff.h>
32 #include <linux/slab.h>
33 #include <linux/etherdevice.h>
34 #include <linux/pci.h>
35 #include <linux/delay.h>
36 #include <linux/crc32.h>
37 #include <linux/eeprom_93cx6.h>
38 #include <linux/module.h>
39 #include <net/mac80211.h>
40
41 #include "adm8211.h"
42
43 MODULE_AUTHOR("Michael Wu <flamingice@sourmilk.net>");
44 MODULE_AUTHOR("Jouni Malinen <j@w1.fi>");
45 MODULE_DESCRIPTION("Driver for IEEE 802.11b wireless cards based on ADMtek ADM8211");
46 MODULE_SUPPORTED_DEVICE("ADM8211");
47 MODULE_LICENSE("GPL");
48
49 static unsigned int tx_ring_size __read_mostly = 16;
50 static unsigned int rx_ring_size __read_mostly = 16;
51
52 module_param(tx_ring_size, uint, 0);
53 module_param(rx_ring_size, uint, 0);
54
/* PCI IDs of ADM8211-based adapters handled by this driver;
 * terminated by the zero entry as required by the PCI core. */
static const struct pci_device_id adm8211_pci_id_table[] = {
	/* ADMtek ADM8211 */
	{ PCI_DEVICE(0x10B7, 0x6000) }, /* 3Com 3CRSHPW796 */
	{ PCI_DEVICE(0x1200, 0x8201) }, /* ? */
	{ PCI_DEVICE(0x1317, 0x8201) }, /* ADM8211A */
	{ PCI_DEVICE(0x1317, 0x8211) }, /* ADM8211B/C */
	{ 0 }
};
63
/* Supported 802.11b rates; .bitrate is presumably in units of 100 kbps
 * per mac80211 convention (10 => 1 Mbps) — confirm against mac80211.h. */
static struct ieee80211_rate adm8211_rates[] = {
	{ .bitrate = 10, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 20, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 55, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 110, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 220, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, /* XX ?? */
};
71
/* The 14 2.4 GHz (802.11b) channels; copied into priv->channels at
 * EEPROM-read time and then restricted by the country-code range. */
static const struct ieee80211_channel adm8211_channels[] = {
	{ .center_freq = 2412},
	{ .center_freq = 2417},
	{ .center_freq = 2422},
	{ .center_freq = 2427},
	{ .center_freq = 2432},
	{ .center_freq = 2437},
	{ .center_freq = 2442},
	{ .center_freq = 2447},
	{ .center_freq = 2452},
	{ .center_freq = 2457},
	{ .center_freq = 2462},
	{ .center_freq = 2467},
	{ .center_freq = 2472},
	{ .center_freq = 2484},
};
88
89
90 static void adm8211_eeprom_register_read(struct eeprom_93cx6 *eeprom)
91 {
92 struct adm8211_priv *priv = eeprom->data;
93 u32 reg = ADM8211_CSR_READ(SPR);
94
95 eeprom->reg_data_in = reg & ADM8211_SPR_SDI;
96 eeprom->reg_data_out = reg & ADM8211_SPR_SDO;
97 eeprom->reg_data_clock = reg & ADM8211_SPR_SCLK;
98 eeprom->reg_chip_select = reg & ADM8211_SPR_SCS;
99 }
100
101 static void adm8211_eeprom_register_write(struct eeprom_93cx6 *eeprom)
102 {
103 struct adm8211_priv *priv = eeprom->data;
104 u32 reg = 0x4000 | ADM8211_SPR_SRS;
105
106 if (eeprom->reg_data_in)
107 reg |= ADM8211_SPR_SDI;
108 if (eeprom->reg_data_out)
109 reg |= ADM8211_SPR_SDO;
110 if (eeprom->reg_data_clock)
111 reg |= ADM8211_SPR_SCLK;
112 if (eeprom->reg_chip_select)
113 reg |= ADM8211_SPR_SCS;
114
115 ADM8211_CSR_WRITE(SPR, reg);
116 ADM8211_CSR_READ(SPR); /* eeprom_delay */
117 }
118
119 static int adm8211_read_eeprom(struct ieee80211_hw *dev)
120 {
121 struct adm8211_priv *priv = dev->priv;
122 unsigned int words, i;
123 struct ieee80211_chan_range chan_range;
124 u16 cr49;
125 struct eeprom_93cx6 eeprom = {
126 .data = priv,
127 .register_read = adm8211_eeprom_register_read,
128 .register_write = adm8211_eeprom_register_write
129 };
130
131 if (ADM8211_CSR_READ(CSR_TEST0) & ADM8211_CSR_TEST0_EPTYP) {
132 /* 256 * 16-bit = 512 bytes */
133 eeprom.width = PCI_EEPROM_WIDTH_93C66;
134 words = 256;
135 } else {
136 /* 64 * 16-bit = 128 bytes */
137 eeprom.width = PCI_EEPROM_WIDTH_93C46;
138 words = 64;
139 }
140
141 priv->eeprom_len = words * 2;
142 priv->eeprom = kmalloc(priv->eeprom_len, GFP_KERNEL);
143 if (!priv->eeprom)
144 return -ENOMEM;
145
146 eeprom_93cx6_multiread(&eeprom, 0, (__le16 *)priv->eeprom, words);
147
148 cr49 = le16_to_cpu(priv->eeprom->cr49);
149 priv->rf_type = (cr49 >> 3) & 0x7;
150 switch (priv->rf_type) {
151 case ADM8211_TYPE_INTERSIL:
152 case ADM8211_TYPE_RFMD:
153 case ADM8211_TYPE_MARVEL:
154 case ADM8211_TYPE_AIROHA:
155 case ADM8211_TYPE_ADMTEK:
156 break;
157
158 default:
159 if (priv->pdev->revision < ADM8211_REV_CA)
160 priv->rf_type = ADM8211_TYPE_RFMD;
161 else
162 priv->rf_type = ADM8211_TYPE_AIROHA;
163
164 printk(KERN_WARNING "%s (adm8211): Unknown RFtype %d\n",
165 pci_name(priv->pdev), (cr49 >> 3) & 0x7);
166 }
167
168 priv->bbp_type = cr49 & 0x7;
169 switch (priv->bbp_type) {
170 case ADM8211_TYPE_INTERSIL:
171 case ADM8211_TYPE_RFMD:
172 case ADM8211_TYPE_MARVEL:
173 case ADM8211_TYPE_AIROHA:
174 case ADM8211_TYPE_ADMTEK:
175 break;
176 default:
177 if (priv->pdev->revision < ADM8211_REV_CA)
178 priv->bbp_type = ADM8211_TYPE_RFMD;
179 else
180 priv->bbp_type = ADM8211_TYPE_ADMTEK;
181
182 printk(KERN_WARNING "%s (adm8211): Unknown BBPtype: %d\n",
183 pci_name(priv->pdev), cr49 >> 3);
184 }
185
186 if (priv->eeprom->country_code >= ARRAY_SIZE(cranges)) {
187 printk(KERN_WARNING "%s (adm8211): Invalid country code (%d)\n",
188 pci_name(priv->pdev), priv->eeprom->country_code);
189
190 chan_range = cranges[2];
191 } else
192 chan_range = cranges[priv->eeprom->country_code];
193
194 printk(KERN_DEBUG "%s (adm8211): Channel range: %d - %d\n",
195 pci_name(priv->pdev), (int)chan_range.min, (int)chan_range.max);
196
197 BUILD_BUG_ON(sizeof(priv->channels) != sizeof(adm8211_channels));
198
199 memcpy(priv->channels, adm8211_channels, sizeof(priv->channels));
200 priv->band.channels = priv->channels;
201 priv->band.n_channels = ARRAY_SIZE(adm8211_channels);
202 priv->band.bitrates = adm8211_rates;
203 priv->band.n_bitrates = ARRAY_SIZE(adm8211_rates);
204
205 for (i = 1; i <= ARRAY_SIZE(adm8211_channels); i++)
206 if (i < chan_range.min || i > chan_range.max)
207 priv->channels[i - 1].flags |= IEEE80211_CHAN_DISABLED;
208
209 switch (priv->eeprom->specific_bbptype) {
210 case ADM8211_BBP_RFMD3000:
211 case ADM8211_BBP_RFMD3002:
212 case ADM8211_BBP_ADM8011:
213 priv->specific_bbptype = priv->eeprom->specific_bbptype;
214 break;
215
216 default:
217 if (priv->pdev->revision < ADM8211_REV_CA)
218 priv->specific_bbptype = ADM8211_BBP_RFMD3000;
219 else
220 priv->specific_bbptype = ADM8211_BBP_ADM8011;
221
222 printk(KERN_WARNING "%s (adm8211): Unknown specific BBP: %d\n",
223 pci_name(priv->pdev), priv->eeprom->specific_bbptype);
224 }
225
226 switch (priv->eeprom->specific_rftype) {
227 case ADM8211_RFMD2948:
228 case ADM8211_RFMD2958:
229 case ADM8211_RFMD2958_RF3000_CONTROL_POWER:
230 case ADM8211_MAX2820:
231 case ADM8211_AL2210L:
232 priv->transceiver_type = priv->eeprom->specific_rftype;
233 break;
234
235 default:
236 if (priv->pdev->revision == ADM8211_REV_BA)
237 priv->transceiver_type = ADM8211_RFMD2958_RF3000_CONTROL_POWER;
238 else if (priv->pdev->revision == ADM8211_REV_CA)
239 priv->transceiver_type = ADM8211_AL2210L;
240 else if (priv->pdev->revision == ADM8211_REV_AB)
241 priv->transceiver_type = ADM8211_RFMD2948;
242
243 printk(KERN_WARNING "%s (adm8211): Unknown transceiver: %d\n",
244 pci_name(priv->pdev), priv->eeprom->specific_rftype);
245
246 break;
247 }
248
249 printk(KERN_DEBUG "%s (adm8211): RFtype=%d BBPtype=%d Specific BBP=%d "
250 "Transceiver=%d\n", pci_name(priv->pdev), priv->rf_type,
251 priv->bbp_type, priv->specific_bbptype, priv->transceiver_type);
252
253 return 0;
254 }
255
256 static inline void adm8211_write_sram(struct ieee80211_hw *dev,
257 u32 addr, u32 data)
258 {
259 struct adm8211_priv *priv = dev->priv;
260
261 ADM8211_CSR_WRITE(WEPCTL, addr | ADM8211_WEPCTL_TABLE_WR |
262 (priv->pdev->revision < ADM8211_REV_BA ?
263 0 : ADM8211_WEPCTL_SEL_WEPTABLE ));
264 ADM8211_CSR_READ(WEPCTL);
265 msleep(1);
266
267 ADM8211_CSR_WRITE(WESK, data);
268 ADM8211_CSR_READ(WESK);
269 msleep(1);
270 }
271
272 static void adm8211_write_sram_bytes(struct ieee80211_hw *dev,
273 unsigned int addr, u8 *buf,
274 unsigned int len)
275 {
276 struct adm8211_priv *priv = dev->priv;
277 u32 reg = ADM8211_CSR_READ(WEPCTL);
278 unsigned int i;
279
280 if (priv->pdev->revision < ADM8211_REV_BA) {
281 for (i = 0; i < len; i += 2) {
282 u16 val = buf[i] | (buf[i + 1] << 8);
283 adm8211_write_sram(dev, addr + i / 2, val);
284 }
285 } else {
286 for (i = 0; i < len; i += 4) {
287 u32 val = (buf[i + 0] << 0 ) | (buf[i + 1] << 8 ) |
288 (buf[i + 2] << 16) | (buf[i + 3] << 24);
289 adm8211_write_sram(dev, addr + i / 4, val);
290 }
291 }
292
293 ADM8211_CSR_WRITE(WEPCTL, reg);
294 }
295
296 static void adm8211_clear_sram(struct ieee80211_hw *dev)
297 {
298 struct adm8211_priv *priv = dev->priv;
299 u32 reg = ADM8211_CSR_READ(WEPCTL);
300 unsigned int addr;
301
302 for (addr = 0; addr < ADM8211_SRAM_SIZE; addr++)
303 adm8211_write_sram(dev, addr, 0);
304
305 ADM8211_CSR_WRITE(WEPCTL, reg);
306 }
307
308 static int adm8211_get_stats(struct ieee80211_hw *dev,
309 struct ieee80211_low_level_stats *stats)
310 {
311 struct adm8211_priv *priv = dev->priv;
312
313 memcpy(stats, &priv->stats, sizeof(*stats));
314
315 return 0;
316 }
317
/*
 * TX-complete interrupt handler: reclaim finished descriptors from the
 * TX ring, report status to mac80211, and wake the queue when enough
 * ring space has been freed. Runs under priv->lock; called from the
 * top-level interrupt handler, hence the *_irqsafe status reporting.
 */
static void adm8211_interrupt_tci(struct ieee80211_hw *dev)
{
	struct adm8211_priv *priv = dev->priv;
	unsigned int dirty_tx;

	spin_lock(&priv->lock);

	/* walk from the oldest un-reclaimed slot up to the producer index */
	for (dirty_tx = priv->dirty_tx; priv->cur_tx - dirty_tx; dirty_tx++) {
		unsigned int entry = dirty_tx % priv->tx_ring_size;
		u32 status = le32_to_cpu(priv->tx_ring[entry].status);
		struct ieee80211_tx_info *txi;
		struct adm8211_tx_ring_info *info;
		struct sk_buff *skb;

		/* stop at the first descriptor the hardware still owns
		 * or has not finished transmitting */
		if (status & TDES0_CONTROL_OWN ||
		    !(status & TDES0_CONTROL_DONE))
			break;

		info = &priv->tx_buffers[entry];
		skb = info->skb;
		txi = IEEE80211_SKB_CB(skb);

		/* TODO: check TDES0_STATUS_TUF and TDES0_STATUS_TRO */

		pci_unmap_single(priv->pdev, info->mapping,
				 info->skb->len, PCI_DMA_TODEVICE);

		ieee80211_tx_info_clear_status(txi);

		/* strip the hardware TX header and restore the 802.11
		 * header that was stashed in skb->cb before queuing */
		skb_pull(skb, sizeof(struct adm8211_tx_hdr));
		memcpy(skb_push(skb, info->hdrlen), skb->cb, info->hdrlen);
		/* no error summary bit set => frame was ACKed */
		if (!(txi->flags & IEEE80211_TX_CTL_NO_ACK) &&
		    !(status & TDES0_STATUS_ES))
			txi->flags |= IEEE80211_TX_STAT_ACK;

		ieee80211_tx_status_irqsafe(dev, skb);

		info->skb = NULL;
	}

	/* wake the queue once at least two free slots remain */
	if (priv->cur_tx - dirty_tx < priv->tx_ring_size - 2)
		ieee80211_wake_queue(dev, 0);

	priv->dirty_tx = dirty_tx;
	spin_unlock(&priv->lock);
}
364
365
/* RX-complete interrupt: drain descriptors the hardware has handed back.
 *
 * Three cases per descriptor:
 *  - error frame with hardware CRC checking active: drop, reuse buffer;
 *  - small frame (< RX_COPY_BREAK): copy into a fresh skb, keep the
 *    DMA buffer mapped;
 *  - large frame: hand the ring skb up and map a replacement buffer.
 * The descriptor is then re-armed and the frame, if any, is delivered
 * to mac80211 with signal/rate/frequency status attached. At most one
 * full ring (rx_ring_size entries) is processed per invocation.
 */
static void adm8211_interrupt_rci(struct ieee80211_hw *dev)
{
	struct adm8211_priv *priv = dev->priv;
	unsigned int entry = priv->cur_rx % priv->rx_ring_size;
	u32 status;
	unsigned int pktlen;
	struct sk_buff *skb, *newskb;
	unsigned int limit = priv->rx_ring_size;
	u8 rssi, rate;

	while (!(priv->rx_ring[entry].status & cpu_to_le32(RDES0_STATUS_OWN))) {
		if (!limit--)
			break;

		status = le32_to_cpu(priv->rx_ring[entry].status);
		rate = (status & RDES0_STATUS_RXDR) >> 12;
		rssi = le32_to_cpu(priv->rx_ring[entry].length) &
		       RDES1_STATUS_RSSI;

		/* clamp the reported frame length to the DMA buffer size */
		pktlen = status & RDES0_STATUS_FL;
		if (pktlen > RX_PKT_SIZE) {
			if (net_ratelimit())
				wiphy_debug(dev->wiphy, "frame too long (%d)\n",
					    pktlen);
			pktlen = RX_PKT_SIZE;
		}

		if (!priv->soft_rx_crc && status & RDES0_STATUS_ES) {
			skb = NULL; /* old buffer will be reused */
			/* TODO: update RX error stats */
			/* TODO: check RDES0_STATUS_CRC*E */
		} else if (pktlen < RX_COPY_BREAK) {
			/* small frame: copy out of the still-mapped buffer */
			skb = dev_alloc_skb(pktlen);
			if (skb) {
				pci_dma_sync_single_for_cpu(
					priv->pdev,
					priv->rx_buffers[entry].mapping,
					pktlen, PCI_DMA_FROMDEVICE);
				memcpy(skb_put(skb, pktlen),
				       skb_tail_pointer(priv->rx_buffers[entry].skb),
				       pktlen);
				pci_dma_sync_single_for_device(
					priv->pdev,
					priv->rx_buffers[entry].mapping,
					RX_PKT_SIZE, PCI_DMA_FROMDEVICE);
			}
			/* NOTE(review): on allocation failure the frame is
			 * silently dropped (skb stays NULL) */
		} else {
			/* large frame: pass the ring skb up, remap a fresh one */
			newskb = dev_alloc_skb(RX_PKT_SIZE);
			if (newskb) {
				skb = priv->rx_buffers[entry].skb;
				skb_put(skb, pktlen);
				pci_unmap_single(
					priv->pdev,
					priv->rx_buffers[entry].mapping,
					RX_PKT_SIZE, PCI_DMA_FROMDEVICE);
				priv->rx_buffers[entry].skb = newskb;
				priv->rx_buffers[entry].mapping =
					pci_map_single(priv->pdev,
						       skb_tail_pointer(newskb),
						       RX_PKT_SIZE,
						       PCI_DMA_FROMDEVICE);
			} else {
				skb = NULL;
				/* TODO: update rx dropped stats */
			}

			priv->rx_ring[entry].buffer1 =
				cpu_to_le32(priv->rx_buffers[entry].mapping);
		}

		/* hand the descriptor back to the hardware */
		priv->rx_ring[entry].status = cpu_to_le32(RDES0_STATUS_OWN |
							  RDES0_STATUS_SQL);
		priv->rx_ring[entry].length =
			cpu_to_le32(RX_PKT_SIZE |
				    (entry == priv->rx_ring_size - 1 ?
				     RDES1_CONTROL_RER : 0));

		if (skb) {
			struct ieee80211_rx_status rx_status = {0};

			/* rev CA and later report an inverted signal value */
			if (priv->pdev->revision < ADM8211_REV_CA)
				rx_status.signal = rssi;
			else
				rx_status.signal = 100 - rssi;

			rx_status.rate_idx = rate;

			rx_status.freq = adm8211_channels[priv->channel - 1].center_freq;
			rx_status.band = NL80211_BAND_2GHZ;

			memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
			ieee80211_rx_irqsafe(dev, skb);
		}

		entry = (++priv->cur_rx) % priv->rx_ring_size;
	}

	/* TODO: check LPC and update stats? */
}
465
466
/* Main PCI interrupt handler.
 *
 * Reads and acknowledges the status register, dispatches RX- and
 * TX-complete work, and logs any remaining status bits through the
 * local ADM8211_INT() debug macro. An all-ones status means the device
 * has disappeared (e.g. surprise removal) and is treated as handled.
 * NOTE(review): interrupts not flagged by NISS/AISS also return
 * IRQ_HANDLED rather than IRQ_NONE — confirm this is intentional.
 */
static irqreturn_t adm8211_interrupt(int irq, void *dev_id)
{
/* log a status bit by name if it is set */
#define ADM8211_INT(x)						\
do {								\
	if (unlikely(stsr & ADM8211_STSR_ ## x))		\
		wiphy_debug(dev->wiphy, "%s\n", #x);		\
} while (0)

	struct ieee80211_hw *dev = dev_id;
	struct adm8211_priv *priv = dev->priv;
	u32 stsr = ADM8211_CSR_READ(STSR);
	ADM8211_CSR_WRITE(STSR, stsr);	/* ack everything we saw */
	if (stsr == 0xffffffff)
		return IRQ_HANDLED;

	/* neither normal nor abnormal interrupt summary bit set */
	if (!(stsr & (ADM8211_STSR_NISS | ADM8211_STSR_AISS)))
		return IRQ_HANDLED;

	if (stsr & ADM8211_STSR_RCI)
		adm8211_interrupt_rci(dev);
	if (stsr & ADM8211_STSR_TCI)
		adm8211_interrupt_tci(dev);

	ADM8211_INT(PCF);
	ADM8211_INT(BCNTC);
	ADM8211_INT(GPINT);
	ADM8211_INT(ATIMTC);
	ADM8211_INT(TSFTF);
	ADM8211_INT(TSCZ);
	ADM8211_INT(SQL);
	ADM8211_INT(WEPTD);
	ADM8211_INT(ATIME);
	ADM8211_INT(TEIS);
	ADM8211_INT(FBE);
	ADM8211_INT(REIS);
	ADM8211_INT(GPTT);
	ADM8211_INT(RPS);
	ADM8211_INT(RDU);
	ADM8211_INT(TUF);
	ADM8211_INT(TPS);

	return IRQ_HANDLED;

#undef ADM8211_INT
}
512
/* WRITE_SYN() generates a bit-banging routine,
 * adm8211_rf_write_syn_<name>(), that clocks one register write out to
 * an RF synthesizer over the SYNRF serial interface:
 *
 *   v_mask/v_shift, a_mask/a_shift - how the value and address are
 *                                    masked and packed into the word
 *   bits      - index of the highest bit shifted out (bits + 1 clocks)
 *   prewrite  - if set, drive the data line low before shifting
 *   postwrite - selects which IF_SELECT line is pulsed afterwards
 *               (1 -> IF_SELECT_0, 2 -> IF_SELECT_1)
 */
#define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev,	\
					   u16 addr, u32 value) {	\
	struct adm8211_priv *priv = dev->priv;				\
	unsigned int i;							\
	u32 reg, bitbuf;						\
									\
	value &= v_mask;						\
	addr &= a_mask;							\
	bitbuf = (value << v_shift) | (addr << a_shift);		\
									\
	ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1);		\
	ADM8211_CSR_READ(SYNRF);					\
	ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0);		\
	ADM8211_CSR_READ(SYNRF);					\
									\
	if (prewrite) {							\
		ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
		ADM8211_CSR_READ(SYNRF);				\
	}								\
									\
	for (i = 0; i <= bits; i++) {					\
		if (bitbuf & (1 << (bits - i)))				\
			reg = ADM8211_SYNRF_WRITE_SYNDATA_1;		\
		else							\
			reg = ADM8211_SYNRF_WRITE_SYNDATA_0;		\
									\
		ADM8211_CSR_WRITE(SYNRF, reg);				\
		ADM8211_CSR_READ(SYNRF);				\
									\
		ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
		ADM8211_CSR_READ(SYNRF);				\
		ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
		ADM8211_CSR_READ(SYNRF);				\
	}								\
									\
	if (postwrite == 1) {						\
		ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
		ADM8211_CSR_READ(SYNRF);				\
	}								\
	if (postwrite == 2) {						\
		ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
		ADM8211_CSR_READ(SYNRF);				\
	}								\
									\
	ADM8211_CSR_WRITE(SYNRF, 0);					\
	ADM8211_CSR_READ(SYNRF);					\
}

/* per-chip packing: value mask/shift, addr mask/shift, bits, pre, post */
WRITE_SYN(max2820, 0x00FFF, 0, 0x0F, 12, 15, 1, 1)
WRITE_SYN(al2210l, 0xFFFFF, 4, 0x0F, 0, 23, 1, 1)
WRITE_SYN(rfmd2958, 0x3FFFF, 0, 0x1F, 18, 23, 0, 1)
WRITE_SYN(rfmd2948, 0x0FFFF, 4, 0x0F, 0, 21, 0, 2)

#undef WRITE_SYN
568
/* Write one baseband-processor register through the MMI interface.
 *
 * Polls BBPCTL until no read or write is in flight, issues the write
 * with the BBP-type-specific control-word template, then polls for the
 * write strobe to clear. Each poll allows up to 10 x 2 ms.
 *
 * Returns 0 on success, -ETIMEDOUT if the interface stays busy.
 */
static int adm8211_write_bbp(struct ieee80211_hw *dev, u8 addr, u8 data)
{
	struct adm8211_priv *priv = dev->priv;
	unsigned int timeout;
	u32 reg;

	/* wait for any previous BBP access to complete */
	timeout = 10;
	while (timeout > 0) {
		reg = ADM8211_CSR_READ(BBPCTL);
		if (!(reg & (ADM8211_BBPCTL_WR | ADM8211_BBPCTL_RD)))
			break;
		timeout--;
		msleep(2);
	}

	if (timeout == 0) {
		wiphy_debug(dev->wiphy,
			    "adm8211_write_bbp(%d,%d) failed prewrite (reg=0x%08x)\n",
			    addr, data, reg);
		return -ETIMEDOUT;
	}

	/* control-word template depends on the BBP variant */
	switch (priv->bbp_type) {
	case ADM8211_TYPE_INTERSIL:
		reg = ADM8211_BBPCTL_MMISEL;	/* three wire interface */
		break;
	case ADM8211_TYPE_RFMD:
		reg = (0x20 << 24) | ADM8211_BBPCTL_TXCE | ADM8211_BBPCTL_CCAP |
		      (0x01 << 18);
		break;
	case ADM8211_TYPE_ADMTEK:
		reg = (0x20 << 24) | ADM8211_BBPCTL_TXCE | ADM8211_BBPCTL_CCAP |
		      (0x05 << 18);
		break;
	}
	/* NOTE(review): no default case above — for an unrecognized
	 * bbp_type, reg keeps the value last read from BBPCTL in the
	 * poll loop; confirm that is intended */
	reg |= ADM8211_BBPCTL_WR | (addr << 8) | data;

	ADM8211_CSR_WRITE(BBPCTL, reg);

	/* wait for the write strobe to clear */
	timeout = 10;
	while (timeout > 0) {
		reg = ADM8211_CSR_READ(BBPCTL);
		if (!(reg & ADM8211_BBPCTL_WR))
			break;
		timeout--;
		msleep(2);
	}

	if (timeout == 0) {
		/* cancel the stuck write before reporting the failure */
		ADM8211_CSR_WRITE(BBPCTL, ADM8211_CSR_READ(BBPCTL) &
				  ~ADM8211_BBPCTL_WR);
		wiphy_debug(dev->wiphy,
			    "adm8211_write_bbp(%d,%d) failed postwrite (reg=0x%08x)\n",
			    addr, data, reg);
		return -ETIMEDOUT;
	}

	return 0;
}
628
/* Tune the RF synthesizer and baseband to 802.11b channel 'chan' (1-14).
 *
 * Per-channel power and filter values come from the EEPROM unless the
 * corresponding priv override is set (values above 0x3F / equal to 0xFF
 * mean "use EEPROM"). Idles the device for the duration, reprograms the
 * synthesizer for the fitted transceiver, updates BBP registers for the
 * RFMD BBP type, and writes the channel number into CAP0 for adhoc use.
 * Always returns 0; unknown transceiver/BBP types are only logged.
 */
static int adm8211_rf_set_channel(struct ieee80211_hw *dev, unsigned int chan)
{
	/* RFMD2958 synthesizer register 5/6 values, indexed by chan - 1 */
	static const u32 adm8211_rfmd2958_reg5[] =
		{0x22BD, 0x22D2, 0x22E8, 0x22FE, 0x2314, 0x232A, 0x2340,
		 0x2355, 0x236B, 0x2381, 0x2397, 0x23AD, 0x23C2, 0x23F7};
	static const u32 adm8211_rfmd2958_reg6[] =
		{0x05D17, 0x3A2E8, 0x2E8BA, 0x22E8B, 0x1745D, 0x0BA2E, 0x00000,
		 0x345D1, 0x28BA2, 0x1D174, 0x11745, 0x05D17, 0x3A2E8, 0x11745};

	struct adm8211_priv *priv = dev->priv;
	u8 ant_power = priv->ant_power > 0x3F ?
		priv->eeprom->antenna_power[chan - 1] : priv->ant_power;
	u8 tx_power = priv->tx_power > 0x3F ?
		priv->eeprom->tx_power[chan - 1] : priv->tx_power;
	u8 lpf_cutoff = priv->lpf_cutoff == 0xFF ?
		priv->eeprom->lpf_cutoff[chan - 1] : priv->lpf_cutoff;
	u8 lnags_thresh = priv->lnags_threshold == 0xFF ?
		priv->eeprom->lnags_threshold[chan - 1] : priv->lnags_threshold;
	u32 reg;

	ADM8211_IDLE();

	/* Program synthesizer to new channel */
	switch (priv->transceiver_type) {
	case ADM8211_RFMD2958:
	case ADM8211_RFMD2958_RF3000_CONTROL_POWER:
		adm8211_rf_write_syn_rfmd2958(dev, 0x00, 0x04007);
		adm8211_rf_write_syn_rfmd2958(dev, 0x02, 0x00033);

		adm8211_rf_write_syn_rfmd2958(dev, 0x05,
					      adm8211_rfmd2958_reg5[chan - 1]);
		adm8211_rf_write_syn_rfmd2958(dev, 0x06,
					      adm8211_rfmd2958_reg6[chan - 1]);
		break;

	case ADM8211_RFMD2948:
		adm8211_rf_write_syn_rfmd2948(dev, SI4126_MAIN_CONF,
					      SI4126_MAIN_XINDIV2);
		adm8211_rf_write_syn_rfmd2948(dev, SI4126_POWERDOWN,
					      SI4126_POWERDOWN_PDIB |
					      SI4126_POWERDOWN_PDRB);
		adm8211_rf_write_syn_rfmd2948(dev, SI4126_PHASE_DET_GAIN, 0);
		adm8211_rf_write_syn_rfmd2948(dev, SI4126_RF2_N_DIV,
					      (chan == 14 ?
					       2110 : (2033 + (chan * 5))));
		adm8211_rf_write_syn_rfmd2948(dev, SI4126_IF_N_DIV, 1496);
		adm8211_rf_write_syn_rfmd2948(dev, SI4126_RF2_R_DIV, 44);
		adm8211_rf_write_syn_rfmd2948(dev, SI4126_IF_R_DIV, 44);
		break;

	case ADM8211_MAX2820:
		adm8211_rf_write_syn_max2820(dev, 0x3,
			(chan == 14 ? 0x054 : (0x7 + (chan * 5))));
		break;

	case ADM8211_AL2210L:
		adm8211_rf_write_syn_al2210l(dev, 0x0,
			(chan == 14 ? 0x229B4 : (0x22967 + (chan * 5))));
		break;

	default:
		wiphy_debug(dev->wiphy, "unsupported transceiver type %d\n",
			    priv->transceiver_type);
		break;
	}

	/* write BBP regs */
	if (priv->bbp_type == ADM8211_TYPE_RFMD) {

		/* SMC 2635W specific? adm8211b doesn't use the 2948 though.. */
		/* TODO: remove if SMC 2635W doesn't need this */
		if (priv->transceiver_type == ADM8211_RFMD2948) {
			reg = ADM8211_CSR_READ(GPIO);
			reg &= 0xfffc0000;
			reg |= ADM8211_CSR_GPIO_EN0;
			if (chan != 14)
				reg |= ADM8211_CSR_GPIO_O0;
			ADM8211_CSR_WRITE(GPIO, reg);
		}

		if (priv->transceiver_type == ADM8211_RFMD2958) {
			/* set PCNT2 */
			adm8211_rf_write_syn_rfmd2958(dev, 0x0B, 0x07100);
			/* set PCNT1 P_DESIRED/MID_BIAS */
			reg = le16_to_cpu(priv->eeprom->cr49);
			reg >>= 13;
			reg <<= 15;
			reg |= ant_power << 9;
			adm8211_rf_write_syn_rfmd2958(dev, 0x0A, reg);
			/* set TXRX TX_GAIN */
			adm8211_rf_write_syn_rfmd2958(dev, 0x09, 0x00050 |
				(priv->pdev->revision < ADM8211_REV_CA ? tx_power : 0));
		} else {
			/* non-2958: TX power lives in the PLCPHD register */
			reg = ADM8211_CSR_READ(PLCPHD);
			reg &= 0xff00ffff;
			reg |= tx_power << 18;
			ADM8211_CSR_WRITE(PLCPHD, reg);
		}

		/* hold the PHY in reset while updating BBP registers */
		ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_SELRF |
				  ADM8211_SYNRF_PE1 | ADM8211_SYNRF_PHYRST);
		ADM8211_CSR_READ(SYNRF);
		msleep(30);

		/* RF3000 BBP */
		if (priv->transceiver_type != ADM8211_RFMD2958)
			adm8211_write_bbp(dev, RF3000_TX_VAR_GAIN__TX_LEN_EXT,
					  tx_power<<2);
		adm8211_write_bbp(dev, RF3000_LOW_GAIN_CALIB, lpf_cutoff);
		adm8211_write_bbp(dev, RF3000_HIGH_GAIN_CALIB, lnags_thresh);
		adm8211_write_bbp(dev, 0x1c, priv->pdev->revision == ADM8211_REV_BA ?
					     priv->eeprom->cr28 : 0);
		adm8211_write_bbp(dev, 0x1d, priv->eeprom->cr29);

		ADM8211_CSR_WRITE(SYNRF, 0);

		/* Nothing to do for ADMtek BBP */
	} else if (priv->bbp_type != ADM8211_TYPE_ADMTEK)
		wiphy_debug(dev->wiphy, "unsupported BBP type %d\n",
			    priv->bbp_type);

	ADM8211_RESTORE();

	/* update current channel for adhoc (and maybe AP mode) */
	reg = ADM8211_CSR_READ(CAP0);
	reg &= ~0xF;
	reg |= chan;
	ADM8211_CSR_WRITE(CAP0, reg);

	return 0;
}
760
/* Apply the current operating mode (priv->mode) to the NAR register
 * shadow and the software-RX-CRC flag, idling the device around the
 * change. Modes other than station/adhoc/monitor leave nar untouched.
 */
static void adm8211_update_mode(struct ieee80211_hw *dev)
{
	struct adm8211_priv *priv = dev->priv;

	ADM8211_IDLE();

	priv->soft_rx_crc = 0;
	switch (priv->mode) {
	case NL80211_IFTYPE_STATION:
		/* clear promiscuous and pass-errors, enable TX and RX */
		priv->nar &= ~(ADM8211_NAR_PR | ADM8211_NAR_EA);
		priv->nar |= ADM8211_NAR_ST | ADM8211_NAR_SR;
		break;
	case NL80211_IFTYPE_ADHOC:
		priv->nar &= ~ADM8211_NAR_PR;
		priv->nar |= ADM8211_NAR_EA | ADM8211_NAR_ST | ADM8211_NAR_SR;

		/* don't trust the error bits on rev 0x20 and up in adhoc */
		if (priv->pdev->revision >= ADM8211_REV_BA)
			priv->soft_rx_crc = 1;
		break;
	case NL80211_IFTYPE_MONITOR:
		/* promiscuous RX only, TX disabled */
		priv->nar &= ~(ADM8211_NAR_EA | ADM8211_NAR_ST);
		priv->nar |= ADM8211_NAR_PR | ADM8211_NAR_SR;
		break;
	}

	ADM8211_RESTORE();
}
789
/* Program the power-on register set of the fitted RF synthesizer.
 * The RFMD2948 (and unknown types) need no initialization here.
 */
static void adm8211_hw_init_syn(struct ieee80211_hw *dev)
{
	struct adm8211_priv *priv = dev->priv;

	switch (priv->transceiver_type) {
	case ADM8211_RFMD2958:
	case ADM8211_RFMD2958_RF3000_CONTROL_POWER:
		/* comments taken from ADMtek vendor driver */

		/* Reset RF2958 after power on */
		adm8211_rf_write_syn_rfmd2958(dev, 0x1F, 0x00000);
		/* Initialize RF VCO Core Bias to maximum */
		adm8211_rf_write_syn_rfmd2958(dev, 0x0C, 0x3001F);
		/* Initialize IF PLL */
		adm8211_rf_write_syn_rfmd2958(dev, 0x01, 0x29C03);
		/* Initialize IF PLL Coarse Tuning */
		adm8211_rf_write_syn_rfmd2958(dev, 0x03, 0x1FF6F);
		/* Initialize RF PLL */
		adm8211_rf_write_syn_rfmd2958(dev, 0x04, 0x29403);
		/* Initialize RF PLL Coarse Tuning */
		adm8211_rf_write_syn_rfmd2958(dev, 0x07, 0x1456F);
		/* Initialize TX gain and filter BW (R9) */
		adm8211_rf_write_syn_rfmd2958(dev, 0x09,
			(priv->transceiver_type == ADM8211_RFMD2958 ?
			 0x10050 : 0x00050));
		/* Initialize CAL register */
		adm8211_rf_write_syn_rfmd2958(dev, 0x08, 0x3FFF8);
		break;

	case ADM8211_MAX2820:
		adm8211_rf_write_syn_max2820(dev, 0x1, 0x01E);
		adm8211_rf_write_syn_max2820(dev, 0x2, 0x001);
		adm8211_rf_write_syn_max2820(dev, 0x3, 0x054);
		adm8211_rf_write_syn_max2820(dev, 0x4, 0x310);
		adm8211_rf_write_syn_max2820(dev, 0x5, 0x000);
		break;

	case ADM8211_AL2210L:
		adm8211_rf_write_syn_al2210l(dev, 0x0, 0x0196C);
		adm8211_rf_write_syn_al2210l(dev, 0x1, 0x007CB);
		adm8211_rf_write_syn_al2210l(dev, 0x2, 0x3582F);
		adm8211_rf_write_syn_al2210l(dev, 0x3, 0x010A9);
		adm8211_rf_write_syn_al2210l(dev, 0x4, 0x77280);
		adm8211_rf_write_syn_al2210l(dev, 0x5, 0x45641);
		adm8211_rf_write_syn_al2210l(dev, 0x6, 0xEA130);
		adm8211_rf_write_syn_al2210l(dev, 0x7, 0x80000);
		adm8211_rf_write_syn_al2210l(dev, 0x8, 0x7850F);
		adm8211_rf_write_syn_al2210l(dev, 0x9, 0xF900C);
		adm8211_rf_write_syn_al2210l(dev, 0xA, 0x00000);
		adm8211_rf_write_syn_al2210l(dev, 0xB, 0x00000);
		break;

	case ADM8211_RFMD2948:
	default:
		break;
	}
}
847
/* Initialize the baseband processor and (via adm8211_hw_init_syn())
 * the RF synthesizer.
 *
 * Programs the MMI access address registers for the detected BBP
 * variant, applies per-chip-revision fixups, runs the synthesizer
 * init, then loads the per-transceiver BBP register tables while the
 * PHY is held in reset. Finishes by routing RF CAL control to the MAC.
 * Always returns 0; unsupported combinations are only logged.
 */
static int adm8211_hw_init_bbp(struct ieee80211_hw *dev)
{
	struct adm8211_priv *priv = dev->priv;
	u32 reg;

	/* write addresses */
	if (priv->bbp_type == ADM8211_TYPE_INTERSIL) {
		ADM8211_CSR_WRITE(MMIWA,  0x100E0C0A);
		ADM8211_CSR_WRITE(MMIRD0, 0x00007C7E);
		ADM8211_CSR_WRITE(MMIRD1, 0x00100000);
	} else if (priv->bbp_type == ADM8211_TYPE_RFMD ||
		   priv->bbp_type == ADM8211_TYPE_ADMTEK) {
		/* check specific BBP type */
		switch (priv->specific_bbptype) {
		case ADM8211_BBP_RFMD3000:
		case ADM8211_BBP_RFMD3002:
			ADM8211_CSR_WRITE(MMIWA,  0x00009101);
			ADM8211_CSR_WRITE(MMIRD0, 0x00000301);
			break;

		case ADM8211_BBP_ADM8011:
			ADM8211_CSR_WRITE(MMIWA,  0x00008903);
			ADM8211_CSR_WRITE(MMIRD0, 0x00001716);

			reg = ADM8211_CSR_READ(BBPCTL);
			reg &= ~ADM8211_BBPCTL_TYPE;
			reg |= 0x5 << 18;
			ADM8211_CSR_WRITE(BBPCTL, reg);
			break;
		}

		/* per-revision fixups */
		switch (priv->pdev->revision) {
		case ADM8211_REV_CA:
			if (priv->transceiver_type == ADM8211_RFMD2958 ||
			    priv->transceiver_type == ADM8211_RFMD2958_RF3000_CONTROL_POWER ||
			    priv->transceiver_type == ADM8211_RFMD2948)
				ADM8211_CSR_WRITE(SYNCTL, 0x1 << 22);
			else if (priv->transceiver_type == ADM8211_MAX2820 ||
				 priv->transceiver_type == ADM8211_AL2210L)
				ADM8211_CSR_WRITE(SYNCTL, 0x3 << 22);
			break;

		case ADM8211_REV_BA:
			reg  = ADM8211_CSR_READ(MMIRD1);
			reg &= 0x0000FFFF;
			reg |= 0x7e100000;
			ADM8211_CSR_WRITE(MMIRD1, reg);
			break;

		case ADM8211_REV_AB:
		case ADM8211_REV_AF:
		default:
			ADM8211_CSR_WRITE(MMIRD1, 0x7e100000);
			break;
		}

		/* For RFMD */
		ADM8211_CSR_WRITE(MACTEST, 0x800);
	}

	adm8211_hw_init_syn(dev);

	/* Set RF Power control IF pin to PE1+PHYRST# */
	ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_SELRF |
			  ADM8211_SYNRF_PE1 | ADM8211_SYNRF_PHYRST);
	ADM8211_CSR_READ(SYNRF);
	msleep(20);

	/* write BBP regs */
	if (priv->bbp_type == ADM8211_TYPE_RFMD) {
		/* RF3000 BBP */
		/* another set:
		 * 11: c8
		 * 14: 14
		 * 15: 50 (chan 1..13; chan 14: d0)
		 * 1c: 00
		 * 1d: 84
		 */
		adm8211_write_bbp(dev, RF3000_CCA_CTRL, 0x80);
		/* antenna selection: diversity */
		adm8211_write_bbp(dev, RF3000_DIVERSITY__RSSI, 0x80);
		adm8211_write_bbp(dev, RF3000_TX_VAR_GAIN__TX_LEN_EXT, 0x74);
		adm8211_write_bbp(dev, RF3000_LOW_GAIN_CALIB, 0x38);
		adm8211_write_bbp(dev, RF3000_HIGH_GAIN_CALIB, 0x40);

		/* EEPROM v2+ carries calibration bytes cr28/cr29 */
		if (priv->eeprom->major_version < 2) {
			adm8211_write_bbp(dev, 0x1c, 0x00);
			adm8211_write_bbp(dev, 0x1d, 0x80);
		} else {
			if (priv->pdev->revision == ADM8211_REV_BA)
				adm8211_write_bbp(dev, 0x1c, priv->eeprom->cr28);
			else
				adm8211_write_bbp(dev, 0x1c, 0x00);

			adm8211_write_bbp(dev, 0x1d, priv->eeprom->cr29);
		}
	} else if (priv->bbp_type == ADM8211_TYPE_ADMTEK) {
		/* reset baseband */
		adm8211_write_bbp(dev, 0x00, 0xFF);
		/* antenna selection: diversity */
		adm8211_write_bbp(dev, 0x07, 0x0A);

		/* TODO: find documentation for this */
		switch (priv->transceiver_type) {
		case ADM8211_RFMD2958:
		case ADM8211_RFMD2958_RF3000_CONTROL_POWER:
			adm8211_write_bbp(dev, 0x00, 0x00);
			adm8211_write_bbp(dev, 0x01, 0x00);
			adm8211_write_bbp(dev, 0x02, 0x00);
			adm8211_write_bbp(dev, 0x03, 0x00);
			adm8211_write_bbp(dev, 0x06, 0x0f);
			adm8211_write_bbp(dev, 0x09, 0x00);
			adm8211_write_bbp(dev, 0x0a, 0x00);
			adm8211_write_bbp(dev, 0x0b, 0x00);
			adm8211_write_bbp(dev, 0x0c, 0x00);
			adm8211_write_bbp(dev, 0x0f, 0xAA);
			adm8211_write_bbp(dev, 0x10, 0x8c);
			adm8211_write_bbp(dev, 0x11, 0x43);
			adm8211_write_bbp(dev, 0x18, 0x40);
			adm8211_write_bbp(dev, 0x20, 0x23);
			adm8211_write_bbp(dev, 0x21, 0x02);
			adm8211_write_bbp(dev, 0x22, 0x28);
			adm8211_write_bbp(dev, 0x23, 0x30);
			adm8211_write_bbp(dev, 0x24, 0x2d);
			adm8211_write_bbp(dev, 0x28, 0x35);
			adm8211_write_bbp(dev, 0x2a, 0x8c);
			adm8211_write_bbp(dev, 0x2b, 0x81);
			adm8211_write_bbp(dev, 0x2c, 0x44);
			adm8211_write_bbp(dev, 0x2d, 0x0A);
			adm8211_write_bbp(dev, 0x29, 0x40);
			adm8211_write_bbp(dev, 0x60, 0x08);
			adm8211_write_bbp(dev, 0x64, 0x01);
			break;

		case ADM8211_MAX2820:
			adm8211_write_bbp(dev, 0x00, 0x00);
			adm8211_write_bbp(dev, 0x01, 0x00);
			adm8211_write_bbp(dev, 0x02, 0x00);
			adm8211_write_bbp(dev, 0x03, 0x00);
			adm8211_write_bbp(dev, 0x06, 0x0f);
			adm8211_write_bbp(dev, 0x09, 0x05);
			adm8211_write_bbp(dev, 0x0a, 0x02);
			adm8211_write_bbp(dev, 0x0b, 0x00);
			adm8211_write_bbp(dev, 0x0c, 0x0f);
			adm8211_write_bbp(dev, 0x0f, 0x55);
			adm8211_write_bbp(dev, 0x10, 0x8d);
			adm8211_write_bbp(dev, 0x11, 0x43);
			adm8211_write_bbp(dev, 0x18, 0x4a);
			adm8211_write_bbp(dev, 0x20, 0x20);
			adm8211_write_bbp(dev, 0x21, 0x02);
			adm8211_write_bbp(dev, 0x22, 0x23);
			adm8211_write_bbp(dev, 0x23, 0x30);
			adm8211_write_bbp(dev, 0x24, 0x2d);
			adm8211_write_bbp(dev, 0x2a, 0x8c);
			adm8211_write_bbp(dev, 0x2b, 0x81);
			adm8211_write_bbp(dev, 0x2c, 0x44);
			adm8211_write_bbp(dev, 0x29, 0x4a);
			adm8211_write_bbp(dev, 0x60, 0x2b);
			adm8211_write_bbp(dev, 0x64, 0x01);
			break;

		case ADM8211_AL2210L:
			adm8211_write_bbp(dev, 0x00, 0x00);
			adm8211_write_bbp(dev, 0x01, 0x00);
			adm8211_write_bbp(dev, 0x02, 0x00);
			adm8211_write_bbp(dev, 0x03, 0x00);
			adm8211_write_bbp(dev, 0x06, 0x0f);
			adm8211_write_bbp(dev, 0x07, 0x05);
			adm8211_write_bbp(dev, 0x08, 0x03);
			adm8211_write_bbp(dev, 0x09, 0x00);
			adm8211_write_bbp(dev, 0x0a, 0x00);
			adm8211_write_bbp(dev, 0x0b, 0x00);
			adm8211_write_bbp(dev, 0x0c, 0x10);
			adm8211_write_bbp(dev, 0x0f, 0x55);
			adm8211_write_bbp(dev, 0x10, 0x8d);
			adm8211_write_bbp(dev, 0x11, 0x43);
			adm8211_write_bbp(dev, 0x18, 0x4a);
			adm8211_write_bbp(dev, 0x20, 0x20);
			adm8211_write_bbp(dev, 0x21, 0x02);
			adm8211_write_bbp(dev, 0x22, 0x23);
			adm8211_write_bbp(dev, 0x23, 0x30);
			adm8211_write_bbp(dev, 0x24, 0x2d);
			adm8211_write_bbp(dev, 0x2a, 0xaa);
			adm8211_write_bbp(dev, 0x2b, 0x81);
			adm8211_write_bbp(dev, 0x2c, 0x44);
			adm8211_write_bbp(dev, 0x29, 0xfa);
			adm8211_write_bbp(dev, 0x60, 0x2d);
			adm8211_write_bbp(dev, 0x64, 0x01);
			break;

		case ADM8211_RFMD2948:
			break;

		default:
			wiphy_debug(dev->wiphy, "unsupported transceiver %d\n",
				    priv->transceiver_type);
			break;
		}
	} else
		wiphy_debug(dev->wiphy, "unsupported BBP %d\n", priv->bbp_type);

	/* release the PHY from reset */
	ADM8211_CSR_WRITE(SYNRF, 0);

	/* Set RF CAL control source to MAC control */
	reg = ADM8211_CSR_READ(SYNCTL);
	reg |= ADM8211_SYNCTL_SELCAL;
	ADM8211_CSR_WRITE(SYNCTL, reg);

	return 0;
}
1058
/* configures hw beacons/probe responses */
/* Load the supported-rates table into SRAM (rev BA uses a fixed 4-rate
 * table to work around a chip-specific bug), program the beacon PLCP
 * header (short preamble, signal value 110), and set the TX lifetime
 * and retry limits. Always returns 0. */
static int adm8211_set_rate(struct ieee80211_hw *dev)
{
	struct adm8211_priv *priv = dev->priv;
	u32 reg;
	int i = 0;
	u8 rate_buf[12] = {0};

	/* write supported rates */
	if (priv->pdev->revision != ADM8211_REV_BA) {
		/* [0] = count, then one rate byte each, in 500 kb/s units
		 * with the "basic rate" bit (0x80) set */
		rate_buf[0] = ARRAY_SIZE(adm8211_rates);
		for (i = 0; i < ARRAY_SIZE(adm8211_rates); i++)
			rate_buf[i + 1] = (adm8211_rates[i].bitrate / 5) | 0x80;
	} else {
		/* workaround for rev BA specific bug */
		rate_buf[0] = 0x04;
		rate_buf[1] = 0x82;
		rate_buf[2] = 0x04;
		rate_buf[3] = 0x0b;
		rate_buf[4] = 0x16;
	}

	adm8211_write_sram_bytes(dev, ADM8211_SRAM_SUPP_RATE, rate_buf,
				 ARRAY_SIZE(adm8211_rates) + 1);

	reg = ADM8211_CSR_READ(PLCPHD) & 0x00FFFFFF; /* keep bits 0-23 */
	reg |= 1 << 15;		/* short preamble */
	reg |= 110 << 24;
	ADM8211_CSR_WRITE(PLCPHD, reg);

	/* MTMLT = 512 TU (max TX MSDU lifetime)
	 * BCNTSIG = plcp_signal (beacon, probe resp, and atim TX rate)
	 * SRTYLIM = 224 (short retry limit, TX header value is default) */
	ADM8211_CSR_WRITE(TXLMT, (512 << 16) | (110 << 8) | (224 << 0));

	return 0;
}
1096
/* One-time MAC initialization after reset.
 *
 * Configures PCI bus behavior (read-line/read-multiple, MWI with a
 * cache-line-size-dependent burst setting), timing registers, the rate
 * table, RF reset, and disables WEP. Leaves interrupts masked (IER=0)
 * with all pending status acknowledged.
 */
static void adm8211_hw_init(struct ieee80211_hw *dev)
{
	struct adm8211_priv *priv = dev->priv;
	u32 reg;
	u8 cline;

	reg = ADM8211_CSR_READ(PAR);
	reg |= ADM8211_PAR_MRLE | ADM8211_PAR_MRME;
	reg &= ~(ADM8211_PAR_BAR | ADM8211_PAR_CAL);

	/* enable memory-write-invalidate if the PCI core supports it,
	 * and encode the cache line size into PAR bits 14-15 */
	if (!pci_set_mwi(priv->pdev)) {
		reg |= 0x1 << 24;
		pci_read_config_byte(priv->pdev, PCI_CACHE_LINE_SIZE, &cline);

		switch (cline) {
		case 0x8:
			reg |= (0x1 << 14);
			break;
		case 0x10:
			reg |= (0x2 << 14);
			break;
		case 0x20:
			reg |= (0x3 << 14);
			break;
		default:
			reg |= (0x0 << 14);
			break;
		}
	}

	ADM8211_CSR_WRITE(PAR, reg);

	reg = ADM8211_CSR_READ(CSR_TEST1);
	reg &= ~(0xF << 28);
	reg |= (1 << 28) | (1 << 31);
	ADM8211_CSR_WRITE(CSR_TEST1, reg);

	/* lose link after 4 lost beacons */
	reg = (0x04 << 21) | ADM8211_WCSR_TSFTWE | ADM8211_WCSR_LSOE;
	ADM8211_CSR_WRITE(WCSR, reg);

	/* Disable APM, enable receive FIFO threshold, and set drain receive
	 * threshold to store-and-forward */
	reg = ADM8211_CSR_READ(CMDR);
	reg &= ~(ADM8211_CMDR_APM | ADM8211_CMDR_DRT);
	reg |= ADM8211_CMDR_RTE | ADM8211_CMDR_DRT_SF;
	ADM8211_CSR_WRITE(CMDR, reg);

	adm8211_set_rate(dev);

	/* 4-bit values:
	 * PWR1UP    = 8 * 2 ms
	 * PWR0PAPE  = 8 us or 5 us
	 * PWR1PAPE  = 1 us or 3 us
	 * PWR0TRSW  = 5 us
	 * PWR1TRSW  = 12 us
	 * PWR0PE2   = 13 us
	 * PWR1PE2   = 1 us
	 * PWR0TXPE  = 8 or 6 */
	if (priv->pdev->revision < ADM8211_REV_CA)
		ADM8211_CSR_WRITE(TOFS2, 0x8815cd18);
	else
		ADM8211_CSR_WRITE(TOFS2, 0x8535cd16);

	/* Enable store and forward for transmit */
	priv->nar = ADM8211_NAR_SF | ADM8211_NAR_PB;
	ADM8211_CSR_WRITE(NAR, priv->nar);

	/* Reset RF */
	ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_RADIO);
	ADM8211_CSR_READ(SYNRF);	/* flush posted write */
	msleep(10);
	ADM8211_CSR_WRITE(SYNRF, 0);
	ADM8211_CSR_READ(SYNRF);	/* flush posted write */
	msleep(5);

	/* Set CFP Max Duration to 0x10 TU */
	reg = ADM8211_CSR_READ(CFPP);
	reg &= ~(0xffff << 8);
	reg |= 0x0010 << 8;
	ADM8211_CSR_WRITE(CFPP, reg);

	/* USCNT = 0x16 (number of system clocks, 22 MHz, in 1us
	 * TUCNT = 0x3ff - Tu counter 1024 us  */
	ADM8211_CSR_WRITE(TOFS0, (0x16 << 24) | 0x3ff);

	/* SLOT=20 us, SIFS=110 cycles of 22 MHz (5 us),
	 * DIFS=50 us, EIFS=100 us */
	if (priv->pdev->revision < ADM8211_REV_CA)
		ADM8211_CSR_WRITE(IFST, (20 << 23) | (110 << 15) |
					(50 << 9)  | 100);
	else
		ADM8211_CSR_WRITE(IFST, (20 << 23) | (24 << 15) |
					(50 << 9)  | 100);

	/* PCNT = 1 (MAC idle time awake/sleep, unit S)
	 * RMRD = 2346 * 8 + 1 us (max RX duration)  */
	ADM8211_CSR_WRITE(RMD, (1 << 16) | 18769);

	/* MART=65535 us, MIRT=256 us, TSFTOFST=0 us */
	ADM8211_CSR_WRITE(RSPT, 0xffffff00);

	/* Initialize BBP (and SYN) */
	adm8211_hw_init_bbp(dev);

	/* make sure interrupts are off */
	ADM8211_CSR_WRITE(IER, 0);

	/* ACK interrupts */
	ADM8211_CSR_WRITE(STSR, ADM8211_CSR_READ(STSR));

	/* Setup WEP (turns it off for now) */
	reg = ADM8211_CSR_READ(MACTEST);
	reg &= ~(7 << 20);
	ADM8211_CSR_WRITE(MACTEST, reg);

	reg  = ADM8211_CSR_READ(WEPCTL);
	reg &= ~ADM8211_WEPCTL_WEPENABLE;
	reg |= ADM8211_WEPCTL_WEPRXBYP;
	ADM8211_CSR_WRITE(WEPCTL, reg);

	/* Clear the missed-packet counter. */
	ADM8211_CSR_READ(LPC);
}
1221
1222 static int adm8211_hw_reset(struct ieee80211_hw *dev)
1223 {
1224 struct adm8211_priv *priv = dev->priv;
1225 u32 reg, tmp;
1226 int timeout = 100;
1227
1228 /* Power-on issue */
1229 /* TODO: check if this is necessary */
1230 ADM8211_CSR_WRITE(FRCTL, 0);
1231
1232 /* Reset the chip */
1233 tmp = ADM8211_CSR_READ(PAR);
1234 ADM8211_CSR_WRITE(PAR, ADM8211_PAR_SWR);
1235
1236 while ((ADM8211_CSR_READ(PAR) & ADM8211_PAR_SWR) && timeout--)
1237 msleep(50);
1238
1239 if (timeout <= 0)
1240 return -ETIMEDOUT;
1241
1242 ADM8211_CSR_WRITE(PAR, tmp);
1243
1244 if (priv->pdev->revision == ADM8211_REV_BA &&
1245 (priv->transceiver_type == ADM8211_RFMD2958_RF3000_CONTROL_POWER ||
1246 priv->transceiver_type == ADM8211_RFMD2958)) {
1247 reg = ADM8211_CSR_READ(CSR_TEST1);
1248 reg |= (1 << 4) | (1 << 5);
1249 ADM8211_CSR_WRITE(CSR_TEST1, reg);
1250 } else if (priv->pdev->revision == ADM8211_REV_CA) {
1251 reg = ADM8211_CSR_READ(CSR_TEST1);
1252 reg &= ~((1 << 4) | (1 << 5));
1253 ADM8211_CSR_WRITE(CSR_TEST1, reg);
1254 }
1255
1256 ADM8211_CSR_WRITE(FRCTL, 0);
1257
1258 reg = ADM8211_CSR_READ(CSR_TEST0);
1259 reg |= ADM8211_CSR_TEST0_EPRLD; /* EEPROM Recall */
1260 ADM8211_CSR_WRITE(CSR_TEST0, reg);
1261
1262 adm8211_clear_sram(dev);
1263
1264 return 0;
1265 }
1266
1267 static u64 adm8211_get_tsft(struct ieee80211_hw *dev,
1268 struct ieee80211_vif *vif)
1269 {
1270 struct adm8211_priv *priv = dev->priv;
1271 u32 tsftl;
1272 u64 tsft;
1273
1274 tsftl = ADM8211_CSR_READ(TSFTL);
1275 tsft = ADM8211_CSR_READ(TSFTH);
1276 tsft <<= 32;
1277 tsft |= tsftl;
1278
1279 return tsft;
1280 }
1281
1282 static void adm8211_set_interval(struct ieee80211_hw *dev,
1283 unsigned short bi, unsigned short li)
1284 {
1285 struct adm8211_priv *priv = dev->priv;
1286 u32 reg;
1287
1288 /* BP (beacon interval) = data->beacon_interval
1289 * LI (listen interval) = data->listen_interval (in beacon intervals) */
1290 reg = (bi << 16) | li;
1291 ADM8211_CSR_WRITE(BPLI, reg);
1292 }
1293
1294 static void adm8211_set_bssid(struct ieee80211_hw *dev, const u8 *bssid)
1295 {
1296 struct adm8211_priv *priv = dev->priv;
1297 u32 reg;
1298
1299 ADM8211_CSR_WRITE(BSSID0, le32_to_cpu(*(__le32 *)bssid));
1300 reg = ADM8211_CSR_READ(ABDA1);
1301 reg &= 0x0000ffff;
1302 reg |= (bssid[4] << 16) | (bssid[5] << 24);
1303 ADM8211_CSR_WRITE(ABDA1, reg);
1304 }
1305
1306 static int adm8211_config(struct ieee80211_hw *dev, u32 changed)
1307 {
1308 struct adm8211_priv *priv = dev->priv;
1309 struct ieee80211_conf *conf = &dev->conf;
1310 int channel =
1311 ieee80211_frequency_to_channel(conf->chandef.chan->center_freq);
1312
1313 if (channel != priv->channel) {
1314 priv->channel = channel;
1315 adm8211_rf_set_channel(dev, priv->channel);
1316 }
1317
1318 return 0;
1319 }
1320
1321 static void adm8211_bss_info_changed(struct ieee80211_hw *dev,
1322 struct ieee80211_vif *vif,
1323 struct ieee80211_bss_conf *conf,
1324 u32 changes)
1325 {
1326 struct adm8211_priv *priv = dev->priv;
1327
1328 if (!(changes & BSS_CHANGED_BSSID))
1329 return;
1330
1331 if (!ether_addr_equal(conf->bssid, priv->bssid)) {
1332 adm8211_set_bssid(dev, conf->bssid);
1333 memcpy(priv->bssid, conf->bssid, ETH_ALEN);
1334 }
1335 }
1336
1337 static u64 adm8211_prepare_multicast(struct ieee80211_hw *hw,
1338 struct netdev_hw_addr_list *mc_list)
1339 {
1340 unsigned int bit_nr;
1341 u32 mc_filter[2];
1342 struct netdev_hw_addr *ha;
1343
1344 mc_filter[1] = mc_filter[0] = 0;
1345
1346 netdev_hw_addr_list_for_each(ha, mc_list) {
1347 bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
1348
1349 bit_nr &= 0x3F;
1350 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
1351 }
1352
1353 return mc_filter[0] | ((u64)(mc_filter[1]) << 32);
1354 }
1355
/* mac80211 configure_filter callback.
 *
 * Programs the multicast hash registers (MAR0/MAR1), the all-multicast
 * and promiscuous NAR bits, the RX_INCLUDES_FCS hw flag (promiscuous
 * frames keep their FCS), and the BSSID filter. Only FIF_ALLMULTI is
 * reported back in *total_flags.
 */
static void adm8211_configure_filter(struct ieee80211_hw *dev,
				     unsigned int changed_flags,
				     unsigned int *total_flags,
				     u64 multicast)
{
	static const u8 bcast[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
	struct adm8211_priv *priv = dev->priv;
	unsigned int new_flags;
	u32 mc_filter[2];

	/* split the 64-bit hash built by adm8211_prepare_multicast() */
	mc_filter[0] = multicast;
	mc_filter[1] = multicast >> 32;

	new_flags = 0;

	if (*total_flags & FIF_ALLMULTI || multicast == ~(0ULL)) {
		/* accept all multicast traffic */
		new_flags |= FIF_ALLMULTI;
		priv->nar &= ~ADM8211_NAR_PR;
		priv->nar |= ADM8211_NAR_MM;
		mc_filter[1] = mc_filter[0] = ~0;
	} else {
		priv->nar &= ~(ADM8211_NAR_MM | ADM8211_NAR_PR);
	}

	ADM8211_IDLE_RX();

	ADM8211_CSR_WRITE(MAR0, mc_filter[0]);
	ADM8211_CSR_WRITE(MAR1, mc_filter[1]);
	ADM8211_CSR_READ(NAR);	/* flush posted writes */

	if (priv->nar & ADM8211_NAR_PR)
		ieee80211_hw_set(dev, RX_INCLUDES_FCS);
	else
		__clear_bit(IEEE80211_HW_RX_INCLUDES_FCS, dev->flags);

	/* a broadcast BSSID lets all beacons/probe responses through */
	if (*total_flags & FIF_BCN_PRBRESP_PROMISC)
		adm8211_set_bssid(dev, bcast);
	else
		adm8211_set_bssid(dev, priv->bssid);

	ADM8211_RESTORE();

	*total_flags = new_flags;
}
1400
1401 static int adm8211_add_interface(struct ieee80211_hw *dev,
1402 struct ieee80211_vif *vif)
1403 {
1404 struct adm8211_priv *priv = dev->priv;
1405 if (priv->mode != NL80211_IFTYPE_MONITOR)
1406 return -EOPNOTSUPP;
1407
1408 switch (vif->type) {
1409 case NL80211_IFTYPE_STATION:
1410 priv->mode = vif->type;
1411 break;
1412 default:
1413 return -EOPNOTSUPP;
1414 }
1415
1416 ADM8211_IDLE();
1417
1418 ADM8211_CSR_WRITE(PAR0, le32_to_cpu(*(__le32 *)vif->addr));
1419 ADM8211_CSR_WRITE(PAR1, le16_to_cpu(*(__le16 *)(vif->addr + 4)));
1420
1421 adm8211_update_mode(dev);
1422
1423 ADM8211_RESTORE();
1424
1425 return 0;
1426 }
1427
1428 static void adm8211_remove_interface(struct ieee80211_hw *dev,
1429 struct ieee80211_vif *vif)
1430 {
1431 struct adm8211_priv *priv = dev->priv;
1432 priv->mode = NL80211_IFTYPE_MONITOR;
1433 }
1434
/* Initialize the RX and TX descriptor rings: allocate and DMA-map one skb
 * per RX descriptor, clear the TX bookkeeping, mark the ring-end descriptors
 * and point the hardware at the ring base addresses.  Always returns 0. */
1435 static int adm8211_init_rings(struct ieee80211_hw *dev)
1436 {
1437 struct adm8211_priv *priv = dev->priv;
1438 struct adm8211_desc *desc = NULL;
1439 struct adm8211_rx_ring_info *rx_info;
1440 struct adm8211_tx_ring_info *tx_info;
1441 unsigned int i;
1442
/* First pass: reset every RX descriptor and its bookkeeping slot. */
1443 for (i = 0; i < priv->rx_ring_size; i++) {
1444 desc = &priv->rx_ring[i];
1445 desc->status = 0;
1446 desc->length = cpu_to_le32(RX_PKT_SIZE);
1447 priv->rx_buffers[i].skb = NULL;
1448 }
1449 /* Mark the end of RX ring; hw returns to base address after this
1450 * descriptor */
1451 desc->length |= cpu_to_le32(RDES1_CONTROL_RER);
1452
/* Second pass: attach an skb to each RX descriptor and hand ownership to
 * the hardware (OWN bit).  NOTE(review): if dev_alloc_skb() fails the loop
 * just stops and the function still returns 0, leaving the tail of the
 * ring without buffers — apparently tolerated, but worth confirming. */
1453 for (i = 0; i < priv->rx_ring_size; i++) {
1454 desc = &priv->rx_ring[i];
1455 rx_info = &priv->rx_buffers[i];
1456
1457 rx_info->skb = dev_alloc_skb(RX_PKT_SIZE);
1458 if (rx_info->skb == NULL)
1459 break;
1460 rx_info->mapping = pci_map_single(priv->pdev,
1461 skb_tail_pointer(rx_info->skb),
1462 RX_PKT_SIZE,
1463 PCI_DMA_FROMDEVICE);
1464 desc->buffer1 = cpu_to_le32(rx_info->mapping);
1465 desc->status = cpu_to_le32(RDES0_STATUS_OWN | RDES0_STATUS_SQL);
1466 }
1467
1468 /* Setup TX ring. TX buffers descriptors will be filled in as needed */
1469 for (i = 0; i < priv->tx_ring_size; i++) {
1470 desc = &priv->tx_ring[i];
1471 tx_info = &priv->tx_buffers[i];
1472
1473 tx_info->skb = NULL;
1474 tx_info->mapping = 0;
1475 desc->status = 0;
1476 }
/* Mark the last TX descriptor as the ring end. */
1477 desc->length = cpu_to_le32(TDES1_CONTROL_TER);
1478
/* Reset ring indices and tell the hardware where the rings live. */
1479 priv->cur_rx = priv->cur_tx = priv->dirty_tx = 0;
1480 ADM8211_CSR_WRITE(RDB, priv->rx_ring_dma);
1481 ADM8211_CSR_WRITE(TDBD, priv->tx_ring_dma);
1482
1483 return 0;
1484 }
1485
1486 static void adm8211_free_rings(struct ieee80211_hw *dev)
1487 {
1488 struct adm8211_priv *priv = dev->priv;
1489 unsigned int i;
1490
1491 for (i = 0; i < priv->rx_ring_size; i++) {
1492 if (!priv->rx_buffers[i].skb)
1493 continue;
1494
1495 pci_unmap_single(
1496 priv->pdev,
1497 priv->rx_buffers[i].mapping,
1498 RX_PKT_SIZE, PCI_DMA_FROMDEVICE);
1499
1500 dev_kfree_skb(priv->rx_buffers[i].skb);
1501 }
1502
1503 for (i = 0; i < priv->tx_ring_size; i++) {
1504 if (!priv->tx_buffers[i].skb)
1505 continue;
1506
1507 pci_unmap_single(priv->pdev,
1508 priv->tx_buffers[i].mapping,
1509 priv->tx_buffers[i].skb->len,
1510 PCI_DMA_TODEVICE);
1511
1512 dev_kfree_skb(priv->tx_buffers[i].skb);
1513 }
1514 }
1515
1516 static int adm8211_start(struct ieee80211_hw *dev)
1517 {
1518 struct adm8211_priv *priv = dev->priv;
1519 int retval;
1520
1521 /* Power up MAC and RF chips */
1522 retval = adm8211_hw_reset(dev);
1523 if (retval) {
1524 wiphy_err(dev->wiphy, "hardware reset failed\n");
1525 goto fail;
1526 }
1527
1528 retval = adm8211_init_rings(dev);
1529 if (retval) {
1530 wiphy_err(dev->wiphy, "failed to initialize rings\n");
1531 goto fail;
1532 }
1533
1534 /* Init hardware */
1535 adm8211_hw_init(dev);
1536 adm8211_rf_set_channel(dev, priv->channel);
1537
1538 retval = request_irq(priv->pdev->irq, adm8211_interrupt,
1539 IRQF_SHARED, "adm8211", dev);
1540 if (retval) {
1541 wiphy_err(dev->wiphy, "failed to register IRQ handler\n");
1542 goto fail;
1543 }
1544
1545 ADM8211_CSR_WRITE(IER, ADM8211_IER_NIE | ADM8211_IER_AIE |
1546 ADM8211_IER_RCIE | ADM8211_IER_TCIE |
1547 ADM8211_IER_TDUIE | ADM8211_IER_GPTIE);
1548 priv->mode = NL80211_IFTYPE_MONITOR;
1549 adm8211_update_mode(dev);
1550 ADM8211_CSR_WRITE(RDR, 0);
1551
1552 adm8211_set_interval(dev, 100, 10);
1553 return 0;
1554
1555 fail:
1556 return retval;
1557 }
1558
/* mac80211 .stop callback: quiesce the MAC (clear NAR), mask all interrupts
 * (clear IER), then detach the IRQ handler and release the DMA rings set up
 * by adm8211_start().  The NAR read flushes the posted writes before
 * free_irq() can run. */
1559 static void adm8211_stop(struct ieee80211_hw *dev)
1560 {
1561 struct adm8211_priv *priv = dev->priv;
1562
1563 priv->mode = NL80211_IFTYPE_UNSPECIFIED;
1564 priv->nar = 0;
1565 ADM8211_CSR_WRITE(NAR, 0);
1566 ADM8211_CSR_WRITE(IER, 0);
1567 ADM8211_CSR_READ(NAR);
1568
1569 free_irq(priv->pdev->irq, dev);
1570
1571 adm8211_free_rings(dev);
1572 }
1573
1574 static void adm8211_calc_durations(int *dur, int *plcp, size_t payload_len, int len,
1575 int plcp_signal, int short_preamble)
1576 {
1577 /* Alternative calculation from NetBSD: */
1578
1579 /* IEEE 802.11b durations for DSSS PHY in microseconds */
1580 #define IEEE80211_DUR_DS_LONG_PREAMBLE 144
1581 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
1582 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24
1583 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
1584 #define IEEE80211_DUR_DS_SLOW_ACK 112
1585 #define IEEE80211_DUR_DS_FAST_ACK 56
1586 #define IEEE80211_DUR_DS_SLOW_CTS 112
1587 #define IEEE80211_DUR_DS_FAST_CTS 56
1588 #define IEEE80211_DUR_DS_SLOT 20
1589 #define IEEE80211_DUR_DS_SIFS 10
1590
1591 int remainder;
1592
1593 *dur = (80 * (24 + payload_len) + plcp_signal - 1)
1594 / plcp_signal;
1595
1596 if (plcp_signal <= PLCP_SIGNAL_2M)
1597 /* 1-2Mbps WLAN: send ACK/CTS at 1Mbps */
1598 *dur += 3 * (IEEE80211_DUR_DS_SIFS +
1599 IEEE80211_DUR_DS_SHORT_PREAMBLE +
1600 IEEE80211_DUR_DS_FAST_PLCPHDR) +
1601 IEEE80211_DUR_DS_SLOW_CTS + IEEE80211_DUR_DS_SLOW_ACK;
1602 else
1603 /* 5-11Mbps WLAN: send ACK/CTS at 2Mbps */
1604 *dur += 3 * (IEEE80211_DUR_DS_SIFS +
1605 IEEE80211_DUR_DS_SHORT_PREAMBLE +
1606 IEEE80211_DUR_DS_FAST_PLCPHDR) +
1607 IEEE80211_DUR_DS_FAST_CTS + IEEE80211_DUR_DS_FAST_ACK;
1608
1609 /* lengthen duration if long preamble */
1610 if (!short_preamble)
1611 *dur += 3 * (IEEE80211_DUR_DS_LONG_PREAMBLE -
1612 IEEE80211_DUR_DS_SHORT_PREAMBLE) +
1613 3 * (IEEE80211_DUR_DS_SLOW_PLCPHDR -
1614 IEEE80211_DUR_DS_FAST_PLCPHDR);
1615
1616
1617 *plcp = (80 * len) / plcp_signal;
1618 remainder = (80 * len) % plcp_signal;
1619 if (plcp_signal == PLCP_SIGNAL_11M &&
1620 remainder <= 30 && remainder > 0)
1621 *plcp = (*plcp | 0x8000) + 1;
1622 else if (remainder)
1623 (*plcp)++;
1624 }
1625
1626 /* Transmit skb w/adm8211_tx_hdr (802.11 header created by hardware) */
/* Queue one fully prepared skb on the TX DMA ring and kick the transmitter.
 * Ring indices (cur_tx/dirty_tx) are protected by priv->lock; the descriptor
 * OWN bit hands the buffer to the hardware. */
1627 static void adm8211_tx_raw(struct ieee80211_hw *dev, struct sk_buff *skb,
1628 u16 plcp_signal,
1629 size_t hdrlen)
1630 {
1631 struct adm8211_priv *priv = dev->priv;
1632 unsigned long flags;
1633 dma_addr_t mapping;
1634 unsigned int entry;
1635 u32 flag;
1636
/* Map the whole frame for device reads before taking the ring lock. */
1637 mapping = pci_map_single(priv->pdev, skb->data, skb->len,
1638 PCI_DMA_TODEVICE);
1639
1640 spin_lock_irqsave(&priv->lock, flags);
1641
/* Request a TX-complete interrupt (IC) only when the ring is half full,
 * to throttle interrupt load; every frame is a single segment (FS|LS). */
1642 if (priv->cur_tx - priv->dirty_tx == priv->tx_ring_size / 2)
1643 flag = TDES1_CONTROL_IC | TDES1_CONTROL_LS | TDES1_CONTROL_FS;
1644 else
1645 flag = TDES1_CONTROL_LS | TDES1_CONTROL_FS;
1646
/* Stop the queue while only one free slot remains. */
1647 if (priv->cur_tx - priv->dirty_tx == priv->tx_ring_size - 2)
1648 ieee80211_stop_queue(dev, 0);
1649
1650 entry = priv->cur_tx % priv->tx_ring_size;
1651
/* Record the skb/mapping for the completion path, then fill the descriptor. */
1652 priv->tx_buffers[entry].skb = skb;
1653 priv->tx_buffers[entry].mapping = mapping;
1654 priv->tx_buffers[entry].hdrlen = hdrlen;
1655 priv->tx_ring[entry].buffer1 = cpu_to_le32(mapping);
1656
/* Last descriptor in the ring carries the ring-end marker. */
1657 if (entry == priv->tx_ring_size - 1)
1658 flag |= TDES1_CONTROL_TER;
1659 priv->tx_ring[entry].length = cpu_to_le32(flag | skb->len);
1660
1661 /* Set TX rate (SIGNAL field in PLCP PPDU format) */
1662 flag = TDES0_CONTROL_OWN | (plcp_signal << 20) | 8 /* ? */;
1663 priv->tx_ring[entry].status = cpu_to_le32(flag);
1664
1665 priv->cur_tx++;
1666
1667 spin_unlock_irqrestore(&priv->lock, flags);
1668
1669 /* Trigger transmit poll */
1670 ADM8211_CSR_WRITE(TDR, 0);
1671 }
1672
1673 /* Put adm8211_tx_hdr on skb and transmit */
/* mac80211 .tx callback: strip the 802.11 header (saved in skb->cb), prepend
 * the hardware's adm8211_tx_hdr describing rate, durations and flags, and
 * hand the frame to adm8211_tx_raw() for DMA queuing. */
1674 static void adm8211_tx(struct ieee80211_hw *dev,
1675 struct ieee80211_tx_control *control,
1676 struct sk_buff *skb)
1677 {
1678 struct adm8211_tx_hdr *txhdr;
1679 size_t payload_len, hdrlen;
1680 int plcp, dur, len, plcp_signal, short_preamble;
1681 struct ieee80211_hdr *hdr;
1682 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1683 struct ieee80211_rate *txrate = ieee80211_get_tx_rate(dev, info);
1684 u8 rc_flags;
1685
/* Rate-control decision for the first (and only) rate attempt. */
1686 rc_flags = info->control.rates[0].flags;
1687 short_preamble = !!(rc_flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
1688 plcp_signal = txrate->bitrate;
1689
/* Save the 802.11 header in skb->cb, then remove it from the data — the
 * hardware regenerates the header from adm8211_tx_hdr fields. */
1690 hdr = (struct ieee80211_hdr *)skb->data;
1691 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1692 memcpy(skb->cb, skb->data, hdrlen);
1693 hdr = (struct ieee80211_hdr *)skb->cb;
1694 skb_pull(skb, hdrlen);
1695 payload_len = skb->len;
1696
/* Prepend and zero the hardware TX header, then fill it in. */
1697 txhdr = (struct adm8211_tx_hdr *) skb_push(skb, sizeof(*txhdr));
1698 memset(txhdr, 0, sizeof(*txhdr));
1699 memcpy(txhdr->da, ieee80211_get_DA(hdr), ETH_ALEN);
1700 txhdr->signal = plcp_signal;
1701 txhdr->frame_body_size = cpu_to_le16(payload_len);
1702 txhdr->frame_control = hdr->frame_control;
1703
/* Total on-air length includes the stripped header and the FCS. */
1704 len = hdrlen + payload_len + FCS_LEN;
1705
1706 txhdr->frag = cpu_to_le16(0x0FFF);
1707 adm8211_calc_durations(&dur, &plcp, payload_len,
1708 len, plcp_signal, short_preamble);
/* Single fragment: head and tail carry identical PLCP/duration values. */
1709 txhdr->plcp_frag_head_len = cpu_to_le16(plcp);
1710 txhdr->plcp_frag_tail_len = cpu_to_le16(plcp);
1711 txhdr->dur_frag_head = cpu_to_le16(dur);
1712 txhdr->dur_frag_tail = cpu_to_le16(dur);
1713
1714 txhdr->header_control = cpu_to_le16(ADM8211_TXHDRCTL_ENABLE_EXTEND_HEADER);
1715
1716 if (short_preamble)
1717 txhdr->header_control |= cpu_to_le16(ADM8211_TXHDRCTL_SHORT_PREAMBLE);
1718
1719 if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS)
1720 txhdr->header_control |= cpu_to_le16(ADM8211_TXHDRCTL_ENABLE_RTS);
1721
1722 txhdr->retry_limit = info->control.rates[0].count;
1723
1724 adm8211_tx_raw(dev, skb, plcp_signal, hdrlen);
1725 }
1726
1727 static int adm8211_alloc_rings(struct ieee80211_hw *dev)
1728 {
1729 struct adm8211_priv *priv = dev->priv;
1730 unsigned int ring_size;
1731
1732 priv->rx_buffers = kmalloc(sizeof(*priv->rx_buffers) * priv->rx_ring_size +
1733 sizeof(*priv->tx_buffers) * priv->tx_ring_size, GFP_KERNEL);
1734 if (!priv->rx_buffers)
1735 return -ENOMEM;
1736
1737 priv->tx_buffers = (void *)priv->rx_buffers +
1738 sizeof(*priv->rx_buffers) * priv->rx_ring_size;
1739
1740 /* Allocate TX/RX descriptors */
1741 ring_size = sizeof(struct adm8211_desc) * priv->rx_ring_size +
1742 sizeof(struct adm8211_desc) * priv->tx_ring_size;
1743 priv->rx_ring = pci_alloc_consistent(priv->pdev, ring_size,
1744 &priv->rx_ring_dma);
1745
1746 if (!priv->rx_ring) {
1747 kfree(priv->rx_buffers);
1748 priv->rx_buffers = NULL;
1749 priv->tx_buffers = NULL;
1750 return -ENOMEM;
1751 }
1752
1753 priv->tx_ring = priv->rx_ring + priv->rx_ring_size;
1754 priv->tx_ring_dma = priv->rx_ring_dma +
1755 sizeof(struct adm8211_desc) * priv->rx_ring_size;
1756
1757 return 0;
1758 }
1759
/* mac80211 callback table for the ADM8211; registered via
 * ieee80211_alloc_hw() in adm8211_probe(). */
1760 static const struct ieee80211_ops adm8211_ops = {
1761 .tx = adm8211_tx,
1762 .start = adm8211_start,
1763 .stop = adm8211_stop,
1764 .add_interface = adm8211_add_interface,
1765 .remove_interface = adm8211_remove_interface,
1766 .config = adm8211_config,
1767 .bss_info_changed = adm8211_bss_info_changed,
1768 .prepare_multicast = adm8211_prepare_multicast,
1769 .configure_filter = adm8211_configure_filter,
1770 .get_stats = adm8211_get_stats,
1771 .get_tsf = adm8211_get_tsft
1772 };
1773
1774 static int adm8211_probe(struct pci_dev *pdev,
1775 const struct pci_device_id *id)
1776 {
1777 struct ieee80211_hw *dev;
1778 struct adm8211_priv *priv;
1779 unsigned long mem_addr, mem_len;
1780 unsigned int io_addr, io_len;
1781 int err;
1782 u32 reg;
1783 u8 perm_addr[ETH_ALEN];
1784
1785 err = pci_enable_device(pdev);
1786 if (err) {
1787 printk(KERN_ERR "%s (adm8211): Cannot enable new PCI device\n",
1788 pci_name(pdev));
1789 return err;
1790 }
1791
1792 io_addr = pci_resource_start(pdev, 0);
1793 io_len = pci_resource_len(pdev, 0);
1794 mem_addr = pci_resource_start(pdev, 1);
1795 mem_len = pci_resource_len(pdev, 1);
1796 if (io_len < 256 || mem_len < 1024) {
1797 printk(KERN_ERR "%s (adm8211): Too short PCI resources\n",
1798 pci_name(pdev));
1799 goto err_disable_pdev;
1800 }
1801
1802
1803 /* check signature */
1804 pci_read_config_dword(pdev, 0x80 /* CR32 */, ®);
1805 if (reg != ADM8211_SIG1 && reg != ADM8211_SIG2) {
1806 printk(KERN_ERR "%s (adm8211): Invalid signature (0x%x)\n",
1807 pci_name(pdev), reg);
1808 goto err_disable_pdev;
1809 }
1810
1811 err = pci_request_regions(pdev, "adm8211");
1812 if (err) {
1813 printk(KERN_ERR "%s (adm8211): Cannot obtain PCI resources\n",
1814 pci_name(pdev));
1815 return err; /* someone else grabbed it? don't disable it */
1816 }
1817
1818 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) ||
1819 pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
1820 printk(KERN_ERR "%s (adm8211): No suitable DMA available\n",
1821 pci_name(pdev));
1822 goto err_free_reg;
1823 }
1824
1825 pci_set_master(pdev);
1826
1827 dev = ieee80211_alloc_hw(sizeof(*priv), &adm8211_ops);
1828 if (!dev) {
1829 printk(KERN_ERR "%s (adm8211): ieee80211 alloc failed\n",
1830 pci_name(pdev));
1831 err = -ENOMEM;
1832 goto err_free_reg;
1833 }
1834 priv = dev->priv;
1835 priv->pdev = pdev;
1836
1837 spin_lock_init(&priv->lock);
1838
1839 SET_IEEE80211_DEV(dev, &pdev->dev);
1840
1841 pci_set_drvdata(pdev, dev);
1842
1843 priv->map = pci_iomap(pdev, 1, mem_len);
1844 if (!priv->map)
1845 priv->map = pci_iomap(pdev, 0, io_len);
1846
1847 if (!priv->map) {
1848 printk(KERN_ERR "%s (adm8211): Cannot map device memory\n",
1849 pci_name(pdev));
1850 err = -ENOMEM;
1851 goto err_free_dev;
1852 }
1853
1854 priv->rx_ring_size = rx_ring_size;
1855 priv->tx_ring_size = tx_ring_size;
1856
1857 if (adm8211_alloc_rings(dev)) {
1858 printk(KERN_ERR "%s (adm8211): Cannot allocate TX/RX ring\n",
1859 pci_name(pdev));
1860 goto err_iounmap;
1861 }
1862
1863 *(__le32 *)perm_addr = cpu_to_le32(ADM8211_CSR_READ(PAR0));
1864 *(__le16 *)&perm_addr[4] =
1865 cpu_to_le16(ADM8211_CSR_READ(PAR1) & 0xFFFF);
1866
1867 if (!is_valid_ether_addr(perm_addr)) {
1868 printk(KERN_WARNING "%s (adm8211): Invalid hwaddr in EEPROM!\n",
1869 pci_name(pdev));
1870 eth_random_addr(perm_addr);
1871 }
1872 SET_IEEE80211_PERM_ADDR(dev, perm_addr);
1873
1874 dev->extra_tx_headroom = sizeof(struct adm8211_tx_hdr);
1875 /* dev->flags = RX_INCLUDES_FCS in promisc mode */
1876 ieee80211_hw_set(dev, SIGNAL_UNSPEC);
1877 dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
1878
1879 dev->max_signal = 100; /* FIXME: find better value */
1880
1881 dev->queues = 1; /* ADM8211C supports more, maybe ADM8211B too */
1882
1883 priv->retry_limit = 3;
1884 priv->ant_power = 0x40;
1885 priv->tx_power = 0x40;
1886 priv->lpf_cutoff = 0xFF;
1887 priv->lnags_threshold = 0xFF;
1888 priv->mode = NL80211_IFTYPE_UNSPECIFIED;
1889
1890 /* Power-on issue. EEPROM won't read correctly without */
1891 if (pdev->revision >= ADM8211_REV_BA) {
1892 ADM8211_CSR_WRITE(FRCTL, 0);
1893 ADM8211_CSR_READ(FRCTL);
1894 ADM8211_CSR_WRITE(FRCTL, 1);
1895 ADM8211_CSR_READ(FRCTL);
1896 msleep(100);
1897 }
1898
1899 err = adm8211_read_eeprom(dev);
1900 if (err) {
1901 printk(KERN_ERR "%s (adm8211): Can't alloc eeprom buffer\n",
1902 pci_name(pdev));
1903 goto err_free_desc;
1904 }
1905
1906 priv->channel = 1;
1907
1908 dev->wiphy->bands[NL80211_BAND_2GHZ] = &priv->band;
1909
1910 err = ieee80211_register_hw(dev);
1911 if (err) {
1912 printk(KERN_ERR "%s (adm8211): Cannot register device\n",
1913 pci_name(pdev));
1914 goto err_free_eeprom;
1915 }
1916
1917 wiphy_info(dev->wiphy, "hwaddr %pM, Rev 0x%02x\n",
1918 dev->wiphy->perm_addr, pdev->revision);
1919
1920 return 0;
1921
1922 err_free_eeprom:
1923 kfree(priv->eeprom);
1924
1925 err_free_desc:
1926 pci_free_consistent(pdev,
1927 sizeof(struct adm8211_desc) * priv->rx_ring_size +
1928 sizeof(struct adm8211_desc) * priv->tx_ring_size,
1929 priv->rx_ring, priv->rx_ring_dma);
1930 kfree(priv->rx_buffers);
1931
1932 err_iounmap:
1933 pci_iounmap(pdev, priv->map);
1934
1935 err_free_dev:
1936 ieee80211_free_hw(dev);
1937
1938 err_free_reg:
1939 pci_release_regions(pdev);
1940
1941 err_disable_pdev:
1942 pci_disable_device(pdev);
1943 return err;
1944 }
1945
1946
1947 static void adm8211_remove(struct pci_dev *pdev)
1948 {
1949 struct ieee80211_hw *dev = pci_get_drvdata(pdev);
1950 struct adm8211_priv *priv;
1951
1952 if (!dev)
1953 return;
1954
1955 ieee80211_unregister_hw(dev);
1956
1957 priv = dev->priv;
1958
1959 pci_free_consistent(pdev,
1960 sizeof(struct adm8211_desc) * priv->rx_ring_size +
1961 sizeof(struct adm8211_desc) * priv->tx_ring_size,
1962 priv->rx_ring, priv->rx_ring_dma);
1963
1964 kfree(priv->rx_buffers);
1965 kfree(priv->eeprom);
1966 pci_iounmap(pdev, priv->map);
1967 pci_release_regions(pdev);
1968 pci_disable_device(pdev);
1969 ieee80211_free_hw(dev);
1970 }
1971
1972
1973 #ifdef CONFIG_PM
/* Legacy PCI suspend hook: save config space and drop to the power state
 * chosen for the suspend message.  Hardware state is rebuilt on resume by
 * mac80211 re-invoking adm8211_start(). */
1974 static int adm8211_suspend(struct pci_dev *pdev, pm_message_t state)
1975 {
1976 pci_save_state(pdev);
1977 pci_set_power_state(pdev, pci_choose_state(pdev, state));
1978 return 0;
1979 }
1980
/* Legacy PCI resume hook: return to full power (D0) and restore the PCI
 * config space saved by adm8211_suspend(). */
1981 static int adm8211_resume(struct pci_dev *pdev)
1982 {
1983 pci_set_power_state(pdev, PCI_D0);
1984 pci_restore_state(pdev);
1985 return 0;
1986 }
1987 #endif /* CONFIG_PM */
1988
1989
1990 MODULE_DEVICE_TABLE(pci, adm8211_pci_id_table);
1991
1992 /* TODO: implement enable_wake */
/* PCI driver descriptor registered by module_pci_driver() below; the
 * suspend/resume hooks are only present on CONFIG_PM kernels. */
1993 static struct pci_driver adm8211_driver = {
1994 .name = "adm8211",
1995 .id_table = adm8211_pci_id_table,
1996 .probe = adm8211_probe,
1997 .remove = adm8211_remove,
1998 #ifdef CONFIG_PM
1999 .suspend = adm8211_suspend,
2000 .resume = adm8211_resume,
2001 #endif /* CONFIG_PM */
2002 };
2003
2004 module_pci_driver(adm8211_driver);
2005
2006
2007
2008
2009
2010 /* LDV_COMMENT_BEGIN_MAIN */
2011 #ifdef LDV_MAIN0_sequence_infinite_withcheck_stateful
2012
2013 /*###########################################################################*/
2014
2015 /*############## Driver Environment Generator 0.2 output ####################*/
2016
2017 /*###########################################################################*/
2018
2019
2020
2021 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test if all kernel resources are correctly released by driver before driver will be unloaded. */
2022 void ldv_check_final_state(void);
2023
2024 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result. */
2025 void ldv_check_return_value(int res);
2026
2027 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result of probe() function. */
2028 void ldv_check_return_value_probe(int res);
2029
2030 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Initializes the model. */
2031 void ldv_initialize(void);
2032
2033 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Reinitializes the model between distinct model function calls. */
2034 void ldv_handler_precall(void);
2035
2036 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Returns arbitrary interger value. */
2037 int nondet_int(void);
2038
2039 /* LDV_COMMENT_VAR_DECLARE_LDV Special variable for LDV verifier. */
2040 int LDV_IN_INTERRUPT;
2041
2042 /* LDV_COMMENT_FUNCTION_MAIN Main function for LDV verifier. */
2043 void ldv_main0_sequence_infinite_withcheck_stateful(void) {
2044
2045
2046
2047 /* LDV_COMMENT_BEGIN_VARIABLE_DECLARATION_PART */
2048 /*============================= VARIABLE DECLARATION PART =============================*/
2049 /** STRUCT: struct type: eeprom_93cx6, struct name: eeprom **/
2050 /* content: static void adm8211_eeprom_register_read(struct eeprom_93cx6 *eeprom)*/
2051 /* LDV_COMMENT_END_PREP */
2052 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "adm8211_eeprom_register_read" */
2053 struct eeprom_93cx6 * var_group1;
2054 /* LDV_COMMENT_BEGIN_PREP */
2055 #define ADM8211_INT(x) \
2056 do { \
2057 if (unlikely(stsr & ADM8211_STSR_ ## x)) \
2058 wiphy_debug(dev->wiphy, "%s\n", #x); \
2059 } while (0)
2060 #undef ADM8211_INT
2061 #define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
2062 static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \
2063 u16 addr, u32 value) { \
2064 struct adm8211_priv *priv = dev->priv; \
2065 unsigned int i; \
2066 u32 reg, bitbuf; \
2067 \
2068 value &= v_mask; \
2069 addr &= a_mask; \
2070 bitbuf = (value << v_shift) | (addr << a_shift); \
2071 \
2072 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \
2073 ADM8211_CSR_READ(SYNRF); \
2074 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \
2075 ADM8211_CSR_READ(SYNRF); \
2076 \
2077 if (prewrite) { \
2078 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
2079 ADM8211_CSR_READ(SYNRF); \
2080 } \
2081 \
2082 for (i = 0; i <= bits; i++) { \
2083 if (bitbuf & (1 << (bits - i))) \
2084 reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \
2085 else \
2086 reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \
2087 \
2088 ADM8211_CSR_WRITE(SYNRF, reg); \
2089 ADM8211_CSR_READ(SYNRF); \
2090 \
2091 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
2092 ADM8211_CSR_READ(SYNRF); \
2093 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
2094 ADM8211_CSR_READ(SYNRF); \
2095 } \
2096 \
2097 if (postwrite == 1) { \
2098 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
2099 ADM8211_CSR_READ(SYNRF); \
2100 } \
2101 if (postwrite == 2) { \
2102 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
2103 ADM8211_CSR_READ(SYNRF); \
2104 } \
2105 \
2106 ADM8211_CSR_WRITE(SYNRF, 0); \
2107 ADM8211_CSR_READ(SYNRF); \
2108 }
2109 #undef WRITE_SYN
2110 #define IEEE80211_DUR_DS_LONG_PREAMBLE 144
2111 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
2112 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24
2113 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
2114 #define IEEE80211_DUR_DS_SLOW_ACK 112
2115 #define IEEE80211_DUR_DS_FAST_ACK 56
2116 #define IEEE80211_DUR_DS_SLOW_CTS 112
2117 #define IEEE80211_DUR_DS_FAST_CTS 56
2118 #define IEEE80211_DUR_DS_SLOT 20
2119 #define IEEE80211_DUR_DS_SIFS 10
2120 #ifdef CONFIG_PM
2121 #endif
2122 #ifdef CONFIG_PM
2123 #endif
2124 /* LDV_COMMENT_END_PREP */
2125 /* content: static void adm8211_eeprom_register_write(struct eeprom_93cx6 *eeprom)*/
2126 /* LDV_COMMENT_END_PREP */
2127 /* LDV_COMMENT_BEGIN_PREP */
2128 #define ADM8211_INT(x) \
2129 do { \
2130 if (unlikely(stsr & ADM8211_STSR_ ## x)) \
2131 wiphy_debug(dev->wiphy, "%s\n", #x); \
2132 } while (0)
2133 #undef ADM8211_INT
2134 #define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
2135 static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \
2136 u16 addr, u32 value) { \
2137 struct adm8211_priv *priv = dev->priv; \
2138 unsigned int i; \
2139 u32 reg, bitbuf; \
2140 \
2141 value &= v_mask; \
2142 addr &= a_mask; \
2143 bitbuf = (value << v_shift) | (addr << a_shift); \
2144 \
2145 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \
2146 ADM8211_CSR_READ(SYNRF); \
2147 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \
2148 ADM8211_CSR_READ(SYNRF); \
2149 \
2150 if (prewrite) { \
2151 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
2152 ADM8211_CSR_READ(SYNRF); \
2153 } \
2154 \
2155 for (i = 0; i <= bits; i++) { \
2156 if (bitbuf & (1 << (bits - i))) \
2157 reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \
2158 else \
2159 reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \
2160 \
2161 ADM8211_CSR_WRITE(SYNRF, reg); \
2162 ADM8211_CSR_READ(SYNRF); \
2163 \
2164 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
2165 ADM8211_CSR_READ(SYNRF); \
2166 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
2167 ADM8211_CSR_READ(SYNRF); \
2168 } \
2169 \
2170 if (postwrite == 1) { \
2171 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
2172 ADM8211_CSR_READ(SYNRF); \
2173 } \
2174 if (postwrite == 2) { \
2175 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
2176 ADM8211_CSR_READ(SYNRF); \
2177 } \
2178 \
2179 ADM8211_CSR_WRITE(SYNRF, 0); \
2180 ADM8211_CSR_READ(SYNRF); \
2181 }
2182 #undef WRITE_SYN
2183 #define IEEE80211_DUR_DS_LONG_PREAMBLE 144
2184 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
2185 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24
2186 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
2187 #define IEEE80211_DUR_DS_SLOW_ACK 112
2188 #define IEEE80211_DUR_DS_FAST_ACK 56
2189 #define IEEE80211_DUR_DS_SLOW_CTS 112
2190 #define IEEE80211_DUR_DS_FAST_CTS 56
2191 #define IEEE80211_DUR_DS_SLOT 20
2192 #define IEEE80211_DUR_DS_SIFS 10
2193 #ifdef CONFIG_PM
2194 #endif
2195 #ifdef CONFIG_PM
2196 #endif
2197 /* LDV_COMMENT_END_PREP */
2198
2199 /** STRUCT: struct type: ieee80211_ops, struct name: adm8211_ops **/
2200 /* content: static void adm8211_tx(struct ieee80211_hw *dev, struct ieee80211_tx_control *control, struct sk_buff *skb)*/
2201 /* LDV_COMMENT_BEGIN_PREP */
2202 #define ADM8211_INT(x) \
2203 do { \
2204 if (unlikely(stsr & ADM8211_STSR_ ## x)) \
2205 wiphy_debug(dev->wiphy, "%s\n", #x); \
2206 } while (0)
2207 #undef ADM8211_INT
2208 #define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
2209 static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \
2210 u16 addr, u32 value) { \
2211 struct adm8211_priv *priv = dev->priv; \
2212 unsigned int i; \
2213 u32 reg, bitbuf; \
2214 \
2215 value &= v_mask; \
2216 addr &= a_mask; \
2217 bitbuf = (value << v_shift) | (addr << a_shift); \
2218 \
2219 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \
2220 ADM8211_CSR_READ(SYNRF); \
2221 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \
2222 ADM8211_CSR_READ(SYNRF); \
2223 \
2224 if (prewrite) { \
2225 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
2226 ADM8211_CSR_READ(SYNRF); \
2227 } \
2228 \
2229 for (i = 0; i <= bits; i++) { \
2230 if (bitbuf & (1 << (bits - i))) \
2231 reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \
2232 else \
2233 reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \
2234 \
2235 ADM8211_CSR_WRITE(SYNRF, reg); \
2236 ADM8211_CSR_READ(SYNRF); \
2237 \
2238 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
2239 ADM8211_CSR_READ(SYNRF); \
2240 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
2241 ADM8211_CSR_READ(SYNRF); \
2242 } \
2243 \
2244 if (postwrite == 1) { \
2245 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
2246 ADM8211_CSR_READ(SYNRF); \
2247 } \
2248 if (postwrite == 2) { \
2249 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
2250 ADM8211_CSR_READ(SYNRF); \
2251 } \
2252 \
2253 ADM8211_CSR_WRITE(SYNRF, 0); \
2254 ADM8211_CSR_READ(SYNRF); \
2255 }
2256 #undef WRITE_SYN
2257 #define IEEE80211_DUR_DS_LONG_PREAMBLE 144
2258 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
2259 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24
2260 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
2261 #define IEEE80211_DUR_DS_SLOW_ACK 112
2262 #define IEEE80211_DUR_DS_FAST_ACK 56
2263 #define IEEE80211_DUR_DS_SLOW_CTS 112
2264 #define IEEE80211_DUR_DS_FAST_CTS 56
2265 #define IEEE80211_DUR_DS_SLOT 20
2266 #define IEEE80211_DUR_DS_SIFS 10
2267 /* LDV_COMMENT_END_PREP */
2268 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "adm8211_tx" */
2269 struct ieee80211_hw * var_group2;
2270 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "adm8211_tx" */
2271 struct ieee80211_tx_control * var_group3;
2272 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "adm8211_tx" */
2273 struct sk_buff * var_adm8211_tx_33_p2;
2274 /* LDV_COMMENT_BEGIN_PREP */
2275 #ifdef CONFIG_PM
2276 #endif
2277 #ifdef CONFIG_PM
2278 #endif
2279 /* LDV_COMMENT_END_PREP */
2280 /* content: static int adm8211_start(struct ieee80211_hw *dev)*/
2281 /* LDV_COMMENT_BEGIN_PREP */
2282 #define ADM8211_INT(x) \
2283 do { \
2284 if (unlikely(stsr & ADM8211_STSR_ ## x)) \
2285 wiphy_debug(dev->wiphy, "%s\n", #x); \
2286 } while (0)
2287 #undef ADM8211_INT
2288 #define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
2289 static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \
2290 u16 addr, u32 value) { \
2291 struct adm8211_priv *priv = dev->priv; \
2292 unsigned int i; \
2293 u32 reg, bitbuf; \
2294 \
2295 value &= v_mask; \
2296 addr &= a_mask; \
2297 bitbuf = (value << v_shift) | (addr << a_shift); \
2298 \
2299 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \
2300 ADM8211_CSR_READ(SYNRF); \
2301 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \
2302 ADM8211_CSR_READ(SYNRF); \
2303 \
2304 if (prewrite) { \
2305 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
2306 ADM8211_CSR_READ(SYNRF); \
2307 } \
2308 \
2309 for (i = 0; i <= bits; i++) { \
2310 if (bitbuf & (1 << (bits - i))) \
2311 reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \
2312 else \
2313 reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \
2314 \
2315 ADM8211_CSR_WRITE(SYNRF, reg); \
2316 ADM8211_CSR_READ(SYNRF); \
2317 \
2318 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
2319 ADM8211_CSR_READ(SYNRF); \
2320 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
2321 ADM8211_CSR_READ(SYNRF); \
2322 } \
2323 \
2324 if (postwrite == 1) { \
2325 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
2326 ADM8211_CSR_READ(SYNRF); \
2327 } \
2328 if (postwrite == 2) { \
2329 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
2330 ADM8211_CSR_READ(SYNRF); \
2331 } \
2332 \
2333 ADM8211_CSR_WRITE(SYNRF, 0); \
2334 ADM8211_CSR_READ(SYNRF); \
2335 }
2336 #undef WRITE_SYN
2337 /* LDV_COMMENT_END_PREP */
2338 /* LDV_COMMENT_BEGIN_PREP */
2339 #define IEEE80211_DUR_DS_LONG_PREAMBLE 144
2340 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
2341 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24
2342 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
2343 #define IEEE80211_DUR_DS_SLOW_ACK 112
2344 #define IEEE80211_DUR_DS_FAST_ACK 56
2345 #define IEEE80211_DUR_DS_SLOW_CTS 112
2346 #define IEEE80211_DUR_DS_FAST_CTS 56
2347 #define IEEE80211_DUR_DS_SLOT 20
2348 #define IEEE80211_DUR_DS_SIFS 10
2349 #ifdef CONFIG_PM
2350 #endif
2351 #ifdef CONFIG_PM
2352 #endif
2353 /* LDV_COMMENT_END_PREP */
2354 /* content: static void adm8211_stop(struct ieee80211_hw *dev)*/
2355 /* LDV_COMMENT_BEGIN_PREP */
2356 #define ADM8211_INT(x) \
2357 do { \
2358 if (unlikely(stsr & ADM8211_STSR_ ## x)) \
2359 wiphy_debug(dev->wiphy, "%s\n", #x); \
2360 } while (0)
2361 #undef ADM8211_INT
2362 #define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
2363 static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \
2364 u16 addr, u32 value) { \
2365 struct adm8211_priv *priv = dev->priv; \
2366 unsigned int i; \
2367 u32 reg, bitbuf; \
2368 \
2369 value &= v_mask; \
2370 addr &= a_mask; \
2371 bitbuf = (value << v_shift) | (addr << a_shift); \
2372 \
2373 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \
2374 ADM8211_CSR_READ(SYNRF); \
2375 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \
2376 ADM8211_CSR_READ(SYNRF); \
2377 \
2378 if (prewrite) { \
2379 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
2380 ADM8211_CSR_READ(SYNRF); \
2381 } \
2382 \
2383 for (i = 0; i <= bits; i++) { \
2384 if (bitbuf & (1 << (bits - i))) \
2385 reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \
2386 else \
2387 reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \
2388 \
2389 ADM8211_CSR_WRITE(SYNRF, reg); \
2390 ADM8211_CSR_READ(SYNRF); \
2391 \
2392 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
2393 ADM8211_CSR_READ(SYNRF); \
2394 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
2395 ADM8211_CSR_READ(SYNRF); \
2396 } \
2397 \
2398 if (postwrite == 1) { \
2399 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
2400 ADM8211_CSR_READ(SYNRF); \
2401 } \
2402 if (postwrite == 2) { \
2403 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
2404 ADM8211_CSR_READ(SYNRF); \
2405 } \
2406 \
2407 ADM8211_CSR_WRITE(SYNRF, 0); \
2408 ADM8211_CSR_READ(SYNRF); \
2409 }
2410 #undef WRITE_SYN
2411 /* LDV_COMMENT_END_PREP */
2412 /* LDV_COMMENT_BEGIN_PREP */
2413 #define IEEE80211_DUR_DS_LONG_PREAMBLE 144
2414 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
2415 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24
2416 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
2417 #define IEEE80211_DUR_DS_SLOW_ACK 112
2418 #define IEEE80211_DUR_DS_FAST_ACK 56
2419 #define IEEE80211_DUR_DS_SLOW_CTS 112
2420 #define IEEE80211_DUR_DS_FAST_CTS 56
2421 #define IEEE80211_DUR_DS_SLOT 20
2422 #define IEEE80211_DUR_DS_SIFS 10
2423 #ifdef CONFIG_PM
2424 #endif
2425 #ifdef CONFIG_PM
2426 #endif
2427 /* LDV_COMMENT_END_PREP */
2428 /* content: static int adm8211_add_interface(struct ieee80211_hw *dev, struct ieee80211_vif *vif)*/
2429 /* LDV_COMMENT_BEGIN_PREP */
2430 #define ADM8211_INT(x) \
2431 do { \
2432 if (unlikely(stsr & ADM8211_STSR_ ## x)) \
2433 wiphy_debug(dev->wiphy, "%s\n", #x); \
2434 } while (0)
2435 #undef ADM8211_INT
2436 #define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
2437 static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \
2438 u16 addr, u32 value) { \
2439 struct adm8211_priv *priv = dev->priv; \
2440 unsigned int i; \
2441 u32 reg, bitbuf; \
2442 \
2443 value &= v_mask; \
2444 addr &= a_mask; \
2445 bitbuf = (value << v_shift) | (addr << a_shift); \
2446 \
2447 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \
2448 ADM8211_CSR_READ(SYNRF); \
2449 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \
2450 ADM8211_CSR_READ(SYNRF); \
2451 \
2452 if (prewrite) { \
2453 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
2454 ADM8211_CSR_READ(SYNRF); \
2455 } \
2456 \
2457 for (i = 0; i <= bits; i++) { \
2458 if (bitbuf & (1 << (bits - i))) \
2459 reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \
2460 else \
2461 reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \
2462 \
2463 ADM8211_CSR_WRITE(SYNRF, reg); \
2464 ADM8211_CSR_READ(SYNRF); \
2465 \
2466 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
2467 ADM8211_CSR_READ(SYNRF); \
2468 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
2469 ADM8211_CSR_READ(SYNRF); \
2470 } \
2471 \
2472 if (postwrite == 1) { \
2473 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
2474 ADM8211_CSR_READ(SYNRF); \
2475 } \
2476 if (postwrite == 2) { \
2477 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
2478 ADM8211_CSR_READ(SYNRF); \
2479 } \
2480 \
2481 ADM8211_CSR_WRITE(SYNRF, 0); \
2482 ADM8211_CSR_READ(SYNRF); \
2483 }
2484 #undef WRITE_SYN
2485 /* LDV_COMMENT_END_PREP */
2486 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "adm8211_add_interface" */
2487 struct ieee80211_vif * var_group4;
2488 /* LDV_COMMENT_BEGIN_PREP */
2489 #define IEEE80211_DUR_DS_LONG_PREAMBLE 144
2490 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
2491 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24
2492 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
2493 #define IEEE80211_DUR_DS_SLOW_ACK 112
2494 #define IEEE80211_DUR_DS_FAST_ACK 56
2495 #define IEEE80211_DUR_DS_SLOW_CTS 112
2496 #define IEEE80211_DUR_DS_FAST_CTS 56
2497 #define IEEE80211_DUR_DS_SLOT 20
2498 #define IEEE80211_DUR_DS_SIFS 10
2499 #ifdef CONFIG_PM
2500 #endif
2501 #ifdef CONFIG_PM
2502 #endif
2503 /* LDV_COMMENT_END_PREP */
2504 /* content: static void adm8211_remove_interface(struct ieee80211_hw *dev, struct ieee80211_vif *vif)*/
2505 /* LDV_COMMENT_BEGIN_PREP */
2506 #define ADM8211_INT(x) \
2507 do { \
2508 if (unlikely(stsr & ADM8211_STSR_ ## x)) \
2509 wiphy_debug(dev->wiphy, "%s\n", #x); \
2510 } while (0)
2511 #undef ADM8211_INT
2512 #define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
2513 static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \
2514 u16 addr, u32 value) { \
2515 struct adm8211_priv *priv = dev->priv; \
2516 unsigned int i; \
2517 u32 reg, bitbuf; \
2518 \
2519 value &= v_mask; \
2520 addr &= a_mask; \
2521 bitbuf = (value << v_shift) | (addr << a_shift); \
2522 \
2523 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \
2524 ADM8211_CSR_READ(SYNRF); \
2525 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \
2526 ADM8211_CSR_READ(SYNRF); \
2527 \
2528 if (prewrite) { \
2529 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
2530 ADM8211_CSR_READ(SYNRF); \
2531 } \
2532 \
2533 for (i = 0; i <= bits; i++) { \
2534 if (bitbuf & (1 << (bits - i))) \
2535 reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \
2536 else \
2537 reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \
2538 \
2539 ADM8211_CSR_WRITE(SYNRF, reg); \
2540 ADM8211_CSR_READ(SYNRF); \
2541 \
2542 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
2543 ADM8211_CSR_READ(SYNRF); \
2544 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
2545 ADM8211_CSR_READ(SYNRF); \
2546 } \
2547 \
2548 if (postwrite == 1) { \
2549 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
2550 ADM8211_CSR_READ(SYNRF); \
2551 } \
2552 if (postwrite == 2) { \
2553 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
2554 ADM8211_CSR_READ(SYNRF); \
2555 } \
2556 \
2557 ADM8211_CSR_WRITE(SYNRF, 0); \
2558 ADM8211_CSR_READ(SYNRF); \
2559 }
2560 #undef WRITE_SYN
2561 /* LDV_COMMENT_END_PREP */
2562 /* LDV_COMMENT_BEGIN_PREP */
2563 #define IEEE80211_DUR_DS_LONG_PREAMBLE 144
2564 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
2565 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24
2566 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
2567 #define IEEE80211_DUR_DS_SLOW_ACK 112
2568 #define IEEE80211_DUR_DS_FAST_ACK 56
2569 #define IEEE80211_DUR_DS_SLOW_CTS 112
2570 #define IEEE80211_DUR_DS_FAST_CTS 56
2571 #define IEEE80211_DUR_DS_SLOT 20
2572 #define IEEE80211_DUR_DS_SIFS 10
2573 #ifdef CONFIG_PM
2574 #endif
2575 #ifdef CONFIG_PM
2576 #endif
2577 /* LDV_COMMENT_END_PREP */
2578 /* content: static int adm8211_config(struct ieee80211_hw *dev, u32 changed)*/
2579 /* LDV_COMMENT_BEGIN_PREP */
2580 #define ADM8211_INT(x) \
2581 do { \
2582 if (unlikely(stsr & ADM8211_STSR_ ## x)) \
2583 wiphy_debug(dev->wiphy, "%s\n", #x); \
2584 } while (0)
2585 #undef ADM8211_INT
2586 #define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
2587 static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \
2588 u16 addr, u32 value) { \
2589 struct adm8211_priv *priv = dev->priv; \
2590 unsigned int i; \
2591 u32 reg, bitbuf; \
2592 \
2593 value &= v_mask; \
2594 addr &= a_mask; \
2595 bitbuf = (value << v_shift) | (addr << a_shift); \
2596 \
2597 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \
2598 ADM8211_CSR_READ(SYNRF); \
2599 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \
2600 ADM8211_CSR_READ(SYNRF); \
2601 \
2602 if (prewrite) { \
2603 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
2604 ADM8211_CSR_READ(SYNRF); \
2605 } \
2606 \
2607 for (i = 0; i <= bits; i++) { \
2608 if (bitbuf & (1 << (bits - i))) \
2609 reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \
2610 else \
2611 reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \
2612 \
2613 ADM8211_CSR_WRITE(SYNRF, reg); \
2614 ADM8211_CSR_READ(SYNRF); \
2615 \
2616 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
2617 ADM8211_CSR_READ(SYNRF); \
2618 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
2619 ADM8211_CSR_READ(SYNRF); \
2620 } \
2621 \
2622 if (postwrite == 1) { \
2623 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
2624 ADM8211_CSR_READ(SYNRF); \
2625 } \
2626 if (postwrite == 2) { \
2627 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
2628 ADM8211_CSR_READ(SYNRF); \
2629 } \
2630 \
2631 ADM8211_CSR_WRITE(SYNRF, 0); \
2632 ADM8211_CSR_READ(SYNRF); \
2633 }
2634 #undef WRITE_SYN
2635 /* LDV_COMMENT_END_PREP */
2636 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "adm8211_config" */
2637 u32 var_adm8211_config_21_p1;
2638 /* LDV_COMMENT_BEGIN_PREP */
2639 #define IEEE80211_DUR_DS_LONG_PREAMBLE 144
2640 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
2641 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24
2642 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
2643 #define IEEE80211_DUR_DS_SLOW_ACK 112
2644 #define IEEE80211_DUR_DS_FAST_ACK 56
2645 #define IEEE80211_DUR_DS_SLOW_CTS 112
2646 #define IEEE80211_DUR_DS_FAST_CTS 56
2647 #define IEEE80211_DUR_DS_SLOT 20
2648 #define IEEE80211_DUR_DS_SIFS 10
2649 #ifdef CONFIG_PM
2650 #endif
2651 #ifdef CONFIG_PM
2652 #endif
2653 /* LDV_COMMENT_END_PREP */
2654 /* content: static void adm8211_bss_info_changed(struct ieee80211_hw *dev, struct ieee80211_vif *vif, struct ieee80211_bss_conf *conf, u32 changes)*/
2655 /* LDV_COMMENT_BEGIN_PREP */
2656 #define ADM8211_INT(x) \
2657 do { \
2658 if (unlikely(stsr & ADM8211_STSR_ ## x)) \
2659 wiphy_debug(dev->wiphy, "%s\n", #x); \
2660 } while (0)
2661 #undef ADM8211_INT
2662 #define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
2663 static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \
2664 u16 addr, u32 value) { \
2665 struct adm8211_priv *priv = dev->priv; \
2666 unsigned int i; \
2667 u32 reg, bitbuf; \
2668 \
2669 value &= v_mask; \
2670 addr &= a_mask; \
2671 bitbuf = (value << v_shift) | (addr << a_shift); \
2672 \
2673 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \
2674 ADM8211_CSR_READ(SYNRF); \
2675 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \
2676 ADM8211_CSR_READ(SYNRF); \
2677 \
2678 if (prewrite) { \
2679 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
2680 ADM8211_CSR_READ(SYNRF); \
2681 } \
2682 \
2683 for (i = 0; i <= bits; i++) { \
2684 if (bitbuf & (1 << (bits - i))) \
2685 reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \
2686 else \
2687 reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \
2688 \
2689 ADM8211_CSR_WRITE(SYNRF, reg); \
2690 ADM8211_CSR_READ(SYNRF); \
2691 \
2692 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
2693 ADM8211_CSR_READ(SYNRF); \
2694 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
2695 ADM8211_CSR_READ(SYNRF); \
2696 } \
2697 \
2698 if (postwrite == 1) { \
2699 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
2700 ADM8211_CSR_READ(SYNRF); \
2701 } \
2702 if (postwrite == 2) { \
2703 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
2704 ADM8211_CSR_READ(SYNRF); \
2705 } \
2706 \
2707 ADM8211_CSR_WRITE(SYNRF, 0); \
2708 ADM8211_CSR_READ(SYNRF); \
2709 }
2710 #undef WRITE_SYN
2711 /* LDV_COMMENT_END_PREP */
2712 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "adm8211_bss_info_changed" */
2713 struct ieee80211_bss_conf * var_adm8211_bss_info_changed_22_p2;
2714 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "adm8211_bss_info_changed" */
2715 u32 var_adm8211_bss_info_changed_22_p3;
2716 /* LDV_COMMENT_BEGIN_PREP */
2717 #define IEEE80211_DUR_DS_LONG_PREAMBLE 144
2718 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
2719 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24
2720 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
2721 #define IEEE80211_DUR_DS_SLOW_ACK 112
2722 #define IEEE80211_DUR_DS_FAST_ACK 56
2723 #define IEEE80211_DUR_DS_SLOW_CTS 112
2724 #define IEEE80211_DUR_DS_FAST_CTS 56
2725 #define IEEE80211_DUR_DS_SLOT 20
2726 #define IEEE80211_DUR_DS_SIFS 10
2727 #ifdef CONFIG_PM
2728 #endif
2729 #ifdef CONFIG_PM
2730 #endif
2731 /* LDV_COMMENT_END_PREP */
2732 /* content: static u64 adm8211_prepare_multicast(struct ieee80211_hw *hw, struct netdev_hw_addr_list *mc_list)*/
2733 /* LDV_COMMENT_BEGIN_PREP */
2734 #define ADM8211_INT(x) \
2735 do { \
2736 if (unlikely(stsr & ADM8211_STSR_ ## x)) \
2737 wiphy_debug(dev->wiphy, "%s\n", #x); \
2738 } while (0)
2739 #undef ADM8211_INT
2740 #define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
2741 static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \
2742 u16 addr, u32 value) { \
2743 struct adm8211_priv *priv = dev->priv; \
2744 unsigned int i; \
2745 u32 reg, bitbuf; \
2746 \
2747 value &= v_mask; \
2748 addr &= a_mask; \
2749 bitbuf = (value << v_shift) | (addr << a_shift); \
2750 \
2751 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \
2752 ADM8211_CSR_READ(SYNRF); \
2753 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \
2754 ADM8211_CSR_READ(SYNRF); \
2755 \
2756 if (prewrite) { \
2757 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
2758 ADM8211_CSR_READ(SYNRF); \
2759 } \
2760 \
2761 for (i = 0; i <= bits; i++) { \
2762 if (bitbuf & (1 << (bits - i))) \
2763 reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \
2764 else \
2765 reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \
2766 \
2767 ADM8211_CSR_WRITE(SYNRF, reg); \
2768 ADM8211_CSR_READ(SYNRF); \
2769 \
2770 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
2771 ADM8211_CSR_READ(SYNRF); \
2772 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
2773 ADM8211_CSR_READ(SYNRF); \
2774 } \
2775 \
2776 if (postwrite == 1) { \
2777 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
2778 ADM8211_CSR_READ(SYNRF); \
2779 } \
2780 if (postwrite == 2) { \
2781 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
2782 ADM8211_CSR_READ(SYNRF); \
2783 } \
2784 \
2785 ADM8211_CSR_WRITE(SYNRF, 0); \
2786 ADM8211_CSR_READ(SYNRF); \
2787 }
2788 #undef WRITE_SYN
2789 /* LDV_COMMENT_END_PREP */
2790 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "adm8211_prepare_multicast" */
2791 struct netdev_hw_addr_list * var_group5;
2792 /* LDV_COMMENT_BEGIN_PREP */
2793 #define IEEE80211_DUR_DS_LONG_PREAMBLE 144
2794 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
2795 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24
2796 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
2797 #define IEEE80211_DUR_DS_SLOW_ACK 112
2798 #define IEEE80211_DUR_DS_FAST_ACK 56
2799 #define IEEE80211_DUR_DS_SLOW_CTS 112
2800 #define IEEE80211_DUR_DS_FAST_CTS 56
2801 #define IEEE80211_DUR_DS_SLOT 20
2802 #define IEEE80211_DUR_DS_SIFS 10
2803 #ifdef CONFIG_PM
2804 #endif
2805 #ifdef CONFIG_PM
2806 #endif
2807 /* LDV_COMMENT_END_PREP */
2808 /* content: static void adm8211_configure_filter(struct ieee80211_hw *dev, unsigned int changed_flags, unsigned int *total_flags, u64 multicast)*/
2809 /* LDV_COMMENT_BEGIN_PREP */
2810 #define ADM8211_INT(x) \
2811 do { \
2812 if (unlikely(stsr & ADM8211_STSR_ ## x)) \
2813 wiphy_debug(dev->wiphy, "%s\n", #x); \
2814 } while (0)
2815 #undef ADM8211_INT
2816 #define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
2817 static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \
2818 u16 addr, u32 value) { \
2819 struct adm8211_priv *priv = dev->priv; \
2820 unsigned int i; \
2821 u32 reg, bitbuf; \
2822 \
2823 value &= v_mask; \
2824 addr &= a_mask; \
2825 bitbuf = (value << v_shift) | (addr << a_shift); \
2826 \
2827 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \
2828 ADM8211_CSR_READ(SYNRF); \
2829 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \
2830 ADM8211_CSR_READ(SYNRF); \
2831 \
2832 if (prewrite) { \
2833 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
2834 ADM8211_CSR_READ(SYNRF); \
2835 } \
2836 \
2837 for (i = 0; i <= bits; i++) { \
2838 if (bitbuf & (1 << (bits - i))) \
2839 reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \
2840 else \
2841 reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \
2842 \
2843 ADM8211_CSR_WRITE(SYNRF, reg); \
2844 ADM8211_CSR_READ(SYNRF); \
2845 \
2846 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
2847 ADM8211_CSR_READ(SYNRF); \
2848 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
2849 ADM8211_CSR_READ(SYNRF); \
2850 } \
2851 \
2852 if (postwrite == 1) { \
2853 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
2854 ADM8211_CSR_READ(SYNRF); \
2855 } \
2856 if (postwrite == 2) { \
2857 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
2858 ADM8211_CSR_READ(SYNRF); \
2859 } \
2860 \
2861 ADM8211_CSR_WRITE(SYNRF, 0); \
2862 ADM8211_CSR_READ(SYNRF); \
2863 }
2864 #undef WRITE_SYN
2865 /* LDV_COMMENT_END_PREP */
2866 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "adm8211_configure_filter" */
2867 unsigned int var_adm8211_configure_filter_24_p1;
2868 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "adm8211_configure_filter" */
2869 unsigned int * var_adm8211_configure_filter_24_p2;
2870 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "adm8211_configure_filter" */
2871 u64 var_adm8211_configure_filter_24_p3;
2872 /* LDV_COMMENT_BEGIN_PREP */
2873 #define IEEE80211_DUR_DS_LONG_PREAMBLE 144
2874 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
2875 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24
2876 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
2877 #define IEEE80211_DUR_DS_SLOW_ACK 112
2878 #define IEEE80211_DUR_DS_FAST_ACK 56
2879 #define IEEE80211_DUR_DS_SLOW_CTS 112
2880 #define IEEE80211_DUR_DS_FAST_CTS 56
2881 #define IEEE80211_DUR_DS_SLOT 20
2882 #define IEEE80211_DUR_DS_SIFS 10
2883 #ifdef CONFIG_PM
2884 #endif
2885 #ifdef CONFIG_PM
2886 #endif
2887 /* LDV_COMMENT_END_PREP */
2888 /* content: static int adm8211_get_stats(struct ieee80211_hw *dev, struct ieee80211_low_level_stats *stats)*/
2889 /* LDV_COMMENT_END_PREP */
2890 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "adm8211_get_stats" */
2891 struct ieee80211_low_level_stats * var_group6;
2892 /* LDV_COMMENT_BEGIN_PREP */
2893 #define ADM8211_INT(x) \
2894 do { \
2895 if (unlikely(stsr & ADM8211_STSR_ ## x)) \
2896 wiphy_debug(dev->wiphy, "%s\n", #x); \
2897 } while (0)
2898 #undef ADM8211_INT
2899 #define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
2900 static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \
2901 u16 addr, u32 value) { \
2902 struct adm8211_priv *priv = dev->priv; \
2903 unsigned int i; \
2904 u32 reg, bitbuf; \
2905 \
2906 value &= v_mask; \
2907 addr &= a_mask; \
2908 bitbuf = (value << v_shift) | (addr << a_shift); \
2909 \
2910 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \
2911 ADM8211_CSR_READ(SYNRF); \
2912 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \
2913 ADM8211_CSR_READ(SYNRF); \
2914 \
2915 if (prewrite) { \
2916 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
2917 ADM8211_CSR_READ(SYNRF); \
2918 } \
2919 \
2920 for (i = 0; i <= bits; i++) { \
2921 if (bitbuf & (1 << (bits - i))) \
2922 reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \
2923 else \
2924 reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \
2925 \
2926 ADM8211_CSR_WRITE(SYNRF, reg); \
2927 ADM8211_CSR_READ(SYNRF); \
2928 \
2929 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
2930 ADM8211_CSR_READ(SYNRF); \
2931 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
2932 ADM8211_CSR_READ(SYNRF); \
2933 } \
2934 \
2935 if (postwrite == 1) { \
2936 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
2937 ADM8211_CSR_READ(SYNRF); \
2938 } \
2939 if (postwrite == 2) { \
2940 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
2941 ADM8211_CSR_READ(SYNRF); \
2942 } \
2943 \
2944 ADM8211_CSR_WRITE(SYNRF, 0); \
2945 ADM8211_CSR_READ(SYNRF); \
2946 }
2947 #undef WRITE_SYN
2948 #define IEEE80211_DUR_DS_LONG_PREAMBLE 144
2949 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
2950 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24
2951 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
2952 #define IEEE80211_DUR_DS_SLOW_ACK 112
2953 #define IEEE80211_DUR_DS_FAST_ACK 56
2954 #define IEEE80211_DUR_DS_SLOW_CTS 112
2955 #define IEEE80211_DUR_DS_FAST_CTS 56
2956 #define IEEE80211_DUR_DS_SLOT 20
2957 #define IEEE80211_DUR_DS_SIFS 10
2958 #ifdef CONFIG_PM
2959 #endif
2960 #ifdef CONFIG_PM
2961 #endif
2962 /* LDV_COMMENT_END_PREP */
2963 /* content: static u64 adm8211_get_tsft(struct ieee80211_hw *dev, struct ieee80211_vif *vif)*/
2964 /* LDV_COMMENT_BEGIN_PREP */
2965 #define ADM8211_INT(x) \
2966 do { \
2967 if (unlikely(stsr & ADM8211_STSR_ ## x)) \
2968 wiphy_debug(dev->wiphy, "%s\n", #x); \
2969 } while (0)
2970 #undef ADM8211_INT
2971 #define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
2972 static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \
2973 u16 addr, u32 value) { \
2974 struct adm8211_priv *priv = dev->priv; \
2975 unsigned int i; \
2976 u32 reg, bitbuf; \
2977 \
2978 value &= v_mask; \
2979 addr &= a_mask; \
2980 bitbuf = (value << v_shift) | (addr << a_shift); \
2981 \
2982 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \
2983 ADM8211_CSR_READ(SYNRF); \
2984 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \
2985 ADM8211_CSR_READ(SYNRF); \
2986 \
2987 if (prewrite) { \
2988 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
2989 ADM8211_CSR_READ(SYNRF); \
2990 } \
2991 \
2992 for (i = 0; i <= bits; i++) { \
2993 if (bitbuf & (1 << (bits - i))) \
2994 reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \
2995 else \
2996 reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \
2997 \
2998 ADM8211_CSR_WRITE(SYNRF, reg); \
2999 ADM8211_CSR_READ(SYNRF); \
3000 \
3001 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
3002 ADM8211_CSR_READ(SYNRF); \
3003 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
3004 ADM8211_CSR_READ(SYNRF); \
3005 } \
3006 \
3007 if (postwrite == 1) { \
3008 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
3009 ADM8211_CSR_READ(SYNRF); \
3010 } \
3011 if (postwrite == 2) { \
3012 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
3013 ADM8211_CSR_READ(SYNRF); \
3014 } \
3015 \
3016 ADM8211_CSR_WRITE(SYNRF, 0); \
3017 ADM8211_CSR_READ(SYNRF); \
3018 }
3019 #undef WRITE_SYN
3020 /* LDV_COMMENT_END_PREP */
3021 /* LDV_COMMENT_BEGIN_PREP */
3022 #define IEEE80211_DUR_DS_LONG_PREAMBLE 144
3023 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
3024 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24
3025 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
3026 #define IEEE80211_DUR_DS_SLOW_ACK 112
3027 #define IEEE80211_DUR_DS_FAST_ACK 56
3028 #define IEEE80211_DUR_DS_SLOW_CTS 112
3029 #define IEEE80211_DUR_DS_FAST_CTS 56
3030 #define IEEE80211_DUR_DS_SLOT 20
3031 #define IEEE80211_DUR_DS_SIFS 10
3032 #ifdef CONFIG_PM
3033 #endif
3034 #ifdef CONFIG_PM
3035 #endif
3036 /* LDV_COMMENT_END_PREP */
3037
3038 /** STRUCT: struct type: pci_driver, struct name: adm8211_driver **/
3039 /* content: static int adm8211_probe(struct pci_dev *pdev, const struct pci_device_id *id)*/
3040 /* LDV_COMMENT_BEGIN_PREP */
3041 #define ADM8211_INT(x) \
3042 do { \
3043 if (unlikely(stsr & ADM8211_STSR_ ## x)) \
3044 wiphy_debug(dev->wiphy, "%s\n", #x); \
3045 } while (0)
3046 #undef ADM8211_INT
3047 #define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
3048 static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \
3049 u16 addr, u32 value) { \
3050 struct adm8211_priv *priv = dev->priv; \
3051 unsigned int i; \
3052 u32 reg, bitbuf; \
3053 \
3054 value &= v_mask; \
3055 addr &= a_mask; \
3056 bitbuf = (value << v_shift) | (addr << a_shift); \
3057 \
3058 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \
3059 ADM8211_CSR_READ(SYNRF); \
3060 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \
3061 ADM8211_CSR_READ(SYNRF); \
3062 \
3063 if (prewrite) { \
3064 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
3065 ADM8211_CSR_READ(SYNRF); \
3066 } \
3067 \
3068 for (i = 0; i <= bits; i++) { \
3069 if (bitbuf & (1 << (bits - i))) \
3070 reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \
3071 else \
3072 reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \
3073 \
3074 ADM8211_CSR_WRITE(SYNRF, reg); \
3075 ADM8211_CSR_READ(SYNRF); \
3076 \
3077 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
3078 ADM8211_CSR_READ(SYNRF); \
3079 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
3080 ADM8211_CSR_READ(SYNRF); \
3081 } \
3082 \
3083 if (postwrite == 1) { \
3084 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
3085 ADM8211_CSR_READ(SYNRF); \
3086 } \
3087 if (postwrite == 2) { \
3088 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
3089 ADM8211_CSR_READ(SYNRF); \
3090 } \
3091 \
3092 ADM8211_CSR_WRITE(SYNRF, 0); \
3093 ADM8211_CSR_READ(SYNRF); \
3094 }
3095 #undef WRITE_SYN
3096 #define IEEE80211_DUR_DS_LONG_PREAMBLE 144
3097 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
3098 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24
3099 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
3100 #define IEEE80211_DUR_DS_SLOW_ACK 112
3101 #define IEEE80211_DUR_DS_FAST_ACK 56
3102 #define IEEE80211_DUR_DS_SLOW_CTS 112
3103 #define IEEE80211_DUR_DS_FAST_CTS 56
3104 #define IEEE80211_DUR_DS_SLOT 20
3105 #define IEEE80211_DUR_DS_SIFS 10
3106 /* LDV_COMMENT_END_PREP */
3107 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "adm8211_probe" */
3108 struct pci_dev * var_group7;
3109 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "adm8211_probe" */
3110 const struct pci_device_id * var_adm8211_probe_35_p1;
3111 /* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "adm8211_probe" */
3112 static int res_adm8211_probe_35;
3113 /* LDV_COMMENT_BEGIN_PREP */
3114 #ifdef CONFIG_PM
3115 #endif
3116 #ifdef CONFIG_PM
3117 #endif
3118 /* LDV_COMMENT_END_PREP */
3119 /* content: static void adm8211_remove(struct pci_dev *pdev)*/
3120 /* LDV_COMMENT_BEGIN_PREP */
3121 #define ADM8211_INT(x) \
3122 do { \
3123 if (unlikely(stsr & ADM8211_STSR_ ## x)) \
3124 wiphy_debug(dev->wiphy, "%s\n", #x); \
3125 } while (0)
3126 #undef ADM8211_INT
3127 #define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
3128 static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \
3129 u16 addr, u32 value) { \
3130 struct adm8211_priv *priv = dev->priv; \
3131 unsigned int i; \
3132 u32 reg, bitbuf; \
3133 \
3134 value &= v_mask; \
3135 addr &= a_mask; \
3136 bitbuf = (value << v_shift) | (addr << a_shift); \
3137 \
3138 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \
3139 ADM8211_CSR_READ(SYNRF); \
3140 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \
3141 ADM8211_CSR_READ(SYNRF); \
3142 \
3143 if (prewrite) { \
3144 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
3145 ADM8211_CSR_READ(SYNRF); \
3146 } \
3147 \
3148 for (i = 0; i <= bits; i++) { \
3149 if (bitbuf & (1 << (bits - i))) \
3150 reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \
3151 else \
3152 reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \
3153 \
3154 ADM8211_CSR_WRITE(SYNRF, reg); \
3155 ADM8211_CSR_READ(SYNRF); \
3156 \
3157 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
3158 ADM8211_CSR_READ(SYNRF); \
3159 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
3160 ADM8211_CSR_READ(SYNRF); \
3161 } \
3162 \
3163 if (postwrite == 1) { \
3164 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
3165 ADM8211_CSR_READ(SYNRF); \
3166 } \
3167 if (postwrite == 2) { \
3168 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
3169 ADM8211_CSR_READ(SYNRF); \
3170 } \
3171 \
3172 ADM8211_CSR_WRITE(SYNRF, 0); \
3173 ADM8211_CSR_READ(SYNRF); \
3174 }
3175 #undef WRITE_SYN
3176 #define IEEE80211_DUR_DS_LONG_PREAMBLE 144
3177 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
3178 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24
3179 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
3180 #define IEEE80211_DUR_DS_SLOW_ACK 112
3181 #define IEEE80211_DUR_DS_FAST_ACK 56
3182 #define IEEE80211_DUR_DS_SLOW_CTS 112
3183 #define IEEE80211_DUR_DS_FAST_CTS 56
3184 #define IEEE80211_DUR_DS_SLOT 20
3185 #define IEEE80211_DUR_DS_SIFS 10
3186 /* LDV_COMMENT_END_PREP */
3187 /* LDV_COMMENT_BEGIN_PREP */
3188 #ifdef CONFIG_PM
3189 #endif
3190 #ifdef CONFIG_PM
3191 #endif
3192 /* LDV_COMMENT_END_PREP */
3193 /* content: static int adm8211_suspend(struct pci_dev *pdev, pm_message_t state)*/
3194 /* LDV_COMMENT_BEGIN_PREP */
3195 #define ADM8211_INT(x) \
3196 do { \
3197 if (unlikely(stsr & ADM8211_STSR_ ## x)) \
3198 wiphy_debug(dev->wiphy, "%s\n", #x); \
3199 } while (0)
3200 #undef ADM8211_INT
3201 #define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
3202 static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \
3203 u16 addr, u32 value) { \
3204 struct adm8211_priv *priv = dev->priv; \
3205 unsigned int i; \
3206 u32 reg, bitbuf; \
3207 \
3208 value &= v_mask; \
3209 addr &= a_mask; \
3210 bitbuf = (value << v_shift) | (addr << a_shift); \
3211 \
3212 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \
3213 ADM8211_CSR_READ(SYNRF); \
3214 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \
3215 ADM8211_CSR_READ(SYNRF); \
3216 \
3217 if (prewrite) { \
3218 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
3219 ADM8211_CSR_READ(SYNRF); \
3220 } \
3221 \
3222 for (i = 0; i <= bits; i++) { \
3223 if (bitbuf & (1 << (bits - i))) \
3224 reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \
3225 else \
3226 reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \
3227 \
3228 ADM8211_CSR_WRITE(SYNRF, reg); \
3229 ADM8211_CSR_READ(SYNRF); \
3230 \
3231 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
3232 ADM8211_CSR_READ(SYNRF); \
3233 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
3234 ADM8211_CSR_READ(SYNRF); \
3235 } \
3236 \
3237 if (postwrite == 1) { \
3238 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
3239 ADM8211_CSR_READ(SYNRF); \
3240 } \
3241 if (postwrite == 2) { \
3242 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
3243 ADM8211_CSR_READ(SYNRF); \
3244 } \
3245 \
3246 ADM8211_CSR_WRITE(SYNRF, 0); \
3247 ADM8211_CSR_READ(SYNRF); \
3248 }
3249 #undef WRITE_SYN
3250 #define IEEE80211_DUR_DS_LONG_PREAMBLE 144
3251 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
3252 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24
3253 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
3254 #define IEEE80211_DUR_DS_SLOW_ACK 112
3255 #define IEEE80211_DUR_DS_FAST_ACK 56
3256 #define IEEE80211_DUR_DS_SLOW_CTS 112
3257 #define IEEE80211_DUR_DS_FAST_CTS 56
3258 #define IEEE80211_DUR_DS_SLOT 20
3259 #define IEEE80211_DUR_DS_SIFS 10
3260 #ifdef CONFIG_PM
3261 /* LDV_COMMENT_END_PREP */
3262 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "adm8211_suspend" */
3263 pm_message_t var_adm8211_suspend_37_p1;
3264 /* LDV_COMMENT_BEGIN_PREP */
3265 #endif
3266 #ifdef CONFIG_PM
3267 #endif
3268 /* LDV_COMMENT_END_PREP */
3269 /* content: static int adm8211_resume(struct pci_dev *pdev)*/
3270 /* LDV_COMMENT_BEGIN_PREP */
3271 #define ADM8211_INT(x) \
3272 do { \
3273 if (unlikely(stsr & ADM8211_STSR_ ## x)) \
3274 wiphy_debug(dev->wiphy, "%s\n", #x); \
3275 } while (0)
3276 #undef ADM8211_INT
3277 #define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
3278 static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \
3279 u16 addr, u32 value) { \
3280 struct adm8211_priv *priv = dev->priv; \
3281 unsigned int i; \
3282 u32 reg, bitbuf; \
3283 \
3284 value &= v_mask; \
3285 addr &= a_mask; \
3286 bitbuf = (value << v_shift) | (addr << a_shift); \
3287 \
3288 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \
3289 ADM8211_CSR_READ(SYNRF); \
3290 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \
3291 ADM8211_CSR_READ(SYNRF); \
3292 \
3293 if (prewrite) { \
3294 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
3295 ADM8211_CSR_READ(SYNRF); \
3296 } \
3297 \
3298 for (i = 0; i <= bits; i++) { \
3299 if (bitbuf & (1 << (bits - i))) \
3300 reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \
3301 else \
3302 reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \
3303 \
3304 ADM8211_CSR_WRITE(SYNRF, reg); \
3305 ADM8211_CSR_READ(SYNRF); \
3306 \
3307 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
3308 ADM8211_CSR_READ(SYNRF); \
3309 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
3310 ADM8211_CSR_READ(SYNRF); \
3311 } \
3312 \
3313 if (postwrite == 1) { \
3314 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
3315 ADM8211_CSR_READ(SYNRF); \
3316 } \
3317 if (postwrite == 2) { \
3318 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
3319 ADM8211_CSR_READ(SYNRF); \
3320 } \
3321 \
3322 ADM8211_CSR_WRITE(SYNRF, 0); \
3323 ADM8211_CSR_READ(SYNRF); \
3324 }
3325 #undef WRITE_SYN
3326 #define IEEE80211_DUR_DS_LONG_PREAMBLE 144
3327 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
3328 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24
3329 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
3330 #define IEEE80211_DUR_DS_SLOW_ACK 112
3331 #define IEEE80211_DUR_DS_FAST_ACK 56
3332 #define IEEE80211_DUR_DS_SLOW_CTS 112
3333 #define IEEE80211_DUR_DS_FAST_CTS 56
3334 #define IEEE80211_DUR_DS_SLOT 20
3335 #define IEEE80211_DUR_DS_SIFS 10
3336 #ifdef CONFIG_PM
3337 /* LDV_COMMENT_END_PREP */
3338 /* LDV_COMMENT_BEGIN_PREP */
3339 #endif
3340 #ifdef CONFIG_PM
3341 #endif
3342 /* LDV_COMMENT_END_PREP */
3343
3344 /** CALLBACK SECTION request_irq **/
3345 /* content: static irqreturn_t adm8211_interrupt(int irq, void *dev_id)*/
3346 /* LDV_COMMENT_END_PREP */
3347 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "adm8211_interrupt" */
3348 int var_adm8211_interrupt_9_p0;
3349 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "adm8211_interrupt" */
3350 void * var_adm8211_interrupt_9_p1;
3351 /* LDV_COMMENT_BEGIN_PREP */
3352 #define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
3353 static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \
3354 u16 addr, u32 value) { \
3355 struct adm8211_priv *priv = dev->priv; \
3356 unsigned int i; \
3357 u32 reg, bitbuf; \
3358 \
3359 value &= v_mask; \
3360 addr &= a_mask; \
3361 bitbuf = (value << v_shift) | (addr << a_shift); \
3362 \
3363 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \
3364 ADM8211_CSR_READ(SYNRF); \
3365 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \
3366 ADM8211_CSR_READ(SYNRF); \
3367 \
3368 if (prewrite) { \
3369 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
3370 ADM8211_CSR_READ(SYNRF); \
3371 } \
3372 \
3373 for (i = 0; i <= bits; i++) { \
3374 if (bitbuf & (1 << (bits - i))) \
3375 reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \
3376 else \
3377 reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \
3378 \
3379 ADM8211_CSR_WRITE(SYNRF, reg); \
3380 ADM8211_CSR_READ(SYNRF); \
3381 \
3382 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
3383 ADM8211_CSR_READ(SYNRF); \
3384 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
3385 ADM8211_CSR_READ(SYNRF); \
3386 } \
3387 \
3388 if (postwrite == 1) { \
3389 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
3390 ADM8211_CSR_READ(SYNRF); \
3391 } \
3392 if (postwrite == 2) { \
3393 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
3394 ADM8211_CSR_READ(SYNRF); \
3395 } \
3396 \
3397 ADM8211_CSR_WRITE(SYNRF, 0); \
3398 ADM8211_CSR_READ(SYNRF); \
3399 }
3400 #undef WRITE_SYN
3401 #define IEEE80211_DUR_DS_LONG_PREAMBLE 144
3402 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
3403 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24
3404 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
3405 #define IEEE80211_DUR_DS_SLOW_ACK 112
3406 #define IEEE80211_DUR_DS_FAST_ACK 56
3407 #define IEEE80211_DUR_DS_SLOW_CTS 112
3408 #define IEEE80211_DUR_DS_FAST_CTS 56
3409 #define IEEE80211_DUR_DS_SLOT 20
3410 #define IEEE80211_DUR_DS_SIFS 10
3411 #ifdef CONFIG_PM
3412 #endif
3413 #ifdef CONFIG_PM
3414 #endif
3415 /* LDV_COMMENT_END_PREP */
3416
3417
3418
3419
3420 /* LDV_COMMENT_END_VARIABLE_DECLARATION_PART */
3421 /* LDV_COMMENT_BEGIN_VARIABLE_INITIALIZING_PART */
3422 /*============================= VARIABLE INITIALIZING PART =============================*/
3423 LDV_IN_INTERRUPT=1;
3424
3425
3426
3427
3428 /* LDV_COMMENT_END_VARIABLE_INITIALIZING_PART */
3429 /* LDV_COMMENT_BEGIN_FUNCTION_CALL_SECTION */
3430 /*============================= FUNCTION CALL SECTION =============================*/
3431 /* LDV_COMMENT_FUNCTION_CALL Initialize LDV model. */
3432 ldv_initialize();
3433
3434
3435
3436
3437 int ldv_s_adm8211_driver_pci_driver = 0;
3438
3439
3440
3441
3442
3443 while( nondet_int()
3444 || !(ldv_s_adm8211_driver_pci_driver == 0)
3445 ) {
3446
3447 switch(nondet_int()) {
3448
3449 case 0: {
3450
3451 /** STRUCT: struct type: eeprom_93cx6, struct name: eeprom **/
3452
3453
3454 /* content: static void adm8211_eeprom_register_read(struct eeprom_93cx6 *eeprom)*/
3455 /* LDV_COMMENT_END_PREP */
3456 /* LDV_COMMENT_FUNCTION_CALL Function from field "register_read" from driver structure with callbacks "eeprom" */
3457 ldv_handler_precall();
3458 adm8211_eeprom_register_read( var_group1);
3459 /* LDV_COMMENT_BEGIN_PREP */
3460 #define ADM8211_INT(x) \
3461 do { \
3462 if (unlikely(stsr & ADM8211_STSR_ ## x)) \
3463 wiphy_debug(dev->wiphy, "%s\n", #x); \
3464 } while (0)
3465 #undef ADM8211_INT
3466 #define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
3467 static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \
3468 u16 addr, u32 value) { \
3469 struct adm8211_priv *priv = dev->priv; \
3470 unsigned int i; \
3471 u32 reg, bitbuf; \
3472 \
3473 value &= v_mask; \
3474 addr &= a_mask; \
3475 bitbuf = (value << v_shift) | (addr << a_shift); \
3476 \
3477 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \
3478 ADM8211_CSR_READ(SYNRF); \
3479 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \
3480 ADM8211_CSR_READ(SYNRF); \
3481 \
3482 if (prewrite) { \
3483 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
3484 ADM8211_CSR_READ(SYNRF); \
3485 } \
3486 \
3487 for (i = 0; i <= bits; i++) { \
3488 if (bitbuf & (1 << (bits - i))) \
3489 reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \
3490 else \
3491 reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \
3492 \
3493 ADM8211_CSR_WRITE(SYNRF, reg); \
3494 ADM8211_CSR_READ(SYNRF); \
3495 \
3496 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
3497 ADM8211_CSR_READ(SYNRF); \
3498 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
3499 ADM8211_CSR_READ(SYNRF); \
3500 } \
3501 \
3502 if (postwrite == 1) { \
3503 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
3504 ADM8211_CSR_READ(SYNRF); \
3505 } \
3506 if (postwrite == 2) { \
3507 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
3508 ADM8211_CSR_READ(SYNRF); \
3509 } \
3510 \
3511 ADM8211_CSR_WRITE(SYNRF, 0); \
3512 ADM8211_CSR_READ(SYNRF); \
3513 }
3514 #undef WRITE_SYN
3515 #define IEEE80211_DUR_DS_LONG_PREAMBLE 144
3516 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
3517 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24
3518 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
3519 #define IEEE80211_DUR_DS_SLOW_ACK 112
3520 #define IEEE80211_DUR_DS_FAST_ACK 56
3521 #define IEEE80211_DUR_DS_SLOW_CTS 112
3522 #define IEEE80211_DUR_DS_FAST_CTS 56
3523 #define IEEE80211_DUR_DS_SLOT 20
3524 #define IEEE80211_DUR_DS_SIFS 10
3525 #ifdef CONFIG_PM
3526 #endif
3527 #ifdef CONFIG_PM
3528 #endif
3529 /* LDV_COMMENT_END_PREP */
3530
3531
3532
3533
3534 }
3535
3536 break;
3537 case 1: {
3538
3539 /** STRUCT: struct type: eeprom_93cx6, struct name: eeprom **/
3540
3541
3542 /* content: static void adm8211_eeprom_register_write(struct eeprom_93cx6 *eeprom)*/
3543 /* LDV_COMMENT_END_PREP */
3544 /* LDV_COMMENT_FUNCTION_CALL Function from field "register_write" from driver structure with callbacks "eeprom" */
3545 ldv_handler_precall();
3546 adm8211_eeprom_register_write( var_group1);
3547 /* LDV_COMMENT_BEGIN_PREP */
3548 #define ADM8211_INT(x) \
3549 do { \
3550 if (unlikely(stsr & ADM8211_STSR_ ## x)) \
3551 wiphy_debug(dev->wiphy, "%s\n", #x); \
3552 } while (0)
3553 #undef ADM8211_INT
3554 #define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
3555 static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \
3556 u16 addr, u32 value) { \
3557 struct adm8211_priv *priv = dev->priv; \
3558 unsigned int i; \
3559 u32 reg, bitbuf; \
3560 \
3561 value &= v_mask; \
3562 addr &= a_mask; \
3563 bitbuf = (value << v_shift) | (addr << a_shift); \
3564 \
3565 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \
3566 ADM8211_CSR_READ(SYNRF); \
3567 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \
3568 ADM8211_CSR_READ(SYNRF); \
3569 \
3570 if (prewrite) { \
3571 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
3572 ADM8211_CSR_READ(SYNRF); \
3573 } \
3574 \
3575 for (i = 0; i <= bits; i++) { \
3576 if (bitbuf & (1 << (bits - i))) \
3577 reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \
3578 else \
3579 reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \
3580 \
3581 ADM8211_CSR_WRITE(SYNRF, reg); \
3582 ADM8211_CSR_READ(SYNRF); \
3583 \
3584 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
3585 ADM8211_CSR_READ(SYNRF); \
3586 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
3587 ADM8211_CSR_READ(SYNRF); \
3588 } \
3589 \
3590 if (postwrite == 1) { \
3591 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
3592 ADM8211_CSR_READ(SYNRF); \
3593 } \
3594 if (postwrite == 2) { \
3595 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
3596 ADM8211_CSR_READ(SYNRF); \
3597 } \
3598 \
3599 ADM8211_CSR_WRITE(SYNRF, 0); \
3600 ADM8211_CSR_READ(SYNRF); \
3601 }
3602 #undef WRITE_SYN
3603 #define IEEE80211_DUR_DS_LONG_PREAMBLE 144
3604 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
3605 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24
3606 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
3607 #define IEEE80211_DUR_DS_SLOW_ACK 112
3608 #define IEEE80211_DUR_DS_FAST_ACK 56
3609 #define IEEE80211_DUR_DS_SLOW_CTS 112
3610 #define IEEE80211_DUR_DS_FAST_CTS 56
3611 #define IEEE80211_DUR_DS_SLOT 20
3612 #define IEEE80211_DUR_DS_SIFS 10
3613 #ifdef CONFIG_PM
3614 #endif
3615 #ifdef CONFIG_PM
3616 #endif
3617 /* LDV_COMMENT_END_PREP */
3618
3619
3620
3621
3622 }
3623
3624 break;
3625 case 2: {
3626
3627 /** STRUCT: struct type: ieee80211_ops, struct name: adm8211_ops **/
3628
3629
3630 /* content: static void adm8211_tx(struct ieee80211_hw *dev, struct ieee80211_tx_control *control, struct sk_buff *skb)*/
3631 /* LDV_COMMENT_BEGIN_PREP */
3632 #define ADM8211_INT(x) \
3633 do { \
3634 if (unlikely(stsr & ADM8211_STSR_ ## x)) \
3635 wiphy_debug(dev->wiphy, "%s\n", #x); \
3636 } while (0)
3637 #undef ADM8211_INT
3638 #define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
3639 static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \
3640 u16 addr, u32 value) { \
3641 struct adm8211_priv *priv = dev->priv; \
3642 unsigned int i; \
3643 u32 reg, bitbuf; \
3644 \
3645 value &= v_mask; \
3646 addr &= a_mask; \
3647 bitbuf = (value << v_shift) | (addr << a_shift); \
3648 \
3649 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \
3650 ADM8211_CSR_READ(SYNRF); \
3651 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \
3652 ADM8211_CSR_READ(SYNRF); \
3653 \
3654 if (prewrite) { \
3655 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
3656 ADM8211_CSR_READ(SYNRF); \
3657 } \
3658 \
3659 for (i = 0; i <= bits; i++) { \
3660 if (bitbuf & (1 << (bits - i))) \
3661 reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \
3662 else \
3663 reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \
3664 \
3665 ADM8211_CSR_WRITE(SYNRF, reg); \
3666 ADM8211_CSR_READ(SYNRF); \
3667 \
3668 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
3669 ADM8211_CSR_READ(SYNRF); \
3670 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
3671 ADM8211_CSR_READ(SYNRF); \
3672 } \
3673 \
3674 if (postwrite == 1) { \
3675 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
3676 ADM8211_CSR_READ(SYNRF); \
3677 } \
3678 if (postwrite == 2) { \
3679 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
3680 ADM8211_CSR_READ(SYNRF); \
3681 } \
3682 \
3683 ADM8211_CSR_WRITE(SYNRF, 0); \
3684 ADM8211_CSR_READ(SYNRF); \
3685 }
3686 #undef WRITE_SYN
3687 #define IEEE80211_DUR_DS_LONG_PREAMBLE 144
3688 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
3689 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24
3690 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
3691 #define IEEE80211_DUR_DS_SLOW_ACK 112
3692 #define IEEE80211_DUR_DS_FAST_ACK 56
3693 #define IEEE80211_DUR_DS_SLOW_CTS 112
3694 #define IEEE80211_DUR_DS_FAST_CTS 56
3695 #define IEEE80211_DUR_DS_SLOT 20
3696 #define IEEE80211_DUR_DS_SIFS 10
3697 /* LDV_COMMENT_END_PREP */
3698 /* LDV_COMMENT_FUNCTION_CALL Function from field "tx" from driver structure with callbacks "adm8211_ops" */
3699 ldv_handler_precall();
3700 adm8211_tx( var_group2, var_group3, var_adm8211_tx_33_p2);
3701 /* LDV_COMMENT_BEGIN_PREP */
3702 #ifdef CONFIG_PM
3703 #endif
3704 #ifdef CONFIG_PM
3705 #endif
3706 /* LDV_COMMENT_END_PREP */
3707
3708
3709
3710
3711 }
3712
3713 break;
3714 case 3: {
3715
3716 /** STRUCT: struct type: ieee80211_ops, struct name: adm8211_ops **/
3717
3718
3719 /* content: static int adm8211_start(struct ieee80211_hw *dev)*/
3720 /* LDV_COMMENT_BEGIN_PREP */
3721 #define ADM8211_INT(x) \
3722 do { \
3723 if (unlikely(stsr & ADM8211_STSR_ ## x)) \
3724 wiphy_debug(dev->wiphy, "%s\n", #x); \
3725 } while (0)
3726 #undef ADM8211_INT
3727 #define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
3728 static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \
3729 u16 addr, u32 value) { \
3730 struct adm8211_priv *priv = dev->priv; \
3731 unsigned int i; \
3732 u32 reg, bitbuf; \
3733 \
3734 value &= v_mask; \
3735 addr &= a_mask; \
3736 bitbuf = (value << v_shift) | (addr << a_shift); \
3737 \
3738 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \
3739 ADM8211_CSR_READ(SYNRF); \
3740 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \
3741 ADM8211_CSR_READ(SYNRF); \
3742 \
3743 if (prewrite) { \
3744 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
3745 ADM8211_CSR_READ(SYNRF); \
3746 } \
3747 \
3748 for (i = 0; i <= bits; i++) { \
3749 if (bitbuf & (1 << (bits - i))) \
3750 reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \
3751 else \
3752 reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \
3753 \
3754 ADM8211_CSR_WRITE(SYNRF, reg); \
3755 ADM8211_CSR_READ(SYNRF); \
3756 \
3757 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
3758 ADM8211_CSR_READ(SYNRF); \
3759 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
3760 ADM8211_CSR_READ(SYNRF); \
3761 } \
3762 \
3763 if (postwrite == 1) { \
3764 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
3765 ADM8211_CSR_READ(SYNRF); \
3766 } \
3767 if (postwrite == 2) { \
3768 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
3769 ADM8211_CSR_READ(SYNRF); \
3770 } \
3771 \
3772 ADM8211_CSR_WRITE(SYNRF, 0); \
3773 ADM8211_CSR_READ(SYNRF); \
3774 }
3775 #undef WRITE_SYN
3776 /* LDV_COMMENT_END_PREP */
3777 /* LDV_COMMENT_FUNCTION_CALL Function from field "start" from driver structure with callbacks "adm8211_ops" */
3778 ldv_handler_precall();
3779 adm8211_start( var_group2);
3780 /* LDV_COMMENT_BEGIN_PREP */
3781 #define IEEE80211_DUR_DS_LONG_PREAMBLE 144
3782 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
3783 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24
3784 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
3785 #define IEEE80211_DUR_DS_SLOW_ACK 112
3786 #define IEEE80211_DUR_DS_FAST_ACK 56
3787 #define IEEE80211_DUR_DS_SLOW_CTS 112
3788 #define IEEE80211_DUR_DS_FAST_CTS 56
3789 #define IEEE80211_DUR_DS_SLOT 20
3790 #define IEEE80211_DUR_DS_SIFS 10
3791 #ifdef CONFIG_PM
3792 #endif
3793 #ifdef CONFIG_PM
3794 #endif
3795 /* LDV_COMMENT_END_PREP */
3796
3797
3798
3799
3800 }
3801
3802 break;
3803 case 4: {
3804
3805 /** STRUCT: struct type: ieee80211_ops, struct name: adm8211_ops **/
3806
3807
3808 /* content: static void adm8211_stop(struct ieee80211_hw *dev)*/
3809 /* LDV_COMMENT_BEGIN_PREP */
3810 #define ADM8211_INT(x) \
3811 do { \
3812 if (unlikely(stsr & ADM8211_STSR_ ## x)) \
3813 wiphy_debug(dev->wiphy, "%s\n", #x); \
3814 } while (0)
3815 #undef ADM8211_INT
3816 #define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
3817 static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \
3818 u16 addr, u32 value) { \
3819 struct adm8211_priv *priv = dev->priv; \
3820 unsigned int i; \
3821 u32 reg, bitbuf; \
3822 \
3823 value &= v_mask; \
3824 addr &= a_mask; \
3825 bitbuf = (value << v_shift) | (addr << a_shift); \
3826 \
3827 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \
3828 ADM8211_CSR_READ(SYNRF); \
3829 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \
3830 ADM8211_CSR_READ(SYNRF); \
3831 \
3832 if (prewrite) { \
3833 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
3834 ADM8211_CSR_READ(SYNRF); \
3835 } \
3836 \
3837 for (i = 0; i <= bits; i++) { \
3838 if (bitbuf & (1 << (bits - i))) \
3839 reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \
3840 else \
3841 reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \
3842 \
3843 ADM8211_CSR_WRITE(SYNRF, reg); \
3844 ADM8211_CSR_READ(SYNRF); \
3845 \
3846 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
3847 ADM8211_CSR_READ(SYNRF); \
3848 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
3849 ADM8211_CSR_READ(SYNRF); \
3850 } \
3851 \
3852 if (postwrite == 1) { \
3853 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
3854 ADM8211_CSR_READ(SYNRF); \
3855 } \
3856 if (postwrite == 2) { \
3857 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
3858 ADM8211_CSR_READ(SYNRF); \
3859 } \
3860 \
3861 ADM8211_CSR_WRITE(SYNRF, 0); \
3862 ADM8211_CSR_READ(SYNRF); \
3863 }
3864 #undef WRITE_SYN
3865 /* LDV_COMMENT_END_PREP */
3866 /* LDV_COMMENT_FUNCTION_CALL Function from field "stop" from driver structure with callbacks "adm8211_ops" */
3867 ldv_handler_precall();
3868 adm8211_stop( var_group2);
3869 /* LDV_COMMENT_BEGIN_PREP */
3870 #define IEEE80211_DUR_DS_LONG_PREAMBLE 144
3871 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
3872 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24
3873 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
3874 #define IEEE80211_DUR_DS_SLOW_ACK 112
3875 #define IEEE80211_DUR_DS_FAST_ACK 56
3876 #define IEEE80211_DUR_DS_SLOW_CTS 112
3877 #define IEEE80211_DUR_DS_FAST_CTS 56
3878 #define IEEE80211_DUR_DS_SLOT 20
3879 #define IEEE80211_DUR_DS_SIFS 10
3880 #ifdef CONFIG_PM
3881 #endif
3882 #ifdef CONFIG_PM
3883 #endif
3884 /* LDV_COMMENT_END_PREP */
3885
3886
3887
3888
3889 }
3890
3891 break;
3892 case 5: {
3893
3894 /** STRUCT: struct type: ieee80211_ops, struct name: adm8211_ops **/
3895
3896
3897 /* content: static int adm8211_add_interface(struct ieee80211_hw *dev, struct ieee80211_vif *vif)*/
3898 /* LDV_COMMENT_BEGIN_PREP */
3899 #define ADM8211_INT(x) \
3900 do { \
3901 if (unlikely(stsr & ADM8211_STSR_ ## x)) \
3902 wiphy_debug(dev->wiphy, "%s\n", #x); \
3903 } while (0)
3904 #undef ADM8211_INT
3905 #define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
3906 static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \
3907 u16 addr, u32 value) { \
3908 struct adm8211_priv *priv = dev->priv; \
3909 unsigned int i; \
3910 u32 reg, bitbuf; \
3911 \
3912 value &= v_mask; \
3913 addr &= a_mask; \
3914 bitbuf = (value << v_shift) | (addr << a_shift); \
3915 \
3916 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \
3917 ADM8211_CSR_READ(SYNRF); \
3918 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \
3919 ADM8211_CSR_READ(SYNRF); \
3920 \
3921 if (prewrite) { \
3922 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
3923 ADM8211_CSR_READ(SYNRF); \
3924 } \
3925 \
3926 for (i = 0; i <= bits; i++) { \
3927 if (bitbuf & (1 << (bits - i))) \
3928 reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \
3929 else \
3930 reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \
3931 \
3932 ADM8211_CSR_WRITE(SYNRF, reg); \
3933 ADM8211_CSR_READ(SYNRF); \
3934 \
3935 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
3936 ADM8211_CSR_READ(SYNRF); \
3937 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
3938 ADM8211_CSR_READ(SYNRF); \
3939 } \
3940 \
3941 if (postwrite == 1) { \
3942 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
3943 ADM8211_CSR_READ(SYNRF); \
3944 } \
3945 if (postwrite == 2) { \
3946 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
3947 ADM8211_CSR_READ(SYNRF); \
3948 } \
3949 \
3950 ADM8211_CSR_WRITE(SYNRF, 0); \
3951 ADM8211_CSR_READ(SYNRF); \
3952 }
3953 #undef WRITE_SYN
3954 /* LDV_COMMENT_END_PREP */
3955 /* LDV_COMMENT_FUNCTION_CALL Function from field "add_interface" from driver structure with callbacks "adm8211_ops" */
3956 ldv_handler_precall();
3957 adm8211_add_interface( var_group2, var_group4);
3958 /* LDV_COMMENT_BEGIN_PREP */
3959 #define IEEE80211_DUR_DS_LONG_PREAMBLE 144
3960 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
3961 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24
3962 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
3963 #define IEEE80211_DUR_DS_SLOW_ACK 112
3964 #define IEEE80211_DUR_DS_FAST_ACK 56
3965 #define IEEE80211_DUR_DS_SLOW_CTS 112
3966 #define IEEE80211_DUR_DS_FAST_CTS 56
3967 #define IEEE80211_DUR_DS_SLOT 20
3968 #define IEEE80211_DUR_DS_SIFS 10
3969 #ifdef CONFIG_PM
3970 #endif
3971 #ifdef CONFIG_PM
3972 #endif
3973 /* LDV_COMMENT_END_PREP */
3974
3975
3976
3977
3978 }
3979
3980 break;
3981 case 6: {
3982
3983 /** STRUCT: struct type: ieee80211_ops, struct name: adm8211_ops **/
3984
3985
3986 /* content: static void adm8211_remove_interface(struct ieee80211_hw *dev, struct ieee80211_vif *vif)*/
3987 /* LDV_COMMENT_BEGIN_PREP */
3988 #define ADM8211_INT(x) \
3989 do { \
3990 if (unlikely(stsr & ADM8211_STSR_ ## x)) \
3991 wiphy_debug(dev->wiphy, "%s\n", #x); \
3992 } while (0)
3993 #undef ADM8211_INT
3994 #define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
3995 static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \
3996 u16 addr, u32 value) { \
3997 struct adm8211_priv *priv = dev->priv; \
3998 unsigned int i; \
3999 u32 reg, bitbuf; \
4000 \
4001 value &= v_mask; \
4002 addr &= a_mask; \
4003 bitbuf = (value << v_shift) | (addr << a_shift); \
4004 \
4005 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \
4006 ADM8211_CSR_READ(SYNRF); \
4007 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \
4008 ADM8211_CSR_READ(SYNRF); \
4009 \
4010 if (prewrite) { \
4011 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
4012 ADM8211_CSR_READ(SYNRF); \
4013 } \
4014 \
4015 for (i = 0; i <= bits; i++) { \
4016 if (bitbuf & (1 << (bits - i))) \
4017 reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \
4018 else \
4019 reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \
4020 \
4021 ADM8211_CSR_WRITE(SYNRF, reg); \
4022 ADM8211_CSR_READ(SYNRF); \
4023 \
4024 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
4025 ADM8211_CSR_READ(SYNRF); \
4026 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
4027 ADM8211_CSR_READ(SYNRF); \
4028 } \
4029 \
4030 if (postwrite == 1) { \
4031 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
4032 ADM8211_CSR_READ(SYNRF); \
4033 } \
4034 if (postwrite == 2) { \
4035 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
4036 ADM8211_CSR_READ(SYNRF); \
4037 } \
4038 \
4039 ADM8211_CSR_WRITE(SYNRF, 0); \
4040 ADM8211_CSR_READ(SYNRF); \
4041 }
4042 #undef WRITE_SYN
4043 /* LDV_COMMENT_END_PREP */
4044 /* LDV_COMMENT_FUNCTION_CALL Function from field "remove_interface" from driver structure with callbacks "adm8211_ops" */
4045 ldv_handler_precall();
4046 adm8211_remove_interface( var_group2, var_group4);
4047 /* LDV_COMMENT_BEGIN_PREP */
4048 #define IEEE80211_DUR_DS_LONG_PREAMBLE 144
4049 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
4050 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24
4051 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
4052 #define IEEE80211_DUR_DS_SLOW_ACK 112
4053 #define IEEE80211_DUR_DS_FAST_ACK 56
4054 #define IEEE80211_DUR_DS_SLOW_CTS 112
4055 #define IEEE80211_DUR_DS_FAST_CTS 56
4056 #define IEEE80211_DUR_DS_SLOT 20
4057 #define IEEE80211_DUR_DS_SIFS 10
4058 #ifdef CONFIG_PM
4059 #endif
4060 #ifdef CONFIG_PM
4061 #endif
4062 /* LDV_COMMENT_END_PREP */
4063
4064
4065
4066
4067 }
4068
4069 break;
4070 case 7: {
4071
4072 /** STRUCT: struct type: ieee80211_ops, struct name: adm8211_ops **/
4073
4074
4075 /* content: static int adm8211_config(struct ieee80211_hw *dev, u32 changed)*/
4076 /* LDV_COMMENT_BEGIN_PREP */
4077 #define ADM8211_INT(x) \
4078 do { \
4079 if (unlikely(stsr & ADM8211_STSR_ ## x)) \
4080 wiphy_debug(dev->wiphy, "%s\n", #x); \
4081 } while (0)
4082 #undef ADM8211_INT
4083 #define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
4084 static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \
4085 u16 addr, u32 value) { \
4086 struct adm8211_priv *priv = dev->priv; \
4087 unsigned int i; \
4088 u32 reg, bitbuf; \
4089 \
4090 value &= v_mask; \
4091 addr &= a_mask; \
4092 bitbuf = (value << v_shift) | (addr << a_shift); \
4093 \
4094 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \
4095 ADM8211_CSR_READ(SYNRF); \
4096 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \
4097 ADM8211_CSR_READ(SYNRF); \
4098 \
4099 if (prewrite) { \
4100 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
4101 ADM8211_CSR_READ(SYNRF); \
4102 } \
4103 \
4104 for (i = 0; i <= bits; i++) { \
4105 if (bitbuf & (1 << (bits - i))) \
4106 reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \
4107 else \
4108 reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \
4109 \
4110 ADM8211_CSR_WRITE(SYNRF, reg); \
4111 ADM8211_CSR_READ(SYNRF); \
4112 \
4113 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
4114 ADM8211_CSR_READ(SYNRF); \
4115 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
4116 ADM8211_CSR_READ(SYNRF); \
4117 } \
4118 \
4119 if (postwrite == 1) { \
4120 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
4121 ADM8211_CSR_READ(SYNRF); \
4122 } \
4123 if (postwrite == 2) { \
4124 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
4125 ADM8211_CSR_READ(SYNRF); \
4126 } \
4127 \
4128 ADM8211_CSR_WRITE(SYNRF, 0); \
4129 ADM8211_CSR_READ(SYNRF); \
4130 }
4131 #undef WRITE_SYN
4132 /* LDV_COMMENT_END_PREP */
4133 /* LDV_COMMENT_FUNCTION_CALL Function from field "config" from driver structure with callbacks "adm8211_ops" */
4134 ldv_handler_precall();
4135 adm8211_config( var_group2, var_adm8211_config_21_p1);
4136 /* LDV_COMMENT_BEGIN_PREP */
4137 #define IEEE80211_DUR_DS_LONG_PREAMBLE 144
4138 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
4139 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24
4140 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
4141 #define IEEE80211_DUR_DS_SLOW_ACK 112
4142 #define IEEE80211_DUR_DS_FAST_ACK 56
4143 #define IEEE80211_DUR_DS_SLOW_CTS 112
4144 #define IEEE80211_DUR_DS_FAST_CTS 56
4145 #define IEEE80211_DUR_DS_SLOT 20
4146 #define IEEE80211_DUR_DS_SIFS 10
4147 #ifdef CONFIG_PM
4148 #endif
4149 #ifdef CONFIG_PM
4150 #endif
4151 /* LDV_COMMENT_END_PREP */
4152
4153
4154
4155
4156 }
4157
4158 break;
4159 case 8: {
4160
4161 /** STRUCT: struct type: ieee80211_ops, struct name: adm8211_ops **/
4162
4163
4164 /* content: static void adm8211_bss_info_changed(struct ieee80211_hw *dev, struct ieee80211_vif *vif, struct ieee80211_bss_conf *conf, u32 changes)*/
4165 /* LDV_COMMENT_BEGIN_PREP */
4166 #define ADM8211_INT(x) \
4167 do { \
4168 if (unlikely(stsr & ADM8211_STSR_ ## x)) \
4169 wiphy_debug(dev->wiphy, "%s\n", #x); \
4170 } while (0)
4171 #undef ADM8211_INT
4172 #define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
4173 static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \
4174 u16 addr, u32 value) { \
4175 struct adm8211_priv *priv = dev->priv; \
4176 unsigned int i; \
4177 u32 reg, bitbuf; \
4178 \
4179 value &= v_mask; \
4180 addr &= a_mask; \
4181 bitbuf = (value << v_shift) | (addr << a_shift); \
4182 \
4183 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \
4184 ADM8211_CSR_READ(SYNRF); \
4185 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \
4186 ADM8211_CSR_READ(SYNRF); \
4187 \
4188 if (prewrite) { \
4189 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
4190 ADM8211_CSR_READ(SYNRF); \
4191 } \
4192 \
4193 for (i = 0; i <= bits; i++) { \
4194 if (bitbuf & (1 << (bits - i))) \
4195 reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \
4196 else \
4197 reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \
4198 \
4199 ADM8211_CSR_WRITE(SYNRF, reg); \
4200 ADM8211_CSR_READ(SYNRF); \
4201 \
4202 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
4203 ADM8211_CSR_READ(SYNRF); \
4204 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
4205 ADM8211_CSR_READ(SYNRF); \
4206 } \
4207 \
4208 if (postwrite == 1) { \
4209 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
4210 ADM8211_CSR_READ(SYNRF); \
4211 } \
4212 if (postwrite == 2) { \
4213 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
4214 ADM8211_CSR_READ(SYNRF); \
4215 } \
4216 \
4217 ADM8211_CSR_WRITE(SYNRF, 0); \
4218 ADM8211_CSR_READ(SYNRF); \
4219 }
4220 #undef WRITE_SYN
4221 /* LDV_COMMENT_END_PREP */
4222 /* LDV_COMMENT_FUNCTION_CALL Function from field "bss_info_changed" from driver structure with callbacks "adm8211_ops" */
4223 ldv_handler_precall();
4224 adm8211_bss_info_changed( var_group2, var_group4, var_adm8211_bss_info_changed_22_p2, var_adm8211_bss_info_changed_22_p3);
4225 /* LDV_COMMENT_BEGIN_PREP */
4226 #define IEEE80211_DUR_DS_LONG_PREAMBLE 144
4227 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
4228 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24
4229 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
4230 #define IEEE80211_DUR_DS_SLOW_ACK 112
4231 #define IEEE80211_DUR_DS_FAST_ACK 56
4232 #define IEEE80211_DUR_DS_SLOW_CTS 112
4233 #define IEEE80211_DUR_DS_FAST_CTS 56
4234 #define IEEE80211_DUR_DS_SLOT 20
4235 #define IEEE80211_DUR_DS_SIFS 10
4236 #ifdef CONFIG_PM
4237 #endif
4238 #ifdef CONFIG_PM
4239 #endif
4240 /* LDV_COMMENT_END_PREP */
4241
4242
4243
4244
4245 }
4246
4247 break;
4248 case 9: {
4249
4250 /** STRUCT: struct type: ieee80211_ops, struct name: adm8211_ops **/
4251
4252
4253 /* content: static u64 adm8211_prepare_multicast(struct ieee80211_hw *hw, struct netdev_hw_addr_list *mc_list)*/
4254 /* LDV_COMMENT_BEGIN_PREP */
4255 #define ADM8211_INT(x) \
4256 do { \
4257 if (unlikely(stsr & ADM8211_STSR_ ## x)) \
4258 wiphy_debug(dev->wiphy, "%s\n", #x); \
4259 } while (0)
4260 #undef ADM8211_INT
4261 #define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
4262 static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \
4263 u16 addr, u32 value) { \
4264 struct adm8211_priv *priv = dev->priv; \
4265 unsigned int i; \
4266 u32 reg, bitbuf; \
4267 \
4268 value &= v_mask; \
4269 addr &= a_mask; \
4270 bitbuf = (value << v_shift) | (addr << a_shift); \
4271 \
4272 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \
4273 ADM8211_CSR_READ(SYNRF); \
4274 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \
4275 ADM8211_CSR_READ(SYNRF); \
4276 \
4277 if (prewrite) { \
4278 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
4279 ADM8211_CSR_READ(SYNRF); \
4280 } \
4281 \
4282 for (i = 0; i <= bits; i++) { \
4283 if (bitbuf & (1 << (bits - i))) \
4284 reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \
4285 else \
4286 reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \
4287 \
4288 ADM8211_CSR_WRITE(SYNRF, reg); \
4289 ADM8211_CSR_READ(SYNRF); \
4290 \
4291 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
4292 ADM8211_CSR_READ(SYNRF); \
4293 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
4294 ADM8211_CSR_READ(SYNRF); \
4295 } \
4296 \
4297 if (postwrite == 1) { \
4298 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
4299 ADM8211_CSR_READ(SYNRF); \
4300 } \
4301 if (postwrite == 2) { \
4302 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
4303 ADM8211_CSR_READ(SYNRF); \
4304 } \
4305 \
4306 ADM8211_CSR_WRITE(SYNRF, 0); \
4307 ADM8211_CSR_READ(SYNRF); \
4308 }
4309 #undef WRITE_SYN
4310 /* LDV_COMMENT_END_PREP */
4311 /* LDV_COMMENT_FUNCTION_CALL Function from field "prepare_multicast" from driver structure with callbacks "adm8211_ops" */
4312 ldv_handler_precall();
4313 adm8211_prepare_multicast( var_group2, var_group5);
4314 /* LDV_COMMENT_BEGIN_PREP */
4315 #define IEEE80211_DUR_DS_LONG_PREAMBLE 144
4316 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
4317 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24
4318 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
4319 #define IEEE80211_DUR_DS_SLOW_ACK 112
4320 #define IEEE80211_DUR_DS_FAST_ACK 56
4321 #define IEEE80211_DUR_DS_SLOW_CTS 112
4322 #define IEEE80211_DUR_DS_FAST_CTS 56
4323 #define IEEE80211_DUR_DS_SLOT 20
4324 #define IEEE80211_DUR_DS_SIFS 10
4325 #ifdef CONFIG_PM
4326 #endif
4327 #ifdef CONFIG_PM
4328 #endif
4329 /* LDV_COMMENT_END_PREP */
4330
4331
4332
4333
4334 }
4335
4336 break;
4337 case 10: {
4338
4339 /** STRUCT: struct type: ieee80211_ops, struct name: adm8211_ops **/
4340
4341
4342 /* content: static void adm8211_configure_filter(struct ieee80211_hw *dev, unsigned int changed_flags, unsigned int *total_flags, u64 multicast)*/
4343 /* LDV_COMMENT_BEGIN_PREP */
4344 #define ADM8211_INT(x) \
4345 do { \
4346 if (unlikely(stsr & ADM8211_STSR_ ## x)) \
4347 wiphy_debug(dev->wiphy, "%s\n", #x); \
4348 } while (0)
4349 #undef ADM8211_INT
4350 #define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
4351 static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \
4352 u16 addr, u32 value) { \
4353 struct adm8211_priv *priv = dev->priv; \
4354 unsigned int i; \
4355 u32 reg, bitbuf; \
4356 \
4357 value &= v_mask; \
4358 addr &= a_mask; \
4359 bitbuf = (value << v_shift) | (addr << a_shift); \
4360 \
4361 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \
4362 ADM8211_CSR_READ(SYNRF); \
4363 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \
4364 ADM8211_CSR_READ(SYNRF); \
4365 \
4366 if (prewrite) { \
4367 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
4368 ADM8211_CSR_READ(SYNRF); \
4369 } \
4370 \
4371 for (i = 0; i <= bits; i++) { \
4372 if (bitbuf & (1 << (bits - i))) \
4373 reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \
4374 else \
4375 reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \
4376 \
4377 ADM8211_CSR_WRITE(SYNRF, reg); \
4378 ADM8211_CSR_READ(SYNRF); \
4379 \
4380 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
4381 ADM8211_CSR_READ(SYNRF); \
4382 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
4383 ADM8211_CSR_READ(SYNRF); \
4384 } \
4385 \
4386 if (postwrite == 1) { \
4387 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
4388 ADM8211_CSR_READ(SYNRF); \
4389 } \
4390 if (postwrite == 2) { \
4391 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
4392 ADM8211_CSR_READ(SYNRF); \
4393 } \
4394 \
4395 ADM8211_CSR_WRITE(SYNRF, 0); \
4396 ADM8211_CSR_READ(SYNRF); \
4397 }
4398 #undef WRITE_SYN
4399 /* LDV_COMMENT_END_PREP */
4400 /* LDV_COMMENT_FUNCTION_CALL Function from field "configure_filter" from driver structure with callbacks "adm8211_ops" */
4401 ldv_handler_precall();
4402 adm8211_configure_filter( var_group2, var_adm8211_configure_filter_24_p1, var_adm8211_configure_filter_24_p2, var_adm8211_configure_filter_24_p3);
4403 /* LDV_COMMENT_BEGIN_PREP */
4404 #define IEEE80211_DUR_DS_LONG_PREAMBLE 144
4405 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
4406 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24
4407 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
4408 #define IEEE80211_DUR_DS_SLOW_ACK 112
4409 #define IEEE80211_DUR_DS_FAST_ACK 56
4410 #define IEEE80211_DUR_DS_SLOW_CTS 112
4411 #define IEEE80211_DUR_DS_FAST_CTS 56
4412 #define IEEE80211_DUR_DS_SLOT 20
4413 #define IEEE80211_DUR_DS_SIFS 10
4414 #ifdef CONFIG_PM
4415 #endif
4416 #ifdef CONFIG_PM
4417 #endif
4418 /* LDV_COMMENT_END_PREP */
4419
4420
4421
4422
4423 }
4424
4425 break;
4426 case 11: {
4427
4428 /** STRUCT: struct type: ieee80211_ops, struct name: adm8211_ops **/
4429
4430
4431 /* content: static int adm8211_get_stats(struct ieee80211_hw *dev, struct ieee80211_low_level_stats *stats)*/
4432 /* LDV_COMMENT_END_PREP */
4433 /* LDV_COMMENT_FUNCTION_CALL Function from field "get_stats" from driver structure with callbacks "adm8211_ops" */
4434 ldv_handler_precall();
4435 adm8211_get_stats( var_group2, var_group6);
4436 /* LDV_COMMENT_BEGIN_PREP */
4437 #define ADM8211_INT(x) \
4438 do { \
4439 if (unlikely(stsr & ADM8211_STSR_ ## x)) \
4440 wiphy_debug(dev->wiphy, "%s\n", #x); \
4441 } while (0)
4442 #undef ADM8211_INT
4443 #define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
4444 static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \
4445 u16 addr, u32 value) { \
4446 struct adm8211_priv *priv = dev->priv; \
4447 unsigned int i; \
4448 u32 reg, bitbuf; \
4449 \
4450 value &= v_mask; \
4451 addr &= a_mask; \
4452 bitbuf = (value << v_shift) | (addr << a_shift); \
4453 \
4454 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \
4455 ADM8211_CSR_READ(SYNRF); \
4456 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \
4457 ADM8211_CSR_READ(SYNRF); \
4458 \
4459 if (prewrite) { \
4460 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
4461 ADM8211_CSR_READ(SYNRF); \
4462 } \
4463 \
4464 for (i = 0; i <= bits; i++) { \
4465 if (bitbuf & (1 << (bits - i))) \
4466 reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \
4467 else \
4468 reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \
4469 \
4470 ADM8211_CSR_WRITE(SYNRF, reg); \
4471 ADM8211_CSR_READ(SYNRF); \
4472 \
4473 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
4474 ADM8211_CSR_READ(SYNRF); \
4475 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
4476 ADM8211_CSR_READ(SYNRF); \
4477 } \
4478 \
4479 if (postwrite == 1) { \
4480 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
4481 ADM8211_CSR_READ(SYNRF); \
4482 } \
4483 if (postwrite == 2) { \
4484 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
4485 ADM8211_CSR_READ(SYNRF); \
4486 } \
4487 \
4488 ADM8211_CSR_WRITE(SYNRF, 0); \
4489 ADM8211_CSR_READ(SYNRF); \
4490 }
4491 #undef WRITE_SYN
4492 #define IEEE80211_DUR_DS_LONG_PREAMBLE 144
4493 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
4494 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24
4495 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
4496 #define IEEE80211_DUR_DS_SLOW_ACK 112
4497 #define IEEE80211_DUR_DS_FAST_ACK 56
4498 #define IEEE80211_DUR_DS_SLOW_CTS 112
4499 #define IEEE80211_DUR_DS_FAST_CTS 56
4500 #define IEEE80211_DUR_DS_SLOT 20
4501 #define IEEE80211_DUR_DS_SIFS 10
4502 #ifdef CONFIG_PM
4503 #endif
4504 #ifdef CONFIG_PM
4505 #endif
4506 /* LDV_COMMENT_END_PREP */
4507
4508
4509
4510
4511 }
4512
4513 break;
4514 case 12: {
4515
4516 /** STRUCT: struct type: ieee80211_ops, struct name: adm8211_ops **/
4517
4518
4519 /* content: static u64 adm8211_get_tsft(struct ieee80211_hw *dev, struct ieee80211_vif *vif)*/
4520 /* LDV_COMMENT_BEGIN_PREP */
4521 #define ADM8211_INT(x) \
4522 do { \
4523 if (unlikely(stsr & ADM8211_STSR_ ## x)) \
4524 wiphy_debug(dev->wiphy, "%s\n", #x); \
4525 } while (0)
4526 #undef ADM8211_INT
4527 #define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
4528 static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \
4529 u16 addr, u32 value) { \
4530 struct adm8211_priv *priv = dev->priv; \
4531 unsigned int i; \
4532 u32 reg, bitbuf; \
4533 \
4534 value &= v_mask; \
4535 addr &= a_mask; \
4536 bitbuf = (value << v_shift) | (addr << a_shift); \
4537 \
4538 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \
4539 ADM8211_CSR_READ(SYNRF); \
4540 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \
4541 ADM8211_CSR_READ(SYNRF); \
4542 \
4543 if (prewrite) { \
4544 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
4545 ADM8211_CSR_READ(SYNRF); \
4546 } \
4547 \
4548 for (i = 0; i <= bits; i++) { \
4549 if (bitbuf & (1 << (bits - i))) \
4550 reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \
4551 else \
4552 reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \
4553 \
4554 ADM8211_CSR_WRITE(SYNRF, reg); \
4555 ADM8211_CSR_READ(SYNRF); \
4556 \
4557 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
4558 ADM8211_CSR_READ(SYNRF); \
4559 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
4560 ADM8211_CSR_READ(SYNRF); \
4561 } \
4562 \
4563 if (postwrite == 1) { \
4564 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
4565 ADM8211_CSR_READ(SYNRF); \
4566 } \
4567 if (postwrite == 2) { \
4568 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
4569 ADM8211_CSR_READ(SYNRF); \
4570 } \
4571 \
4572 ADM8211_CSR_WRITE(SYNRF, 0); \
4573 ADM8211_CSR_READ(SYNRF); \
4574 }
4575 #undef WRITE_SYN
4576 /* LDV_COMMENT_END_PREP */
4577 /* LDV_COMMENT_FUNCTION_CALL Function from field "get_tsf" from driver structure with callbacks "adm8211_ops" */
4578 ldv_handler_precall();
4579 adm8211_get_tsft( var_group2, var_group4);
4580 /* LDV_COMMENT_BEGIN_PREP */
4581 #define IEEE80211_DUR_DS_LONG_PREAMBLE 144
4582 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
4583 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24
4584 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
4585 #define IEEE80211_DUR_DS_SLOW_ACK 112
4586 #define IEEE80211_DUR_DS_FAST_ACK 56
4587 #define IEEE80211_DUR_DS_SLOW_CTS 112
4588 #define IEEE80211_DUR_DS_FAST_CTS 56
4589 #define IEEE80211_DUR_DS_SLOT 20
4590 #define IEEE80211_DUR_DS_SIFS 10
4591 #ifdef CONFIG_PM
4592 #endif
4593 #ifdef CONFIG_PM
4594 #endif
4595 /* LDV_COMMENT_END_PREP */
4596
4597
4598
4599
4600 }
4601
4602 break;
4603 case 13: {
4604
4605 /** STRUCT: struct type: pci_driver, struct name: adm8211_driver **/
4606 if(ldv_s_adm8211_driver_pci_driver==0) {
4607
4608 /* content: static int adm8211_probe(struct pci_dev *pdev, const struct pci_device_id *id)*/
4609 /* LDV_COMMENT_BEGIN_PREP */
4610 #define ADM8211_INT(x) \
4611 do { \
4612 if (unlikely(stsr & ADM8211_STSR_ ## x)) \
4613 wiphy_debug(dev->wiphy, "%s\n", #x); \
4614 } while (0)
4615 #undef ADM8211_INT
4616 #define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
4617 static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \
4618 u16 addr, u32 value) { \
4619 struct adm8211_priv *priv = dev->priv; \
4620 unsigned int i; \
4621 u32 reg, bitbuf; \
4622 \
4623 value &= v_mask; \
4624 addr &= a_mask; \
4625 bitbuf = (value << v_shift) | (addr << a_shift); \
4626 \
4627 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \
4628 ADM8211_CSR_READ(SYNRF); \
4629 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \
4630 ADM8211_CSR_READ(SYNRF); \
4631 \
4632 if (prewrite) { \
4633 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
4634 ADM8211_CSR_READ(SYNRF); \
4635 } \
4636 \
4637 for (i = 0; i <= bits; i++) { \
4638 if (bitbuf & (1 << (bits - i))) \
4639 reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \
4640 else \
4641 reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \
4642 \
4643 ADM8211_CSR_WRITE(SYNRF, reg); \
4644 ADM8211_CSR_READ(SYNRF); \
4645 \
4646 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
4647 ADM8211_CSR_READ(SYNRF); \
4648 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
4649 ADM8211_CSR_READ(SYNRF); \
4650 } \
4651 \
4652 if (postwrite == 1) { \
4653 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
4654 ADM8211_CSR_READ(SYNRF); \
4655 } \
4656 if (postwrite == 2) { \
4657 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
4658 ADM8211_CSR_READ(SYNRF); \
4659 } \
4660 \
4661 ADM8211_CSR_WRITE(SYNRF, 0); \
4662 ADM8211_CSR_READ(SYNRF); \
4663 }
4664 #undef WRITE_SYN
4665 #define IEEE80211_DUR_DS_LONG_PREAMBLE 144
4666 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
4667 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24
4668 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
4669 #define IEEE80211_DUR_DS_SLOW_ACK 112
4670 #define IEEE80211_DUR_DS_FAST_ACK 56
4671 #define IEEE80211_DUR_DS_SLOW_CTS 112
4672 #define IEEE80211_DUR_DS_FAST_CTS 56
4673 #define IEEE80211_DUR_DS_SLOT 20
4674 #define IEEE80211_DUR_DS_SIFS 10
4675 /* LDV_COMMENT_END_PREP */
4676 /* LDV_COMMENT_FUNCTION_CALL Function from field "probe" from driver structure with callbacks "adm8211_driver". Standart function test for correct return result. */
4677 res_adm8211_probe_35 = adm8211_probe( var_group7, var_adm8211_probe_35_p1);
4678 ldv_check_return_value(res_adm8211_probe_35);
4679 ldv_check_return_value_probe(res_adm8211_probe_35);
4680 if(res_adm8211_probe_35)
4681 goto ldv_module_exit;
4682 /* LDV_COMMENT_BEGIN_PREP */
4683 #ifdef CONFIG_PM
4684 #endif
4685 #ifdef CONFIG_PM
4686 #endif
4687 /* LDV_COMMENT_END_PREP */
4688 ldv_s_adm8211_driver_pci_driver++;
4689
4690 }
4691
4692 }
4693
4694 break;
4695 case 14: {
4696
4697 /** STRUCT: struct type: pci_driver, struct name: adm8211_driver **/
4698 if(ldv_s_adm8211_driver_pci_driver==1) {
4699
4700 /* content: static void adm8211_remove(struct pci_dev *pdev)*/
4701 /* LDV_COMMENT_BEGIN_PREP */
4702 #define ADM8211_INT(x) \
4703 do { \
4704 if (unlikely(stsr & ADM8211_STSR_ ## x)) \
4705 wiphy_debug(dev->wiphy, "%s\n", #x); \
4706 } while (0)
4707 #undef ADM8211_INT
4708 #define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
4709 static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \
4710 u16 addr, u32 value) { \
4711 struct adm8211_priv *priv = dev->priv; \
4712 unsigned int i; \
4713 u32 reg, bitbuf; \
4714 \
4715 value &= v_mask; \
4716 addr &= a_mask; \
4717 bitbuf = (value << v_shift) | (addr << a_shift); \
4718 \
4719 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \
4720 ADM8211_CSR_READ(SYNRF); \
4721 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \
4722 ADM8211_CSR_READ(SYNRF); \
4723 \
4724 if (prewrite) { \
4725 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
4726 ADM8211_CSR_READ(SYNRF); \
4727 } \
4728 \
4729 for (i = 0; i <= bits; i++) { \
4730 if (bitbuf & (1 << (bits - i))) \
4731 reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \
4732 else \
4733 reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \
4734 \
4735 ADM8211_CSR_WRITE(SYNRF, reg); \
4736 ADM8211_CSR_READ(SYNRF); \
4737 \
4738 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
4739 ADM8211_CSR_READ(SYNRF); \
4740 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
4741 ADM8211_CSR_READ(SYNRF); \
4742 } \
4743 \
4744 if (postwrite == 1) { \
4745 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
4746 ADM8211_CSR_READ(SYNRF); \
4747 } \
4748 if (postwrite == 2) { \
4749 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
4750 ADM8211_CSR_READ(SYNRF); \
4751 } \
4752 \
4753 ADM8211_CSR_WRITE(SYNRF, 0); \
4754 ADM8211_CSR_READ(SYNRF); \
4755 }
4756 #undef WRITE_SYN
4757 #define IEEE80211_DUR_DS_LONG_PREAMBLE 144
4758 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
4759 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24
4760 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
4761 #define IEEE80211_DUR_DS_SLOW_ACK 112
4762 #define IEEE80211_DUR_DS_FAST_ACK 56
4763 #define IEEE80211_DUR_DS_SLOW_CTS 112
4764 #define IEEE80211_DUR_DS_FAST_CTS 56
4765 #define IEEE80211_DUR_DS_SLOT 20
4766 #define IEEE80211_DUR_DS_SIFS 10
4767 /* LDV_COMMENT_END_PREP */
4768 /* LDV_COMMENT_FUNCTION_CALL Function from field "remove" from driver structure with callbacks "adm8211_driver" */
4769 ldv_handler_precall();
4770 adm8211_remove( var_group7);
4771 /* LDV_COMMENT_BEGIN_PREP */
4772 #ifdef CONFIG_PM
4773 #endif
4774 #ifdef CONFIG_PM
4775 #endif
4776 /* LDV_COMMENT_END_PREP */
4777 ldv_s_adm8211_driver_pci_driver=0;
4778
4779 }
4780
4781 }
4782
4783 break;
4784 case 15: {
4785
4786 /** STRUCT: struct type: pci_driver, struct name: adm8211_driver **/
4787
4788
4789 /* content: static int adm8211_suspend(struct pci_dev *pdev, pm_message_t state)*/
4790 /* LDV_COMMENT_BEGIN_PREP */
4791 #define ADM8211_INT(x) \
4792 do { \
4793 if (unlikely(stsr & ADM8211_STSR_ ## x)) \
4794 wiphy_debug(dev->wiphy, "%s\n", #x); \
4795 } while (0)
4796 #undef ADM8211_INT
4797 #define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
4798 static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \
4799 u16 addr, u32 value) { \
4800 struct adm8211_priv *priv = dev->priv; \
4801 unsigned int i; \
4802 u32 reg, bitbuf; \
4803 \
4804 value &= v_mask; \
4805 addr &= a_mask; \
4806 bitbuf = (value << v_shift) | (addr << a_shift); \
4807 \
4808 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \
4809 ADM8211_CSR_READ(SYNRF); \
4810 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \
4811 ADM8211_CSR_READ(SYNRF); \
4812 \
4813 if (prewrite) { \
4814 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
4815 ADM8211_CSR_READ(SYNRF); \
4816 } \
4817 \
4818 for (i = 0; i <= bits; i++) { \
4819 if (bitbuf & (1 << (bits - i))) \
4820 reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \
4821 else \
4822 reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \
4823 \
4824 ADM8211_CSR_WRITE(SYNRF, reg); \
4825 ADM8211_CSR_READ(SYNRF); \
4826 \
4827 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
4828 ADM8211_CSR_READ(SYNRF); \
4829 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
4830 ADM8211_CSR_READ(SYNRF); \
4831 } \
4832 \
4833 if (postwrite == 1) { \
4834 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
4835 ADM8211_CSR_READ(SYNRF); \
4836 } \
4837 if (postwrite == 2) { \
4838 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
4839 ADM8211_CSR_READ(SYNRF); \
4840 } \
4841 \
4842 ADM8211_CSR_WRITE(SYNRF, 0); \
4843 ADM8211_CSR_READ(SYNRF); \
4844 }
4845 #undef WRITE_SYN
4846 #define IEEE80211_DUR_DS_LONG_PREAMBLE 144
4847 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
4848 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24
4849 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
4850 #define IEEE80211_DUR_DS_SLOW_ACK 112
4851 #define IEEE80211_DUR_DS_FAST_ACK 56
4852 #define IEEE80211_DUR_DS_SLOW_CTS 112
4853 #define IEEE80211_DUR_DS_FAST_CTS 56
4854 #define IEEE80211_DUR_DS_SLOT 20
4855 #define IEEE80211_DUR_DS_SIFS 10
4856 #ifdef CONFIG_PM
4857 /* LDV_COMMENT_END_PREP */
4858 /* LDV_COMMENT_FUNCTION_CALL Function from field "suspend" from driver structure with callbacks "adm8211_driver" */
4859 ldv_handler_precall();
4860 adm8211_suspend( var_group7, var_adm8211_suspend_37_p1);
4861 /* LDV_COMMENT_BEGIN_PREP */
4862 #endif
4863 #ifdef CONFIG_PM
4864 #endif
4865 /* LDV_COMMENT_END_PREP */
4866
4867
4868
4869
4870 }
4871
4872 break;
4873 case 16: {
4874
4875 /** STRUCT: struct type: pci_driver, struct name: adm8211_driver **/
4876
4877
4878 /* content: static int adm8211_resume(struct pci_dev *pdev)*/
4879 /* LDV_COMMENT_BEGIN_PREP */
4880 #define ADM8211_INT(x) \
4881 do { \
4882 if (unlikely(stsr & ADM8211_STSR_ ## x)) \
4883 wiphy_debug(dev->wiphy, "%s\n", #x); \
4884 } while (0)
4885 #undef ADM8211_INT
4886 #define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
4887 static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \
4888 u16 addr, u32 value) { \
4889 struct adm8211_priv *priv = dev->priv; \
4890 unsigned int i; \
4891 u32 reg, bitbuf; \
4892 \
4893 value &= v_mask; \
4894 addr &= a_mask; \
4895 bitbuf = (value << v_shift) | (addr << a_shift); \
4896 \
4897 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \
4898 ADM8211_CSR_READ(SYNRF); \
4899 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \
4900 ADM8211_CSR_READ(SYNRF); \
4901 \
4902 if (prewrite) { \
4903 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
4904 ADM8211_CSR_READ(SYNRF); \
4905 } \
4906 \
4907 for (i = 0; i <= bits; i++) { \
4908 if (bitbuf & (1 << (bits - i))) \
4909 reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \
4910 else \
4911 reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \
4912 \
4913 ADM8211_CSR_WRITE(SYNRF, reg); \
4914 ADM8211_CSR_READ(SYNRF); \
4915 \
4916 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
4917 ADM8211_CSR_READ(SYNRF); \
4918 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
4919 ADM8211_CSR_READ(SYNRF); \
4920 } \
4921 \
4922 if (postwrite == 1) { \
4923 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
4924 ADM8211_CSR_READ(SYNRF); \
4925 } \
4926 if (postwrite == 2) { \
4927 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
4928 ADM8211_CSR_READ(SYNRF); \
4929 } \
4930 \
4931 ADM8211_CSR_WRITE(SYNRF, 0); \
4932 ADM8211_CSR_READ(SYNRF); \
4933 }
4934 #undef WRITE_SYN
4935 #define IEEE80211_DUR_DS_LONG_PREAMBLE 144
4936 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
4937 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24
4938 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
4939 #define IEEE80211_DUR_DS_SLOW_ACK 112
4940 #define IEEE80211_DUR_DS_FAST_ACK 56
4941 #define IEEE80211_DUR_DS_SLOW_CTS 112
4942 #define IEEE80211_DUR_DS_FAST_CTS 56
4943 #define IEEE80211_DUR_DS_SLOT 20
4944 #define IEEE80211_DUR_DS_SIFS 10
4945 #ifdef CONFIG_PM
4946 /* LDV_COMMENT_END_PREP */
4947 /* LDV_COMMENT_FUNCTION_CALL Function from field "resume" from driver structure with callbacks "adm8211_driver" */
4948 ldv_handler_precall();
4949 adm8211_resume( var_group7);
4950 /* LDV_COMMENT_BEGIN_PREP */
4951 #endif
4952 #ifdef CONFIG_PM
4953 #endif
4954 /* LDV_COMMENT_END_PREP */
4955
4956
4957
4958
4959 }
4960
4961 break;
4962 case 17: {
4963
4964 /** CALLBACK SECTION request_irq **/
4965 LDV_IN_INTERRUPT=2;
4966
4967 /* content: static irqreturn_t adm8211_interrupt(int irq, void *dev_id)*/
4968 /* LDV_COMMENT_END_PREP */
4969 /* LDV_COMMENT_FUNCTION_CALL */
4970 ldv_handler_precall();
4971 adm8211_interrupt( var_adm8211_interrupt_9_p0, var_adm8211_interrupt_9_p1);
4972 /* LDV_COMMENT_BEGIN_PREP */
4973 #define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
4974 static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \
4975 u16 addr, u32 value) { \
4976 struct adm8211_priv *priv = dev->priv; \
4977 unsigned int i; \
4978 u32 reg, bitbuf; \
4979 \
4980 value &= v_mask; \
4981 addr &= a_mask; \
4982 bitbuf = (value << v_shift) | (addr << a_shift); \
4983 \
4984 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \
4985 ADM8211_CSR_READ(SYNRF); \
4986 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \
4987 ADM8211_CSR_READ(SYNRF); \
4988 \
4989 if (prewrite) { \
4990 ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
4991 ADM8211_CSR_READ(SYNRF); \
4992 } \
4993 \
4994 for (i = 0; i <= bits; i++) { \
4995 if (bitbuf & (1 << (bits - i))) \
4996 reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \
4997 else \
4998 reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \
4999 \
5000 ADM8211_CSR_WRITE(SYNRF, reg); \
5001 ADM8211_CSR_READ(SYNRF); \
5002 \
5003 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
5004 ADM8211_CSR_READ(SYNRF); \
5005 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
5006 ADM8211_CSR_READ(SYNRF); \
5007 } \
5008 \
5009 if (postwrite == 1) { \
5010 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
5011 ADM8211_CSR_READ(SYNRF); \
5012 } \
5013 if (postwrite == 2) { \
5014 ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
5015 ADM8211_CSR_READ(SYNRF); \
5016 } \
5017 \
5018 ADM8211_CSR_WRITE(SYNRF, 0); \
5019 ADM8211_CSR_READ(SYNRF); \
5020 }
5021 #undef WRITE_SYN
5022 #define IEEE80211_DUR_DS_LONG_PREAMBLE 144
5023 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
5024 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24
5025 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
5026 #define IEEE80211_DUR_DS_SLOW_ACK 112
5027 #define IEEE80211_DUR_DS_FAST_ACK 56
5028 #define IEEE80211_DUR_DS_SLOW_CTS 112
5029 #define IEEE80211_DUR_DS_FAST_CTS 56
5030 #define IEEE80211_DUR_DS_SLOT 20
5031 #define IEEE80211_DUR_DS_SIFS 10
5032 #ifdef CONFIG_PM
5033 #endif
5034 #ifdef CONFIG_PM
5035 #endif
5036 /* LDV_COMMENT_END_PREP */
5037 LDV_IN_INTERRUPT=1;
5038
5039
5040
5041 }
5042
5043 break;
5044 default: break;
5045
5046 }
5047
5048 }
5049
5050 ldv_module_exit:
5051
5052 /* LDV_COMMENT_FUNCTION_CALL Checks that all resources and locks are correctly released before the driver will be unloaded. */
5053 ldv_final: ldv_check_final_state();
5054
5055 /* LDV_COMMENT_END_FUNCTION_CALL_SECTION */
5056 return;
5057
5058 }
5059 #endif
5060
5061 /* LDV_COMMENT_END_MAIN */
5062
5063 #line 10 "/home/vitaly/ldv-launches/work/current--X--drivers--X--defaultlinux-4.9-rc1.tar.xz--X--331_1a--X--cpachecker/linux-4.9-rc1.tar.xz/csd_deg_dscv/3548/dscv_tempdir/dscv/ri/331_1a/drivers/net/wireless/admtek/adm8211.o.c.prepared" 1
2 #include <verifier/rcv.h>
3 #include <kernel-model/ERR.inc>
4
5 int LDV_DMA_MAP_CALLS = 0;
6
7 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_dma_map_page') maps page */
8 void ldv_dma_map_page(void) {
9 /* LDV_COMMENT_ASSERT Check that previos dma_mapping call was checked */
10 ldv_assert(LDV_DMA_MAP_CALLS == 0);
11 /* LDV_COMMENT_CHANGE_STATE Increase dma_mapping counter */
12 LDV_DMA_MAP_CALLS++;
13 }
14
15 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_dma_mapping_error') unmaps page */
16 void ldv_dma_mapping_error(void) {
17 /* LDV_COMMENT_ASSERT No dma_mapping calls to verify */
18 ldv_assert(LDV_DMA_MAP_CALLS != 0);
19 /* LDV_COMMENT_CHANGE_STATE Check that previos dma_mapping call was checked */
20 LDV_DMA_MAP_CALLS--;
21 }
22
23 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_check_final_state') Check that all module reference counters have their initial values at the end */
24 void ldv_check_final_state(void) {
25 /* LDV_COMMENT_ASSERT All incremented module reference counters should be decremented before module unloading*/
26 ldv_assert(LDV_DMA_MAP_CALLS == 0);
27 } 1 #ifndef _LDV_RCV_H_
2 #define _LDV_RCV_H_
3
4 /* If expr evaluates to zero, ldv_assert() causes a program to reach the error
5 label like the standard assert(). */
6 #define ldv_assert(expr) ((expr) ? 0 : ldv_error())
7
8 /* The error label wrapper. It is used because of some static verifiers (like
9 BLAST) don't accept multiple error labels through a program. */
10 static inline void ldv_error(void)
11 {
12 LDV_ERROR: goto LDV_ERROR;
13 }
14
15 /* If expr evaluates to zero, ldv_assume() causes an infinite loop that is
16 avoided by verifiers. */
17 #define ldv_assume(expr) ((expr) ? 0 : ldv_stop())
18
19 /* Infinite loop, that causes verifiers to skip such paths. */
20 static inline void ldv_stop(void) {
21 LDV_STOP: goto LDV_STOP;
22 }
23
24 /* Special nondeterministic functions. */
25 int ldv_undef_int(void);
26 void *ldv_undef_ptr(void);
27 unsigned long ldv_undef_ulong(void);
28 long ldv_undef_long(void);
29 /* Return nondeterministic negative integer number. */
30 static inline int ldv_undef_int_negative(void)
31 {
32 int ret = ldv_undef_int();
33
34 ldv_assume(ret < 0);
35
36 return ret;
37 }
38 /* Return nondeterministic nonpositive integer number. */
39 static inline int ldv_undef_int_nonpositive(void)
40 {
41 int ret = ldv_undef_int();
42
43 ldv_assume(ret <= 0);
44
45 return ret;
46 }
47
48 /* Add explicit model for __builin_expect GCC function. Without the model a
49 return value will be treated as nondetermined by verifiers. */
50 static inline long __builtin_expect(long exp, long c)
51 {
52 return exp;
53 }
54
55 /* This function causes the program to exit abnormally. GCC implements this
56 function by using a target-dependent mechanism (such as intentionally executing
57 an illegal instruction) or by calling abort. The mechanism used may vary from
58 release to release so you should not rely on any particular implementation.
59 http://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html */
60 static inline void __builtin_trap(void)
61 {
62 ldv_assert(0);
63 }
64
65 /* The constant is for simulating an error of ldv_undef_ptr() function. */
66 #define LDV_PTR_MAX 2012
67
68 #endif /* _LDV_RCV_H_ */ 1 #ifndef _LINUX_DMA_MAPPING_H
2 #define _LINUX_DMA_MAPPING_H
3
4 #include <linux/sizes.h>
5 #include <linux/string.h>
6 #include <linux/device.h>
7 #include <linux/err.h>
8 #include <linux/dma-debug.h>
9 #include <linux/dma-direction.h>
10 #include <linux/scatterlist.h>
11 #include <linux/kmemcheck.h>
12 #include <linux/bug.h>
13
14 /**
15 * List of possible attributes associated with a DMA mapping. The semantics
16 * of each attribute should be defined in Documentation/DMA-attributes.txt.
17 *
18 * DMA_ATTR_WRITE_BARRIER: DMA to a memory region with this attribute
19 * forces all pending DMA writes to complete.
20 */
21 #define DMA_ATTR_WRITE_BARRIER (1UL << 0)
22 /*
23 * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
24 * may be weakly ordered, that is that reads and writes may pass each other.
25 */
26 #define DMA_ATTR_WEAK_ORDERING (1UL << 1)
27 /*
28 * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
29 * buffered to improve performance.
30 */
31 #define DMA_ATTR_WRITE_COMBINE (1UL << 2)
32 /*
33 * DMA_ATTR_NON_CONSISTENT: Lets the platform to choose to return either
34 * consistent or non-consistent memory as it sees fit.
35 */
36 #define DMA_ATTR_NON_CONSISTENT (1UL << 3)
37 /*
38 * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform to avoid creating a kernel
39 * virtual mapping for the allocated buffer.
40 */
41 #define DMA_ATTR_NO_KERNEL_MAPPING (1UL << 4)
42 /*
43 * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
44 * the CPU cache for the given buffer assuming that it has been already
45 * transferred to 'device' domain.
46 */
47 #define DMA_ATTR_SKIP_CPU_SYNC (1UL << 5)
48 /*
49 * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
50 * in physical memory.
51 */
52 #define DMA_ATTR_FORCE_CONTIGUOUS (1UL << 6)
53 /*
54 * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
55 * that it's probably not worth the time to try to allocate memory to in a way
56 * that gives better TLB efficiency.
57 */
58 #define DMA_ATTR_ALLOC_SINGLE_PAGES (1UL << 7)
59 /*
60 * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
61 * allocation failure reports (similarly to __GFP_NOWARN).
62 */
63 #define DMA_ATTR_NO_WARN (1UL << 8)
64
65 /*
66 * A dma_addr_t can hold any valid DMA or bus address for the platform.
67 * It can be given to a device to use as a DMA source or target. A CPU cannot
68 * reference a dma_addr_t directly because there may be translation between
69 * its physical address space and the bus address space.
70 */
71 struct dma_map_ops {
72 void* (*alloc)(struct device *dev, size_t size,
73 dma_addr_t *dma_handle, gfp_t gfp,
74 unsigned long attrs);
75 void (*free)(struct device *dev, size_t size,
76 void *vaddr, dma_addr_t dma_handle,
77 unsigned long attrs);
78 int (*mmap)(struct device *, struct vm_area_struct *,
79 void *, dma_addr_t, size_t,
80 unsigned long attrs);
81
82 int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
83 dma_addr_t, size_t, unsigned long attrs);
84
85 dma_addr_t (*map_page)(struct device *dev, struct page *page,
86 unsigned long offset, size_t size,
87 enum dma_data_direction dir,
88 unsigned long attrs);
89 void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
90 size_t size, enum dma_data_direction dir,
91 unsigned long attrs);
92 /*
93 * map_sg returns 0 on error and a value > 0 on success.
94 * It should never return a value < 0.
95 */
96 int (*map_sg)(struct device *dev, struct scatterlist *sg,
97 int nents, enum dma_data_direction dir,
98 unsigned long attrs);
99 void (*unmap_sg)(struct device *dev,
100 struct scatterlist *sg, int nents,
101 enum dma_data_direction dir,
102 unsigned long attrs);
103 dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
104 size_t size, enum dma_data_direction dir,
105 unsigned long attrs);
106 void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
107 size_t size, enum dma_data_direction dir,
108 unsigned long attrs);
109 void (*sync_single_for_cpu)(struct device *dev,
110 dma_addr_t dma_handle, size_t size,
111 enum dma_data_direction dir);
112 void (*sync_single_for_device)(struct device *dev,
113 dma_addr_t dma_handle, size_t size,
114 enum dma_data_direction dir);
115 void (*sync_sg_for_cpu)(struct device *dev,
116 struct scatterlist *sg, int nents,
117 enum dma_data_direction dir);
118 void (*sync_sg_for_device)(struct device *dev,
119 struct scatterlist *sg, int nents,
120 enum dma_data_direction dir);
121 int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
122 int (*dma_supported)(struct device *dev, u64 mask);
123 int (*set_dma_mask)(struct device *dev, u64 mask);
124 #ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
125 u64 (*get_required_mask)(struct device *dev);
126 #endif
127 int is_phys;
128 };
129
130 extern struct dma_map_ops dma_noop_ops;
131
/* Mask with the low n bits set; the n == 64 special case avoids the
 * undefined behavior of shifting 1ULL by 64. */
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))

/* Sentinel meaning "no DMA addressing capability". */
#define DMA_MASK_NONE 0x0ULL
135
136 static inline int valid_dma_direction(int dma_direction)
137 {
138 return ((dma_direction == DMA_BIDIRECTIONAL) ||
139 (dma_direction == DMA_TO_DEVICE) ||
140 (dma_direction == DMA_FROM_DEVICE));
141 }
142
143 static inline int is_device_dma_capable(struct device *dev)
144 {
145 return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
146 }
147
148 #ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
149 /*
150 * These three functions are only for dma allocator.
151 * Don't use them in device drivers.
152 */
153 int dma_alloc_from_coherent(struct device *dev, ssize_t size,
154 dma_addr_t *dma_handle, void **ret);
155 int dma_release_from_coherent(struct device *dev, int order, void *vaddr);
156
157 int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
158 void *cpu_addr, size_t size, int *ret);
159 #else
160 #define dma_alloc_from_coherent(dev, size, handle, ret) (0)
161 #define dma_release_from_coherent(dev, order, vaddr) (0)
162 #define dma_mmap_from_coherent(dev, vma, vaddr, order, ret) (0)
163 #endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
164
165 #ifdef CONFIG_HAS_DMA
166 #include <asm/dma-mapping.h>
167 #else
168 /*
169 * Define the dma api to allow compilation but not linking of
170 * dma dependent code. Code that depends on the dma-mapping
171 * API needs to set 'depends on HAS_DMA' in its Kconfig
172 */
173 extern struct dma_map_ops bad_dma_ops;
/* !CONFIG_HAS_DMA stub: return the deliberately-undefined bad_dma_ops
 * so code using the DMA API compiles but fails to link. */
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
	return &bad_dma_ops;
}
178 #endif
179
/* Map @size bytes at cpu-virtual @ptr for streaming DMA and return the
 * device-visible handle.  Implemented on top of the page-based op. */
static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
					      size_t size,
					      enum dma_data_direction dir,
					      unsigned long attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	/* Tell kmemcheck the buffer contents are now defined. */
	kmemcheck_mark_initialized(ptr, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, virt_to_page(ptr),
			     offset_in_page(ptr), size,
			     dir, attrs);
	/* Last argument flags this as a "single" (not page) mapping for
	 * the DMA debug infrastructure. */
	debug_dma_map_page(dev, virt_to_page(ptr),
			   offset_in_page(ptr), size,
			   dir, addr, true);
	return addr;
}
198
199 static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
200 size_t size,
201 enum dma_data_direction dir,
202 unsigned long attrs)
203 {
204 struct dma_map_ops *ops = get_dma_ops(dev);
205
206 BUG_ON(!valid_dma_direction(dir));
207 if (ops->unmap_page)
208 ops->unmap_page(dev, addr, size, dir, attrs);
209 debug_dma_unmap_page(dev, addr, size, dir, true);
210 }
211
/*
 * dma_map_sg_attrs returns 0 on error and > 0 on success.
 * It should never return a value < 0.
 */
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir,
				   unsigned long attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	int i, ents;
	struct scatterlist *s;

	/* Tell kmemcheck each segment's payload is defined. */
	for_each_sg(sg, s, nents, i)
		kmemcheck_mark_initialized(sg_virt(s), s->length);
	BUG_ON(!valid_dma_direction(dir));
	ents = ops->map_sg(dev, sg, nents, dir, attrs);
	/* Contract (see dma_map_ops): 0 on error, > 0 segments mapped;
	 * a negative return indicates a broken implementation. */
	BUG_ON(ents < 0);
	debug_dma_map_sg(dev, sg, nents, ents, dir);

	return ents;
}
233
234 static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
235 int nents, enum dma_data_direction dir,
236 unsigned long attrs)
237 {
238 struct dma_map_ops *ops = get_dma_ops(dev);
239
240 BUG_ON(!valid_dma_direction(dir));
241 debug_dma_unmap_sg(dev, sg, nents, dir);
242 if (ops->unmap_sg)
243 ops->unmap_sg(dev, sg, nents, dir, attrs);
244 }
245
/* Map part of a page for streaming DMA (attrs fixed to 0). */
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(page_address(page) + offset, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, page, offset, size, dir, 0);
	/* false: recorded as a page (not single) mapping for DMA debug. */
	debug_dma_map_page(dev, page, offset, size, dir, addr, false);

	return addr;
}
260
261 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
262 size_t size, enum dma_data_direction dir)
263 {
264 struct dma_map_ops *ops = get_dma_ops(dev);
265
266 BUG_ON(!valid_dma_direction(dir));
267 if (ops->unmap_page)
268 ops->unmap_page(dev, addr, size, dir, 0);
269 debug_dma_unmap_page(dev, addr, size, dir, false);
270 }
271
/* Map an MMIO resource (never RAM) for DMA. */
static inline dma_addr_t dma_map_resource(struct device *dev,
					  phys_addr_t phys_addr,
					  size_t size,
					  enum dma_data_direction dir,
					  unsigned long attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));

	/* Don't allow RAM to be mapped */
	BUG_ON(pfn_valid(PHYS_PFN(phys_addr)));

	/* Without a map_resource op the physical address is used as the
	 * bus address unchanged. */
	addr = phys_addr;
	if (ops->map_resource)
		addr = ops->map_resource(dev, phys_addr, size, dir, attrs);

	debug_dma_map_resource(dev, phys_addr, size, dir, addr);

	return addr;
}
294
295 static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
296 size_t size, enum dma_data_direction dir,
297 unsigned long attrs)
298 {
299 struct dma_map_ops *ops = get_dma_ops(dev);
300
301 BUG_ON(!valid_dma_direction(dir));
302 if (ops->unmap_resource)
303 ops->unmap_resource(dev, addr, size, dir, attrs);
304 debug_dma_unmap_resource(dev, addr, size, dir);
305 }
306
/* Sync a streaming mapping for CPU access: invoke the arch hook, if
 * any, then record the sync for CONFIG_DMA_API_DEBUG. */
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
					   size_t size,
					   enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}
318
319 static inline void dma_sync_single_for_device(struct device *dev,
320 dma_addr_t addr, size_t size,
321 enum dma_data_direction dir)
322 {
323 struct dma_map_ops *ops = get_dma_ops(dev);
324
325 BUG_ON(!valid_dma_direction(dir));
326 if (ops->sync_single_for_device)
327 ops->sync_single_for_device(dev, addr, size, dir);
328 debug_dma_sync_single_for_device(dev, addr, size, dir);
329 }
330
/* Partial-buffer variant of dma_sync_single_for_cpu(): syncs only the
 * @size bytes starting at @addr + @offset, but logs the full triple. */
static inline void dma_sync_single_range_for_cpu(struct device *dev,
						 dma_addr_t addr,
						 unsigned long offset,
						 size_t size,
						 enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
}
344
345 static inline void dma_sync_single_range_for_device(struct device *dev,
346 dma_addr_t addr,
347 unsigned long offset,
348 size_t size,
349 enum dma_data_direction dir)
350 {
351 const struct dma_map_ops *ops = get_dma_ops(dev);
352
353 BUG_ON(!valid_dma_direction(dir));
354 if (ops->sync_single_for_device)
355 ops->sync_single_for_device(dev, addr + offset, size, dir);
356 debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
357 }
358
/* Sync a mapped scatterlist for CPU access. */
static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		    int nelems, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}
370
371 static inline void
372 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
373 int nelems, enum dma_data_direction dir)
374 {
375 struct dma_map_ops *ops = get_dma_ops(dev);
376
377 BUG_ON(!valid_dma_direction(dir));
378 if (ops->sync_sg_for_device)
379 ops->sync_sg_for_device(dev, sg, nelems, dir);
380 debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
381
382 }
383
384 #define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
385 #define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
386 #define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
387 #define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
388
389 extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
390 void *cpu_addr, dma_addr_t dma_addr, size_t size);
391
392 void *dma_common_contiguous_remap(struct page *page, size_t size,
393 unsigned long vm_flags,
394 pgprot_t prot, const void *caller);
395
396 void *dma_common_pages_remap(struct page **pages, size_t size,
397 unsigned long vm_flags, pgprot_t prot,
398 const void *caller);
399 void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);
400
401 /**
402 * dma_mmap_attrs - map a coherent DMA allocation into user space
403 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
404 * @vma: vm_area_struct describing requested user mapping
405 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
406 * @handle: device-view address returned from dma_alloc_attrs
407 * @size: size of memory originally requested in dma_alloc_attrs
408 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
409 *
410 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
411 * into user space. The coherent DMA buffer must not be freed by the
412 * driver until the user space mapping has been released.
413 */
414 static inline int
415 dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
416 dma_addr_t dma_addr, size_t size, unsigned long attrs)
417 {
418 struct dma_map_ops *ops = get_dma_ops(dev);
419 BUG_ON(!ops);
420 if (ops->mmap)
421 return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
422 return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
423 }
424
425 #define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
426
427 int
428 dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
429 void *cpu_addr, dma_addr_t dma_addr, size_t size);
430
431 static inline int
432 dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
433 dma_addr_t dma_addr, size_t size,
434 unsigned long attrs)
435 {
436 struct dma_map_ops *ops = get_dma_ops(dev);
437 BUG_ON(!ops);
438 if (ops->get_sgtable)
439 return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
440 attrs);
441 return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
442 }
443
444 #define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
445
446 #ifndef arch_dma_alloc_attrs
447 #define arch_dma_alloc_attrs(dev, flag) (true)
448 #endif
449
/* Allocate a coherent DMA buffer; returns the CPU address and stores
 * the device address in *dma_handle, or returns NULL on failure. */
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t flag,
				    unsigned long attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;

	BUG_ON(!ops);

	/* A per-device coherent pool, if one was declared, satisfies
	 * the request without involving the dma_map_ops. */
	if (dma_alloc_from_coherent(dev, size, dma_handle, &cpu_addr))
		return cpu_addr;

	/* The arch hook receives pointers, so it may rewrite both the
	 * device and the gfp flags before the real allocation. */
	if (!arch_dma_alloc_attrs(&dev, &flag))
		return NULL;
	if (!ops->alloc)
		return NULL;

	cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
	return cpu_addr;
}
471
/* Free a buffer obtained from dma_alloc_attrs(). */
static inline void dma_free_attrs(struct device *dev, size_t size,
				  void *cpu_addr, dma_addr_t dma_handle,
				  unsigned long attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!ops);
	/* Calling this with interrupts disabled is a bug; warn loudly. */
	WARN_ON(irqs_disabled());

	/* Buffers from a per-device coherent pool go back to the pool. */
	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
		return;

	/* NULL cpu_addr (failed/never allocated) is silently ignored. */
	if (!ops->free || !cpu_addr)
		return;

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
490
491 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
492 dma_addr_t *dma_handle, gfp_t flag)
493 {
494 return dma_alloc_attrs(dev, size, dma_handle, flag, 0);
495 }
496
497 static inline void dma_free_coherent(struct device *dev, size_t size,
498 void *cpu_addr, dma_addr_t dma_handle)
499 {
500 return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
501 }
502
/* Coherent-API allocation with DMA_ATTR_NON_CONSISTENT requested. */
static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
					  dma_addr_t *dma_handle, gfp_t gfp)
{
	return dma_alloc_attrs(dev, size, dma_handle, gfp,
			       DMA_ATTR_NON_CONSISTENT);
}
509
/* Free a buffer obtained from dma_alloc_noncoherent(). */
static inline void dma_free_noncoherent(struct device *dev, size_t size,
					void *cpu_addr, dma_addr_t dma_handle)
{
	dma_free_attrs(dev, size, cpu_addr, dma_handle,
		       DMA_ATTR_NON_CONSISTENT);
}
516
517 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
518 {
519 debug_dma_mapping_error(dev, dma_addr);
520
521 if (get_dma_ops(dev)->mapping_error)
522 return get_dma_ops(dev)->mapping_error(dev, dma_addr);
523
524 #ifdef DMA_ERROR_CODE
525 return dma_addr == DMA_ERROR_CODE;
526 #else
527 return 0;
528 #endif
529 }
530
531 #ifndef HAVE_ARCH_DMA_SUPPORTED
532 static inline int dma_supported(struct device *dev, u64 mask)
533 {
534 struct dma_map_ops *ops = get_dma_ops(dev);
535
536 if (!ops)
537 return 0;
538 if (!ops->dma_supported)
539 return 1;
540 return ops->dma_supported(dev, mask);
541 }
542 #endif
543
544 #ifndef HAVE_ARCH_DMA_SET_MASK
/* Set the device's streaming DMA mask, returning 0 or -EIO. */
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	/* An arch-specific setter, when present, takes full control. */
	if (ops->set_dma_mask)
		return ops->set_dma_mask(dev, mask);

	/* Otherwise validate the mask before storing it. */
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;
	*dev->dma_mask = mask;
	return 0;
}
557 #endif
558
559 static inline u64 dma_get_mask(struct device *dev)
560 {
561 if (dev && dev->dma_mask && *dev->dma_mask)
562 return *dev->dma_mask;
563 return DMA_BIT_MASK(32);
564 }
565
566 #ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
567 int dma_set_coherent_mask(struct device *dev, u64 mask);
568 #else
569 static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
570 {
571 if (!dma_supported(dev, mask))
572 return -EIO;
573 dev->coherent_dma_mask = mask;
574 return 0;
575 }
576 #endif
577
578 /*
579 * Set both the DMA mask and the coherent DMA mask to the same thing.
580 * Note that we don't check the return value from dma_set_coherent_mask()
581 * as the DMA API guarantees that the coherent DMA mask can be set to
582 * the same or smaller than the streaming DMA mask.
583 */
584 static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
585 {
586 int rc = dma_set_mask(dev, mask);
587 if (rc == 0)
588 dma_set_coherent_mask(dev, mask);
589 return rc;
590 }
591
592 /*
593 * Similar to the above, except it deals with the case where the device
594 * does not have dev->dma_mask appropriately setup.
595 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
	/* Point the streaming mask at the coherent one so both track
	 * the same storage from here on. */
	dev->dma_mask = &dev->coherent_dma_mask;
	return dma_set_mask_and_coherent(dev, mask);
}
601
602 extern u64 dma_get_required_mask(struct device *dev);
603
604 #ifndef arch_setup_dma_ops
/* Default no-op; architectures override by defining arch_setup_dma_ops. */
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
				      u64 size, const struct iommu_ops *iommu,
				      bool coherent) { }
608 #endif
609
610 #ifndef arch_teardown_dma_ops
/* Default no-op; architectures override by defining arch_teardown_dma_ops. */
static inline void arch_teardown_dma_ops(struct device *dev) { }
612 #endif
613
614 static inline unsigned int dma_get_max_seg_size(struct device *dev)
615 {
616 if (dev->dma_parms && dev->dma_parms->max_segment_size)
617 return dev->dma_parms->max_segment_size;
618 return SZ_64K;
619 }
620
621 static inline unsigned int dma_set_max_seg_size(struct device *dev,
622 unsigned int size)
623 {
624 if (dev->dma_parms) {
625 dev->dma_parms->max_segment_size = size;
626 return 0;
627 }
628 return -EIO;
629 }
630
631 static inline unsigned long dma_get_seg_boundary(struct device *dev)
632 {
633 if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
634 return dev->dma_parms->segment_boundary_mask;
635 return DMA_BIT_MASK(32);
636 }
637
638 static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
639 {
640 if (dev->dma_parms) {
641 dev->dma_parms->segment_boundary_mask = mask;
642 return 0;
643 }
644 return -EIO;
645 }
646
647 #ifndef dma_max_pfn
/* Highest page frame number reachable through the device's streaming
 * DMA mask.  NOTE(review): dereferences dev->dma_mask without a NULL
 * check — callers must have a mask set. */
static inline unsigned long dma_max_pfn(struct device *dev)
{
	return *dev->dma_mask >> PAGE_SHIFT;
}
652 #endif
653
654 static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
655 dma_addr_t *dma_handle, gfp_t flag)
656 {
657 void *ret = dma_alloc_coherent(dev, size, dma_handle,
658 flag | __GFP_ZERO);
659 return ret;
660 }
661
662 #ifdef CONFIG_HAS_DMA
/* Minimum alignment DMA buffers need with respect to CPU caches. */
static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
	/* Arch-provided minimum. */
	return ARCH_DMA_MINALIGN;
#endif
	/* No arch requirement: byte alignment suffices. */
	return 1;
}
670 #endif
671
672 /* flags for the coherent memory api */
673 #define DMA_MEMORY_MAP 0x01
674 #define DMA_MEMORY_IO 0x02
675 #define DMA_MEMORY_INCLUDES_CHILDREN 0x04
676 #define DMA_MEMORY_EXCLUSIVE 0x08
677
678 #ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
679 int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
680 dma_addr_t device_addr, size_t size, int flags);
681 void dma_release_declared_memory(struct device *dev);
682 void *dma_mark_declared_memory_occupied(struct device *dev,
683 dma_addr_t device_addr, size_t size);
684 #else
/* !CONFIG_HAVE_GENERIC_DMA_COHERENT stub: declaring always fails (0). */
static inline int
dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
			    dma_addr_t device_addr, size_t size, int flags)
{
	return 0;
}
691
/* !CONFIG_HAVE_GENERIC_DMA_COHERENT stub: nothing to release. */
static inline void
dma_release_declared_memory(struct device *dev)
{
}
696
static inline void *
dma_mark_declared_memory_occupied(struct device *dev,
				  dma_addr_t device_addr, size_t size)
{
	/* No generic coherent support: nothing can ever be occupied. */
	return ERR_PTR(-EBUSY);
}
703 #endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
704
705 /*
706 * Managed DMA API
707 */
708 extern void *dmam_alloc_coherent(struct device *dev, size_t size,
709 dma_addr_t *dma_handle, gfp_t gfp);
710 extern void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
711 dma_addr_t dma_handle);
712 extern void *dmam_alloc_noncoherent(struct device *dev, size_t size,
713 dma_addr_t *dma_handle, gfp_t gfp);
714 extern void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr,
715 dma_addr_t dma_handle);
716 #ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
717 extern int dmam_declare_coherent_memory(struct device *dev,
718 phys_addr_t phys_addr,
719 dma_addr_t device_addr, size_t size,
720 int flags);
721 extern void dmam_release_declared_memory(struct device *dev);
722 #else /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
723 static inline int dmam_declare_coherent_memory(struct device *dev,
724 phys_addr_t phys_addr, dma_addr_t device_addr,
725 size_t size, gfp_t gfp)
726 {
727 return 0;
728 }
729
/* !CONFIG_HAVE_GENERIC_DMA_COHERENT stub: nothing to release. */
static inline void dmam_release_declared_memory(struct device *dev)
{
}
733 #endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
734
/* Coherent allocation with DMA_ATTR_WRITE_COMBINE requested. */
static inline void *dma_alloc_wc(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t gfp)
{
	return dma_alloc_attrs(dev, size, dma_addr, gfp,
			       DMA_ATTR_WRITE_COMBINE);
}
741 #ifndef dma_alloc_writecombine
742 #define dma_alloc_writecombine dma_alloc_wc
743 #endif
744
745 static inline void dma_free_wc(struct device *dev, size_t size,
746 void *cpu_addr, dma_addr_t dma_addr)
747 {
748 return dma_free_attrs(dev, size, cpu_addr, dma_addr,
749 DMA_ATTR_WRITE_COMBINE);
750 }
751 #ifndef dma_free_writecombine
752 #define dma_free_writecombine dma_free_wc
753 #endif
754
/* Map a coherent buffer into user space with write-combining. */
static inline int dma_mmap_wc(struct device *dev,
			      struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr,
			      size_t size)
{
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
			      DMA_ATTR_WRITE_COMBINE);
}
763 #ifndef dma_mmap_writecombine
764 #define dma_mmap_writecombine dma_mmap_wc
765 #endif
766
767 #if defined(CONFIG_NEED_DMA_MAP_STATE) || defined(CONFIG_DMA_API_DEBUG)
768 #define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME
769 #define DEFINE_DMA_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME
770 #define dma_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME)
771 #define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL))
772 #define dma_unmap_len(PTR, LEN_NAME) ((PTR)->LEN_NAME)
773 #define dma_unmap_len_set(PTR, LEN_NAME, VAL) (((PTR)->LEN_NAME) = (VAL))
774 #else
775 #define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
776 #define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
777 #define dma_unmap_addr(PTR, ADDR_NAME) (0)
778 #define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
779 #define dma_unmap_len(PTR, LEN_NAME) (0)
780 #define dma_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
781 #endif
782
783 #endif 1 #ifndef LINUX_KMEMCHECK_H
2 #define LINUX_KMEMCHECK_H
3
4 #include <linux/mm_types.h>
5 #include <linux/types.h>
6
7 #ifdef CONFIG_KMEMCHECK
8 extern int kmemcheck_enabled;
9
10 /* The slab-related functions. */
11 void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node);
12 void kmemcheck_free_shadow(struct page *page, int order);
13 void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
14 size_t size);
15 void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size);
16
17 void kmemcheck_pagealloc_alloc(struct page *p, unsigned int order,
18 gfp_t gfpflags);
19
20 void kmemcheck_show_pages(struct page *p, unsigned int n);
21 void kmemcheck_hide_pages(struct page *p, unsigned int n);
22
23 bool kmemcheck_page_is_tracked(struct page *p);
24
25 void kmemcheck_mark_unallocated(void *address, unsigned int n);
26 void kmemcheck_mark_uninitialized(void *address, unsigned int n);
27 void kmemcheck_mark_initialized(void *address, unsigned int n);
28 void kmemcheck_mark_freed(void *address, unsigned int n);
29
30 void kmemcheck_mark_unallocated_pages(struct page *p, unsigned int n);
31 void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n);
32 void kmemcheck_mark_initialized_pages(struct page *p, unsigned int n);
33
34 int kmemcheck_show_addr(unsigned long address);
35 int kmemcheck_hide_addr(unsigned long address);
36
37 bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size);
38
39 /*
40 * Bitfield annotations
41 *
42 * How to use: If you have a struct using bitfields, for example
43 *
44 * struct a {
45 * int x:8, y:8;
46 * };
47 *
48 * then this should be rewritten as
49 *
50 * struct a {
51 * kmemcheck_bitfield_begin(flags);
52 * int x:8, y:8;
53 * kmemcheck_bitfield_end(flags);
54 * };
55 *
56 * Now the "flags_begin" and "flags_end" members may be used to refer to the
57 * beginning and end, respectively, of the bitfield (and things like
58 * &x.flags_begin is allowed). As soon as the struct is allocated, the bit-
59 * fields should be annotated:
60 *
61 * struct a *a = kmalloc(sizeof(struct a), GFP_KERNEL);
62 * kmemcheck_annotate_bitfield(a, flags);
63 */
64 #define kmemcheck_bitfield_begin(name) \
65 int name##_begin[0];
66
67 #define kmemcheck_bitfield_end(name) \
68 int name##_end[0];
69
70 #define kmemcheck_annotate_bitfield(ptr, name) \
71 do { \
72 int _n; \
73 \
74 if (!ptr) \
75 break; \
76 \
77 _n = (long) &((ptr)->name##_end) \
78 - (long) &((ptr)->name##_begin); \
79 BUILD_BUG_ON(_n < 0); \
80 \
81 kmemcheck_mark_initialized(&((ptr)->name##_begin), _n); \
82 } while (0)
83
84 #define kmemcheck_annotate_variable(var) \
85 do { \
86 kmemcheck_mark_initialized(&(var), sizeof(var)); \
87 } while (0) \
88
89 #else
#define kmemcheck_enabled 0

/*
 * CONFIG_KMEMCHECK is disabled: every hook below is an empty inline or
 * empty macro, so callers may use them unconditionally without #ifdefs.
 */
static inline void
kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
{
}

static inline void
kmemcheck_free_shadow(struct page *page, int order)
{
}

static inline void
kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
		     size_t size)
{
}

static inline void kmemcheck_slab_free(struct kmem_cache *s, void *object,
				       size_t size)
{
}

static inline void kmemcheck_pagealloc_alloc(struct page *p,
					     unsigned int order, gfp_t gfpflags)
{
}

/* With kmemcheck off, no page is ever tracked... */
static inline bool kmemcheck_page_is_tracked(struct page *p)
{
	return false;
}

static inline void kmemcheck_mark_unallocated(void *address, unsigned int n)
{
}

static inline void kmemcheck_mark_uninitialized(void *address, unsigned int n)
{
}

static inline void kmemcheck_mark_initialized(void *address, unsigned int n)
{
}

static inline void kmemcheck_mark_freed(void *address, unsigned int n)
{
}

static inline void kmemcheck_mark_unallocated_pages(struct page *p,
						    unsigned int n)
{
}

static inline void kmemcheck_mark_uninitialized_pages(struct page *p,
						      unsigned int n)
{
}

static inline void kmemcheck_mark_initialized_pages(struct page *p,
						    unsigned int n)
{
}

/* ...and every object counts as fully initialized. */
static inline bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size)
{
	return true;
}

#define kmemcheck_bitfield_begin(name)
#define kmemcheck_bitfield_end(name)
#define kmemcheck_annotate_bitfield(ptr, name) \
	do { \
	} while (0)

#define kmemcheck_annotate_variable(var) \
	do { \
	} while (0)
168
169 #endif /* CONFIG_KMEMCHECK */
170
171 #endif /* LINUX_KMEMCHECK_H */ 1 /* include this file if the platform implements the dma_ DMA Mapping API
2 * and wants to provide the pci_ DMA Mapping API in terms of it */
3
4 #ifndef _ASM_GENERIC_PCI_DMA_COMPAT_H
5 #define _ASM_GENERIC_PCI_DMA_COMPAT_H
6
7 #include <linux/dma-mapping.h>
8
9 /* This defines the direction arg to the DMA mapping routines. */
10 #define PCI_DMA_BIDIRECTIONAL 0
11 #define PCI_DMA_TODEVICE 1
12 #define PCI_DMA_FROMDEVICE 2
13 #define PCI_DMA_NONE 3
14
15 static inline void *
16 pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
17 dma_addr_t *dma_handle)
18 {
19 return dma_alloc_coherent(hwdev == NULL ? NULL : &hwdev->dev, size, dma_handle, GFP_ATOMIC);
20 }
21
/* Zeroing variant of pci_alloc_consistent(). */
static inline void *
pci_zalloc_consistent(struct pci_dev *hwdev, size_t size,
		      dma_addr_t *dma_handle)
{
	return dma_zalloc_coherent(hwdev == NULL ? NULL : &hwdev->dev,
				   size, dma_handle, GFP_ATOMIC);
}

/* Free a buffer from pci_alloc_consistent()/pci_zalloc_consistent(). */
static inline void
pci_free_consistent(struct pci_dev *hwdev, size_t size,
		    void *vaddr, dma_addr_t dma_handle)
{
	dma_free_coherent(hwdev == NULL ? NULL : &hwdev->dev, size, vaddr, dma_handle);
}
36
37 static inline dma_addr_t
38 pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
39 {
40 return dma_map_single(hwdev == NULL ? NULL : &hwdev->dev, ptr, size, (enum dma_data_direction)direction);
41 }
42
/* Forwarders to the generic single/page unmap and map routines; a
 * NULL hwdev is passed through as a NULL struct device. */
static inline void
pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
		 size_t size, int direction)
{
	dma_unmap_single(hwdev == NULL ? NULL : &hwdev->dev, dma_addr, size, (enum dma_data_direction)direction);
}

static inline dma_addr_t
pci_map_page(struct pci_dev *hwdev, struct page *page,
	     unsigned long offset, size_t size, int direction)
{
	return dma_map_page(hwdev == NULL ? NULL : &hwdev->dev, page, offset, size, (enum dma_data_direction)direction);
}

static inline void
pci_unmap_page(struct pci_dev *hwdev, dma_addr_t dma_address,
	       size_t size, int direction)
{
	dma_unmap_page(hwdev == NULL ? NULL : &hwdev->dev, dma_address, size, (enum dma_data_direction)direction);
}
63
64 static inline int
65 pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
66 int nents, int direction)
67 {
68 return dma_map_sg(hwdev == NULL ? NULL : &hwdev->dev, sg, nents, (enum dma_data_direction)direction);
69 }
70
/* Forward to dma_unmap_sg(). */
static inline void
pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
	     int nents, int direction)
{
	dma_unmap_sg(hwdev == NULL ? NULL : &hwdev->dev, sg, nents, (enum dma_data_direction)direction);
}
77
/* Sync forwarders: translate the legacy int direction and hand off to
 * the corresponding dma_sync_* helper. */
static inline void
pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle,
			    size_t size, int direction)
{
	dma_sync_single_for_cpu(hwdev == NULL ? NULL : &hwdev->dev, dma_handle, size, (enum dma_data_direction)direction);
}

static inline void
pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle,
			       size_t size, int direction)
{
	dma_sync_single_for_device(hwdev == NULL ? NULL : &hwdev->dev, dma_handle, size, (enum dma_data_direction)direction);
}

static inline void
pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg,
			int nelems, int direction)
{
	dma_sync_sg_for_cpu(hwdev == NULL ? NULL : &hwdev->dev, sg, nelems, (enum dma_data_direction)direction);
}

static inline void
pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg,
			   int nelems, int direction)
{
	dma_sync_sg_for_device(hwdev == NULL ? NULL : &hwdev->dev, sg, nelems, (enum dma_data_direction)direction);
}
105
/* Forward to dma_mapping_error(); nonzero means the mapping failed.
 * Note: unlike the wrappers above, pdev must not be NULL here. */
static inline int
pci_dma_mapping_error(struct pci_dev *pdev, dma_addr_t dma_addr)
{
	return dma_mapping_error(&pdev->dev, dma_addr);
}
111
112 #ifdef CONFIG_PCI
/* CONFIG_PCI: forward the pci_* mask/segment setters to the generic
 * DMA API on the embedded struct device. */
static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
{
	return dma_set_mask(&dev->dev, mask);
}

static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
{
	return dma_set_coherent_mask(&dev->dev, mask);
}

static inline int pci_set_dma_max_seg_size(struct pci_dev *dev,
					   unsigned int size)
{
	return dma_set_max_seg_size(&dev->dev, size);
}

static inline int pci_set_dma_seg_boundary(struct pci_dev *dev,
					   unsigned long mask)
{
	return dma_set_seg_boundary(&dev->dev, mask);
}
134 #else
/* !CONFIG_PCI stubs: every setter fails with -EIO. */
static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
{ return -EIO; }
static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
{ return -EIO; }
static inline int pci_set_dma_max_seg_size(struct pci_dev *dev,
					   unsigned int size)
{ return -EIO; }
static inline int pci_set_dma_seg_boundary(struct pci_dev *dev,
					   unsigned long mask)
{ return -EIO; }
145 #endif
146
147 #endif 1 /*
2 * Definitions for the 'struct sk_buff' memory handlers.
3 *
4 * Authors:
5 * Alan Cox, <gw4pts@gw4pts.ampr.org>
6 * Florian La Roche, <rzsfl@rz.uni-sb.de>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 */
13
14 #ifndef _LINUX_SKBUFF_H
15 #define _LINUX_SKBUFF_H
16
17 #include <linux/kernel.h>
18 #include <linux/kmemcheck.h>
19 #include <linux/compiler.h>
20 #include <linux/time.h>
21 #include <linux/bug.h>
22 #include <linux/cache.h>
23 #include <linux/rbtree.h>
24 #include <linux/socket.h>
25
26 #include <linux/atomic.h>
27 #include <asm/types.h>
28 #include <linux/spinlock.h>
29 #include <linux/net.h>
30 #include <linux/textsearch.h>
31 #include <net/checksum.h>
32 #include <linux/rcupdate.h>
33 #include <linux/hrtimer.h>
34 #include <linux/dma-mapping.h>
35 #include <linux/netdev_features.h>
36 #include <linux/sched.h>
37 #include <net/flow_dissector.h>
38 #include <linux/splice.h>
39 #include <linux/in6.h>
40 #include <linux/if_packet.h>
41 #include <net/flow.h>
42
43 /* The interface for checksum offload between the stack and networking drivers
44 * is as follows...
45 *
46 * A. IP checksum related features
47 *
48 * Drivers advertise checksum offload capabilities in the features of a device.
49 * From the stack's point of view these are capabilities offered by the driver,
50 * a driver typically only advertises features that it is capable of offloading
51 * to its device.
52 *
53 * The checksum related features are:
54 *
55 * NETIF_F_HW_CSUM - The driver (or its device) is able to compute one
56 * IP (one's complement) checksum for any combination
57 * of protocols or protocol layering. The checksum is
58 * computed and set in a packet per the CHECKSUM_PARTIAL
59 * interface (see below).
60 *
61 * NETIF_F_IP_CSUM - Driver (device) is only able to checksum plain
62 * TCP or UDP packets over IPv4. These are specifically
63 * unencapsulated packets of the form IPv4|TCP or
64 * IPv4|UDP where the Protocol field in the IPv4 header
65 * is TCP or UDP. The IPv4 header may contain IP options
66 * This feature cannot be set in features for a device
67 * with NETIF_F_HW_CSUM also set. This feature is being
68 * DEPRECATED (see below).
69 *
70 * NETIF_F_IPV6_CSUM - Driver (device) is only able to checksum plain
71 * TCP or UDP packets over IPv6. These are specifically
72 * unencapsulated packets of the form IPv6|TCP or
73 * IPv6|UDP where the Next Header field in the IPv6
74 * header is either TCP or UDP. IPv6 extension headers
75 * are not supported with this feature. This feature
76 * cannot be set in features for a device with
77 * NETIF_F_HW_CSUM also set. This feature is being
78 * DEPRECATED (see below).
79 *
80 * NETIF_F_RXCSUM - Driver (device) performs receive checksum offload.
81 * This flag is only used to disable the RX checksum
82 * feature for a device. The stack will accept receive
83 * checksum indication in packets received on a device
84 * regardless of whether NETIF_F_RXCSUM is set.
85 *
86 * B. Checksumming of received packets by device. Indication of checksum
87 * verification is set in skb->ip_summed. Possible values are:
88 *
89 * CHECKSUM_NONE:
90 *
91 * Device did not checksum this packet e.g. due to lack of capabilities.
92 * The packet contains full (though not verified) checksum in packet but
93 * not in skb->csum. Thus, skb->csum is undefined in this case.
94 *
95 * CHECKSUM_UNNECESSARY:
96 *
97 * The hardware you're dealing with doesn't calculate the full checksum
98 * (as in CHECKSUM_COMPLETE), but it does parse headers and verify checksums
99 * for specific protocols. For such packets it will set CHECKSUM_UNNECESSARY
100 * if their checksums are okay. skb->csum is still undefined in this case
101 * though. A driver or device must never modify the checksum field in the
102 * packet even if checksum is verified.
103 *
104 * CHECKSUM_UNNECESSARY is applicable to following protocols:
105 * TCP: IPv6 and IPv4.
106 * UDP: IPv4 and IPv6. A device may apply CHECKSUM_UNNECESSARY to a
107 * zero UDP checksum for either IPv4 or IPv6, the networking stack
108 * may perform further validation in this case.
109 * GRE: only if the checksum is present in the header.
110 * SCTP: indicates the CRC in SCTP header has been validated.
111 *
112 * skb->csum_level indicates the number of consecutive checksums found in
113 * the packet minus one that have been verified as CHECKSUM_UNNECESSARY.
114 * For instance if a device receives an IPv6->UDP->GRE->IPv4->TCP packet
115 * and a device is able to verify the checksums for UDP (possibly zero),
116 * GRE (checksum flag is set), and TCP-- skb->csum_level would be set to
117 * two. If the device were only able to verify the UDP checksum and not
118 * GRE, either because it doesn't support GRE checksum or because the GRE
119 * checksum is bad, skb->csum_level would be set to zero (TCP checksum is
120 * not considered in this case).
121 *
122 * CHECKSUM_COMPLETE:
123 *
124 * This is the most generic way. The device supplied checksum of the _whole_
125 * packet as seen by netif_rx() and fills out in skb->csum. Meaning, the
126 * hardware doesn't need to parse L3/L4 headers to implement this.
127 *
128 * Note: Even if device supports only some protocols, but is able to produce
129 * skb->csum, it MUST use CHECKSUM_COMPLETE, not CHECKSUM_UNNECESSARY.
130 *
131 * CHECKSUM_PARTIAL:
132 *
133 * A checksum is set up to be offloaded to a device as described in the
134 * output description for CHECKSUM_PARTIAL. This may occur on a packet
135 * received directly from another Linux OS, e.g., a virtualized Linux kernel
136 * on the same host, or it may be set in the input path in GRO or remote
137 * checksum offload. For the purposes of checksum verification, the checksum
138 * referred to by skb->csum_start + skb->csum_offset and any preceding
139 * checksums in the packet are considered verified. Any checksums in the
140 * packet that are after the checksum being offloaded are not considered to
141 * be verified.
142 *
143 * C. Checksumming on transmit for non-GSO. The stack requests checksum offload
144 * in the skb->ip_summed for a packet. Values are:
145 *
146 * CHECKSUM_PARTIAL:
147 *
148 * The driver is required to checksum the packet as seen by hard_start_xmit()
149 * from skb->csum_start up to the end, and to record/write the checksum at
150 * offset skb->csum_start + skb->csum_offset. A driver may verify that the
151 * csum_start and csum_offset values are valid values given the length and
152 * offset of the packet, however they should not attempt to validate that the
153 * checksum refers to a legitimate transport layer checksum-- it is the
154 * purview of the stack to validate that csum_start and csum_offset are set
155 * correctly.
156 *
157 * When the stack requests checksum offload for a packet, the driver MUST
158 * ensure that the checksum is set correctly. A driver can either offload the
159 * checksum calculation to the device, or call skb_checksum_help (in the case
160 * that the device does not support offload for a particular checksum).
161 *
162 * NETIF_F_IP_CSUM and NETIF_F_IPV6_CSUM are being deprecated in favor of
163 * NETIF_F_HW_CSUM. New devices should use NETIF_F_HW_CSUM to indicate
164 * checksum offload capability. If a device has limited checksum capabilities
165 * (for instance can only perform NETIF_F_IP_CSUM or NETIF_F_IPV6_CSUM as
166 * described above) a helper function can be called to resolve
167 * CHECKSUM_PARTIAL. The helper functions are skb_csum_off_chk*. The helper
168 * function takes a spec argument that describes the protocol layer that is
169 * supported for checksum offload and can be called for each packet. If a
170 * packet does not match the specification for offload, skb_checksum_help
171 * is called to resolve the checksum.
172 *
173 * CHECKSUM_NONE:
174 *
175 * The skb was already checksummed by the protocol, or a checksum is not
176 * required.
177 *
178 * CHECKSUM_UNNECESSARY:
179 *
180 * This has the same meaning as CHECKSUM_NONE for checksum offload on
181 * output.
182 *
183 * CHECKSUM_COMPLETE:
184 * Not used in checksum output. If a driver observes a packet with this value
185 * set in skbuff, it should treat it as if CHECKSUM_NONE were set.
186 *
187 * D. Non-IP checksum (CRC) offloads
188 *
189 * NETIF_F_SCTP_CRC - This feature indicates that a device is capable of
190 * offloading the SCTP CRC in a packet. To perform this offload the stack
191 * will set ip_summed to CHECKSUM_PARTIAL and set csum_start and csum_offset
192 * accordingly. Note that there is no indication in the skbuff that the
193 * CHECKSUM_PARTIAL refers to an SCTP checksum, a driver that supports
194 * both IP checksum offload and SCTP CRC offload must verify which offload
195 * is configured for a packet presumably by inspecting packet headers.
196 *
197 * NETIF_F_FCOE_CRC - This feature indicates that a device is capable of
198 * offloading the FCOE CRC in a packet. To perform this offload the stack
199 * will set ip_summed to CHECKSUM_PARTIAL and set csum_start and csum_offset
200 * accordingly. Note that there is no indication in the skbuff that the
201 * CHECKSUM_PARTIAL refers to an FCOE checksum, a driver that supports
202 * both IP checksum offload and FCOE CRC offload must verify which offload
203 * is configured for a packet presumably by inspecting packet headers.
204 *
205 * E. Checksumming on output with GSO.
206 *
207 * In the case of a GSO packet (skb_is_gso(skb) is true), checksum offload
208 * is implied by the SKB_GSO_* flags in gso_type. Most obviously, if the
209 * gso_type is SKB_GSO_TCPV4 or SKB_GSO_TCPV6, TCP checksum offload as
210 * part of the GSO operation is implied. If a checksum is being offloaded
211 * with GSO then ip_summed is CHECKSUM_PARTIAL, csum_start and csum_offset
212 * are set to refer to the outermost checksum being offloaded (two offloaded
213 * checksums are possible with UDP encapsulation).
214 */
215
216 /* Don't change this without changing skb_csum_unnecessary! */
217 #define CHECKSUM_NONE 0
218 #define CHECKSUM_UNNECESSARY 1
219 #define CHECKSUM_COMPLETE 2
220 #define CHECKSUM_PARTIAL 3
221
222 /* Maximum value in skb->csum_level */
223 #define SKB_MAX_CSUM_LEVEL 3
224
225 #define SKB_DATA_ALIGN(X) ALIGN(X, SMP_CACHE_BYTES)
226 #define SKB_WITH_OVERHEAD(X) \
227 ((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
228 #define SKB_MAX_ORDER(X, ORDER) \
229 SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
230 #define SKB_MAX_HEAD(X) (SKB_MAX_ORDER((X), 0))
231 #define SKB_MAX_ALLOC (SKB_MAX_ORDER(0, 2))
232
233 /* return minimum truesize of one skb containing X bytes of data */
234 #define SKB_TRUESIZE(X) ((X) + \
235 SKB_DATA_ALIGN(sizeof(struct sk_buff)) + \
236 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
237
238 struct net_device;
239 struct scatterlist;
240 struct pipe_inode_info;
241 struct iov_iter;
242 struct napi_struct;
243
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
/* Minimal view of a conntrack entry as seen from skbuff code: only the
 * reference count is needed here, so an skb can pin the entry via
 * skb->nfct without pulling in the full conntrack definitions.
 */
struct nf_conntrack {
	atomic_t use;	/* reference count */
};
#endif
249
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
/* Per-skb state used by bridge netfilter; hung off skb->nf_bridge and
 * refcounted via @use.
 */
struct nf_bridge_info {
	atomic_t use;			/* reference count */
	enum {
		BRNF_PROTO_UNCHANGED,
		BRNF_PROTO_8021Q,
		BRNF_PROTO_PPPOE
	} orig_proto:8;
	u8 pkt_otherhost:1;
	u8 in_prerouting:1;
	u8 bridged_dnat:1;
	__u16 frag_max_size;
	struct net_device *physindev;

	/* always valid & non-NULL from FORWARD on, for physdev match */
	struct net_device *physoutdev;
	union {
		/* prerouting: detect dnat in orig/reply direction */
		__be32 ipv4_daddr;
		struct in6_addr ipv6_daddr;

		/* after prerouting + nat detected: store original source
		 * mac since neigh resolution overwrites it, only used while
		 * skb is out in neigh layer.
		 */
		char neigh_header[8];
	};
};
#endif
279
/* Head of a doubly-linked list of sk_buffs. The first two members mirror
 * the next/prev pair at the start of struct sk_buff so a list head can be
 * cast to an element for the circular-list idiom.
 */
struct sk_buff_head {
	/* These two members must be first. */
	struct sk_buff *next;
	struct sk_buff *prev;

	__u32 qlen;		/* queue length: number of skbs on the list */
	spinlock_t lock;
};
288
289 struct sk_buff;
290
291 /* To allow 64K frame to be packed as single skb without frag_list we
292 * require 64K/PAGE_SIZE pages plus 1 additional page to allow for
293 * buffers which do not start on a page boundary.
294 *
295 * Since GRO uses frags we allocate at least 16 regardless of page
296 * size.
297 */
298 #if (65536/PAGE_SIZE + 1) < 16
299 #define MAX_SKB_FRAGS 16UL
300 #else
301 #define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
302 #endif
303 extern int sysctl_max_skb_frags;
304
305 /* Set skb_shinfo(skb)->gso_size to this in case you want skb_segment to
306 * segment using its current segmentation instead.
307 */
308 #define GSO_BY_FRAGS 0xFFFF
309
/* One paged fragment of an skb's data area. */
typedef struct skb_frag_struct skb_frag_t;

struct skb_frag_struct {
	struct {
		struct page *p;		/* the page holding this fragment */
	} page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	__u32 page_offset;
	__u32 size;
#else
	/* 16 bits suffice when pages are smaller than 64KB on 32-bit */
	__u16 page_offset;
	__u16 size;
#endif
};
324
325 static inline unsigned int skb_frag_size(const skb_frag_t *frag)
326 {
327 return frag->size;
328 }
329
330 static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
331 {
332 frag->size = size;
333 }
334
335 static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
336 {
337 frag->size += delta;
338 }
339
340 static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
341 {
342 frag->size -= delta;
343 }
344
345 #define HAVE_HW_TIME_STAMP
346
/**
 * struct skb_shared_hwtstamps - hardware time stamps
 * @hwtstamp: hardware time stamp transformed into duration
 *	since arbitrary point in time
 *
 * Software time stamps generated by ktime_get_real() are stored in
 * skb->tstamp.
 *
 * hwtstamps can only be compared against other hwtstamps from
 * the same device.
 *
 * This structure is attached to packets as part of the
 * &skb_shared_info. Use skb_hwtstamps() to get a pointer.
 */
struct skb_shared_hwtstamps {
	ktime_t hwtstamp;
};
364
/* Definitions for tx_flags in struct skb_shared_info; the flags are
 * single bits and are ORed together into the tx_flags byte.
 */
enum {
	/* generate hardware time stamp */
	SKBTX_HW_TSTAMP = 1 << 0,

	/* generate software time stamp when queueing packet to NIC */
	SKBTX_SW_TSTAMP = 1 << 1,

	/* device driver is going to provide hardware time stamp */
	SKBTX_IN_PROGRESS = 1 << 2,

	/* device driver supports TX zero-copy buffers */
	SKBTX_DEV_ZEROCOPY = 1 << 3,

	/* generate wifi status information (where possible) */
	SKBTX_WIFI_STATUS = 1 << 4,

	/* This indicates at least one fragment might be overwritten
	 * (as in vmsplice(), sendfile() ...)
	 * If we need to compute a TX checksum, we'll need to copy
	 * all frags to avoid possible bad checksum
	 */
	SKBTX_SHARED_FRAG = 1 << 5,

	/* generate software time stamp when entering packet scheduling */
	SKBTX_SCHED_TSTAMP = 1 << 6,
};
392
393 #define SKBTX_ANY_SW_TSTAMP (SKBTX_SW_TSTAMP | \
394 SKBTX_SCHED_TSTAMP)
395 #define SKBTX_ANY_TSTAMP (SKBTX_HW_TSTAMP | SKBTX_ANY_SW_TSTAMP)
396
/*
 * The callback notifies userspace to release buffers when skb DMA is done in
 * lower device, the skb last reference should be 0 when calling this.
 * The zerocopy_success argument is true if zero copy transmit occurred,
 * false on data copy or out of memory error caused by data copy attempt.
 * The ctx field is used to track device context.
 * The desc field is used to track userspace buffer index.
 */
struct ubuf_info {
	void (*callback)(struct ubuf_info *, bool zerocopy_success);
	void *ctx;		/* device context */
	unsigned long desc;	/* userspace buffer index */
};
410
/* This data is invariant across clones and lives at
 * the end of the header data, ie. at skb->end.
 */
struct skb_shared_info {
	unsigned char nr_frags;		/* number of entries used in frags[] */
	__u8 tx_flags;			/* SKBTX_* flags */
	unsigned short gso_size;
	/* Warning: this field is not always filled in (UFO)! */
	unsigned short gso_segs;
	unsigned short gso_type;	/* SKB_GSO_* flags */
	struct sk_buff *frag_list;
	struct skb_shared_hwtstamps hwtstamps;
	u32 tskey;
	__be32 ip6_frag_id;

	/*
	 * Warning : all fields before dataref are cleared in __alloc_skb()
	 */
	atomic_t dataref;

	/* Intermediate layers must ensure that destructor_arg
	 * remains valid until skb destructor */
	void *destructor_arg;

	/* must be last field, see pskb_expand_head() */
	skb_frag_t frags[MAX_SKB_FRAGS];
};
438
439 /* We divide dataref into two halves. The higher 16 bits hold references
440 * to the payload part of skb->data. The lower 16 bits hold references to
441 * the entire skb->data. A clone of a headerless skb holds the length of
442 * the header in skb->hdr_len.
443 *
444 * All users must obey the rule that the skb->data reference count must be
445 * greater than or equal to the payload reference count.
446 *
447 * Holding a reference to the payload part means that the user does not
448 * care about modifications to the header part of skb->data.
449 */
450 #define SKB_DATAREF_SHIFT 16
451 #define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)
452
453
/* Fast-clone states kept in the 2-bit skb->fclone field. */
enum {
	SKB_FCLONE_UNAVAILABLE,	/* skb has no fclone (from head_cache) */
	SKB_FCLONE_ORIG,	/* orig skb (from fclone_cache) */
	SKB_FCLONE_CLONE,	/* companion fclone skb (from fclone_cache) */
};
459
/* SKB_GSO_* bit flags describing segmentation-offload properties;
 * stored in skb_shared_info's gso_type field.
 */
enum {
	SKB_GSO_TCPV4 = 1 << 0,
	SKB_GSO_UDP = 1 << 1,

	/* This indicates the skb is from an untrusted source. */
	SKB_GSO_DODGY = 1 << 2,

	/* This indicates the tcp segment has CWR set. */
	SKB_GSO_TCP_ECN = 1 << 3,

	SKB_GSO_TCP_FIXEDID = 1 << 4,

	SKB_GSO_TCPV6 = 1 << 5,

	SKB_GSO_FCOE = 1 << 6,

	SKB_GSO_GRE = 1 << 7,

	SKB_GSO_GRE_CSUM = 1 << 8,

	SKB_GSO_IPXIP4 = 1 << 9,

	SKB_GSO_IPXIP6 = 1 << 10,

	SKB_GSO_UDP_TUNNEL = 1 << 11,

	SKB_GSO_UDP_TUNNEL_CSUM = 1 << 12,

	SKB_GSO_PARTIAL = 1 << 13,

	SKB_GSO_TUNNEL_REMCSUM = 1 << 14,

	SKB_GSO_SCTP = 1 << 15,
};
494
#if BITS_PER_LONG > 32
#define NET_SKBUFF_DATA_USES_OFFSET 1
#endif

/* Cursor type for skb->tail and skb->end: on 64-bit builds an offset is
 * stored (NET_SKBUFF_DATA_USES_OFFSET, saves 8 bytes over a pointer);
 * on 32-bit builds a raw pointer is stored.
 */
#ifdef NET_SKBUFF_DATA_USES_OFFSET
typedef unsigned int sk_buff_data_t;
#else
typedef unsigned char *sk_buff_data_t;
#endif
504
/**
 * struct skb_mstamp - multi resolution time stamps
 * @stamp_us: timestamp in us resolution
 * @stamp_jiffies: timestamp in jiffies
 *
 * The anonymous union lets both stamps be read or copied atomically as
 * one 64-bit value via @v64.
 */
struct skb_mstamp {
	union {
		u64 v64;
		struct {
			u32 stamp_us;
			u32 stamp_jiffies;
		};
	};
};
519
/**
 * skb_mstamp_get - get current timestamp
 * @cl: place to store timestamps
 *
 * Samples local_clock() (nanoseconds) and jiffies, storing a microsecond
 * stamp and a jiffies stamp side by side in @cl.
 */
static inline void skb_mstamp_get(struct skb_mstamp *cl)
{
	u64 val = local_clock();

	/* do_div() divides @val in place: nanoseconds -> microseconds */
	do_div(val, NSEC_PER_USEC);
	cl->stamp_us = (u32)val;
	cl->stamp_jiffies = (u32)jiffies;
}
532
/**
 * skb_mstamp_us_delta - compute the difference in usec between two skb_mstamp
 * @t1: pointer to newest sample
 * @t0: pointer to oldest sample
 *
 * Return: elapsed microseconds between @t0 and @t1; falls back to a
 * jiffies-based estimate when the us counters are unusable.
 */
static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1,
				      const struct skb_mstamp *t0)
{
	/* signed so a wrapped/negative us delta is detectable */
	s32 delta_us = t1->stamp_us - t0->stamp_us;
	u32 delta_jiffies = t1->stamp_jiffies - t0->stamp_jiffies;

	/* If delta_us is negative, this might be because interval is too big,
	 * or local_clock() drift is too big : fallback using jiffies.
	 * The second test guards jiffies_to_usecs() against s32 overflow.
	 */
	if (delta_us <= 0 ||
	    delta_jiffies >= (INT_MAX / (USEC_PER_SEC / HZ)))

		delta_us = jiffies_to_usecs(delta_jiffies);

	return delta_us;
}
554
/* Return true if sample @t1 was taken after sample @t0. The subtraction
 * is done in 32 bits so signed arithmetic copes with jiffies wraparound;
 * the us stamps break ties within the same jiffy.
 */
static inline bool skb_mstamp_after(const struct skb_mstamp *t1,
				    const struct skb_mstamp *t0)
{
	s32 diff = t1->stamp_jiffies - t0->stamp_jiffies;

	if (!diff)
		diff = t1->stamp_us - t0->stamp_us;
	return diff > 0;
}
564
565 /**
566 * struct sk_buff - socket buffer
567 * @next: Next buffer in list
568 * @prev: Previous buffer in list
569 * @tstamp: Time we arrived/left
570 * @rbnode: RB tree node, alternative to next/prev for netem/tcp
571 * @sk: Socket we are owned by
572 * @dev: Device we arrived on/are leaving by
573 * @cb: Control buffer. Free for use by every layer. Put private vars here
574 * @_skb_refdst: destination entry (with norefcount bit)
575 * @sp: the security path, used for xfrm
576 * @len: Length of actual data
577 * @data_len: Data length
578 * @mac_len: Length of link layer header
579 * @hdr_len: writable header length of cloned skb
580 * @csum: Checksum (must include start/offset pair)
581 * @csum_start: Offset from skb->head where checksumming should start
582 * @csum_offset: Offset from csum_start where checksum should be stored
583 * @priority: Packet queueing priority
584 * @ignore_df: allow local fragmentation
585 * @cloned: Head may be cloned (check refcnt to be sure)
586 * @ip_summed: Driver fed us an IP checksum
587 * @nohdr: Payload reference only, must not modify header
588 * @nfctinfo: Relationship of this skb to the connection
589 * @pkt_type: Packet class
590 * @fclone: skbuff clone status
591 * @ipvs_property: skbuff is owned by ipvs
592 * @peeked: this packet has been seen already, so stats have been
593 * done for it, don't do them again
594 * @nf_trace: netfilter packet trace flag
595 * @protocol: Packet protocol from driver
596 * @destructor: Destruct function
597 * @nfct: Associated connection, if any
598 * @nf_bridge: Saved data about a bridged frame - see br_netfilter.c
599 * @skb_iif: ifindex of device we arrived on
600 * @tc_index: Traffic control index
601 * @tc_verd: traffic control verdict
602 * @hash: the packet hash
603 * @queue_mapping: Queue mapping for multiqueue devices
604 * @xmit_more: More SKBs are pending for this queue
605 * @ndisc_nodetype: router type (from link layer)
606 * @ooo_okay: allow the mapping of a socket to a queue to be changed
607 * @l4_hash: indicate hash is a canonical 4-tuple hash over transport
608 * ports.
609 * @sw_hash: indicates hash was computed in software stack
610 * @wifi_acked_valid: wifi_acked was set
611 * @wifi_acked: whether frame was acked on wifi or not
612 * @no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS
613 * @napi_id: id of the NAPI struct this skb came from
614 * @secmark: security marking
615 * @mark: Generic packet mark
616 * @vlan_proto: vlan encapsulation protocol
617 * @vlan_tci: vlan tag control information
618 * @inner_protocol: Protocol (encapsulation)
619 * @inner_transport_header: Inner transport layer header (encapsulation)
620 * @inner_network_header: Network layer header (encapsulation)
621 * @inner_mac_header: Link layer header (encapsulation)
622 * @transport_header: Transport layer header
623 * @network_header: Network layer header
624 * @mac_header: Link layer header
625 * @tail: Tail pointer
626 * @end: End pointer
627 * @head: Head of buffer
628 * @data: Data head pointer
629 * @truesize: Buffer size
630 * @users: User count - see {datagram,tcp}.c
631 */
632
struct sk_buff {
	union {
		struct {
			/* These two members must be first. */
			struct sk_buff *next;
			struct sk_buff *prev;

			union {
				ktime_t tstamp;
				struct skb_mstamp skb_mstamp;
			};
		};
		struct rb_node rbnode;	/* used in netem & tcp stack */
	};
	struct sock *sk;
	struct net_device *dev;

	/*
	 * This is the control buffer. It is free to use for every
	 * layer. Please put your private variables there. If you
	 * want to keep them across layers you have to do a skb_clone()
	 * first. This is owned by whoever has the skb queued ATM.
	 */
	char cb[48] __aligned(8);

	/* destination entry; bit 0 carries the SKB_DST_NOREF flag,
	 * see skb_dst()/skb_dst_set_noref() below
	 */
	unsigned long _skb_refdst;
	void (*destructor)(struct sk_buff *skb);
#ifdef CONFIG_XFRM
	struct sec_path *sp;
#endif
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	struct nf_conntrack *nfct;
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	struct nf_bridge_info *nf_bridge;
#endif
	unsigned int len,
		     data_len;
	__u16 mac_len,
	      hdr_len;

	/* Following fields are _not_ copied in __copy_skb_header()
	 * Note that queue_mapping is here mostly to fill a hole.
	 */
	kmemcheck_bitfield_begin(flags1);
	__u16 queue_mapping;

	/* if you move cloned around you also must adapt those constants */
#ifdef __BIG_ENDIAN_BITFIELD
#define CLONED_MASK (1 << 7)
#else
#define CLONED_MASK 1
#endif
#define CLONED_OFFSET() offsetof(struct sk_buff, __cloned_offset)

	__u8 __cloned_offset[0];	/* marker for CLONED_OFFSET() */
	__u8 cloned:1,
	     nohdr:1,
	     fclone:2,
	     peeked:1,
	     head_frag:1,
	     xmit_more:1,
	     __unused:1;		/* one bit hole */
	kmemcheck_bitfield_end(flags1);

	/* fields enclosed in headers_start/headers_end are copied
	 * using a single memcpy() in __copy_skb_header()
	 */
	/* private: */
	__u32 headers_start[0];
	/* public: */

	/* if you move pkt_type around you also must adapt those constants */
#ifdef __BIG_ENDIAN_BITFIELD
#define PKT_TYPE_MAX (7 << 5)
#else
#define PKT_TYPE_MAX 7
#endif
#define PKT_TYPE_OFFSET() offsetof(struct sk_buff, __pkt_type_offset)

	__u8 __pkt_type_offset[0];	/* marker for PKT_TYPE_OFFSET() */
	__u8 pkt_type:3;
	__u8 pfmemalloc:1;
	__u8 ignore_df:1;
	__u8 nfctinfo:3;

	__u8 nf_trace:1;
	__u8 ip_summed:2;		/* one of the CHECKSUM_* values */
	__u8 ooo_okay:1;
	__u8 l4_hash:1;
	__u8 sw_hash:1;
	__u8 wifi_acked_valid:1;
	__u8 wifi_acked:1;

	__u8 no_fcs:1;
	/* Indicates the inner headers are valid in the skbuff. */
	__u8 encapsulation:1;
	__u8 encap_hdr_csum:1;
	__u8 csum_valid:1;
	__u8 csum_complete_sw:1;
	__u8 csum_level:2;
	__u8 csum_bad:1;

#ifdef CONFIG_IPV6_NDISC_NODETYPE
	__u8 ndisc_nodetype:2;
#endif
	__u8 ipvs_property:1;
	__u8 inner_protocol_type:1;
	__u8 remcsum_offload:1;
#ifdef CONFIG_NET_SWITCHDEV
	__u8 offload_fwd_mark:1;
#endif
	/* 2, 4 or 5 bit hole */

#ifdef CONFIG_NET_SCHED
	__u16 tc_index;		/* traffic control index */
#ifdef CONFIG_NET_CLS_ACT
	__u16 tc_verd;		/* traffic control verdict */
#endif
#endif

	union {
		__wsum csum;
		struct {
			__u16 csum_start;
			__u16 csum_offset;
		};
	};
	__u32 priority;
	int skb_iif;
	__u32 hash;
	__be16 vlan_proto;
	__u16 vlan_tci;
#if defined(CONFIG_NET_RX_BUSY_POLL) || defined(CONFIG_XPS)
	union {
		unsigned int napi_id;
		unsigned int sender_cpu;
	};
#endif
#ifdef CONFIG_NETWORK_SECMARK
	__u32 secmark;
#endif

	union {
		__u32 mark;
		__u32 reserved_tailroom;
	};

	union {
		__be16 inner_protocol;
		__u8 inner_ipproto;
	};

	__u16 inner_transport_header;
	__u16 inner_network_header;
	__u16 inner_mac_header;

	__be16 protocol;
	__u16 transport_header;
	__u16 network_header;
	__u16 mac_header;

	/* private: */
	__u32 headers_end[0];
	/* public: */

	/* These elements must be at the end, see alloc_skb() for details. */
	sk_buff_data_t tail;
	sk_buff_data_t end;
	unsigned char *head,
		      *data;
	unsigned int truesize;
	atomic_t users;
};
807
808 #ifdef __KERNEL__
809 /*
810 * Handling routines are only of interest to the kernel
811 */
812 #include <linux/slab.h>
813
814
815 #define SKB_ALLOC_FCLONE 0x01
816 #define SKB_ALLOC_RX 0x02
817 #define SKB_ALLOC_NAPI 0x04
818
819 /* Returns true if the skb was allocated from PFMEMALLOC reserves */
820 static inline bool skb_pfmemalloc(const struct sk_buff *skb)
821 {
822 return unlikely(skb->pfmemalloc);
823 }
824
825 /*
826 * skb might have a dst pointer attached, refcounted or not.
827 * _skb_refdst low order bit is set if refcount was _not_ taken
828 */
829 #define SKB_DST_NOREF 1UL
830 #define SKB_DST_PTRMASK ~(SKB_DST_NOREF)
831
/**
 * skb_dst - returns skb dst_entry
 * @skb: buffer
 *
 * Returns skb dst_entry, regardless of reference taken or not.
 */
static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
{
	/* If refdst was not refcounted, check we still are in a
	 * rcu_read_lock section
	 */
	WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) &&
		!rcu_read_lock_held() &&
		!rcu_read_lock_bh_held());
	/* mask off the NOREF flag bit to recover the pointer */
	return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK);
}
848
849 /**
850 * skb_dst_set - sets skb dst
851 * @skb: buffer
852 * @dst: dst entry
853 *
854 * Sets skb dst, assuming a reference was taken on dst and should
855 * be released by skb_dst_drop()
856 */
857 static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
858 {
859 skb->_skb_refdst = (unsigned long)dst;
860 }
861
/**
 * skb_dst_set_noref - sets skb dst, hopefully, without taking reference
 * @skb: buffer
 * @dst: dst entry
 *
 * Sets skb dst, assuming a reference was not taken on dst.
 * If dst entry is cached, we do not take reference and dst_release
 * will be avoided by refdst_drop. If dst entry is not cached, we take
 * reference, so that last dst_release can destroy the dst immediately.
 */
static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst)
{
	/* only legal under RCU protection, since no reference is held */
	WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
	/* tag bit 0 so skb_dst() and friends know no reference was taken */
	skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF;
}
877
878 /**
879 * skb_dst_is_noref - Test if skb dst isn't refcounted
880 * @skb: buffer
881 */
882 static inline bool skb_dst_is_noref(const struct sk_buff *skb)
883 {
884 return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb);
885 }
886
/* View the skb's dst entry as a routing table entry. */
static inline struct rtable *skb_rtable(const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	return (struct rtable *)dst;
}
891
892 /* For mangling skb->pkt_type from user space side from applications
893 * such as nft, tc, etc, we only allow a conservative subset of
894 * possible pkt_types to be set.
895 */
896 static inline bool skb_pkt_type_ok(u32 ptype)
897 {
898 return ptype <= PACKET_OTHERHOST;
899 }
900
901 void kfree_skb(struct sk_buff *skb);
902 void kfree_skb_list(struct sk_buff *segs);
903 void skb_tx_error(struct sk_buff *skb);
904 void consume_skb(struct sk_buff *skb);
905 void __kfree_skb(struct sk_buff *skb);
906 extern struct kmem_cache *skbuff_head_cache;
907
908 void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
909 bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
910 bool *fragstolen, int *delta_truesize);
911
912 struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
913 int node);
914 struct sk_buff *__build_skb(void *data, unsigned int frag_size);
915 struct sk_buff *build_skb(void *data, unsigned int frag_size);
916 static inline struct sk_buff *alloc_skb(unsigned int size,
917 gfp_t priority)
918 {
919 return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
920 }
921
922 struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
923 unsigned long data_len,
924 int max_page_order,
925 int *errcode,
926 gfp_t gfp_mask);
927
/* Layout of fast clones : [skb1][skb2][fclone_ref] */
struct sk_buff_fclones {
	struct sk_buff skb1;	/* the original skb (SKB_FCLONE_ORIG) */

	struct sk_buff skb2;	/* the companion clone (SKB_FCLONE_CLONE) */

	atomic_t fclone_ref;	/* refcount shared by the pair */
};
936
/**
 * skb_fclone_busy - check if fclone is busy
 * @sk: socket the skb is expected to still belong to
 * @skb: buffer
 *
 * Returns true if skb is a fast clone, and its clone is not freed.
 * Some drivers call skb_orphan() in their ndo_start_xmit(),
 * so we also check that this didn't happen.
 */
static inline bool skb_fclone_busy(const struct sock *sk,
				   const struct sk_buff *skb)
{
	const struct sk_buff_fclones *fclones;

	/* @skb is the skb1 member of its sk_buff_fclones container */
	fclones = container_of(skb, struct sk_buff_fclones, skb1);

	return skb->fclone == SKB_FCLONE_ORIG &&
	       atomic_read(&fclones->fclone_ref) > 1 &&
	       fclones->skb2.sk == sk;
}
956
957 static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
958 gfp_t priority)
959 {
960 return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE);
961 }
962
963 struct sk_buff *__alloc_skb_head(gfp_t priority, int node);
964 static inline struct sk_buff *alloc_skb_head(gfp_t priority)
965 {
966 return __alloc_skb_head(priority, -1);
967 }
968
969 struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
970 int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
971 struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority);
972 struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t priority);
973 struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
974 gfp_t gfp_mask, bool fclone);
975 static inline struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom,
976 gfp_t gfp_mask)
977 {
978 return __pskb_copy_fclone(skb, headroom, gfp_mask, false);
979 }
980
981 int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask);
982 struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
983 unsigned int headroom);
984 struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom,
985 int newtailroom, gfp_t priority);
986 int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
987 int offset, int len);
988 int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset,
989 int len);
990 int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer);
991 int skb_pad(struct sk_buff *skb, int pad);
992 #define dev_kfree_skb(a) consume_skb(a)
993
994 int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
995 int getfrag(void *from, char *to, int offset,
996 int len, int odd, struct sk_buff *skb),
997 void *from, int length);
998
999 int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
1000 int offset, size_t size);
1001
/* Iteration state for the skb_prepare_seq_read()/skb_seq_read()/
 * skb_abort_seq_read() sequential-read API declared below.
 */
struct skb_seq_state {
	__u32 lower_offset;
	__u32 upper_offset;
	__u32 frag_idx;
	__u32 stepped_offset;
	struct sk_buff *root_skb;
	struct sk_buff *cur_skb;
	__u8 *frag_data;
};
1011
1012 void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
1013 unsigned int to, struct skb_seq_state *st);
1014 unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
1015 struct skb_seq_state *st);
1016 void skb_abort_seq_read(struct skb_seq_state *st);
1017
1018 unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
1019 unsigned int to, struct ts_config *config);
1020
1021 /*
1022 * Packet hash types specify the type of hash in skb_set_hash.
1023 *
1024 * Hash types refer to the protocol layer addresses which are used to
1025 * construct a packet's hash. The hashes are used to differentiate or identify
1026 * flows of the protocol layer for the hash type. Hash types are either
1027 * layer-2 (L2), layer-3 (L3), or layer-4 (L4).
1028 *
1029 * Properties of hashes:
1030 *
1031 * 1) Two packets in different flows have different hash values
1032 * 2) Two packets in the same flow should have the same hash value
1033 *
1034 * A hash at a higher layer is considered to be more specific. A driver should
1035 * set the most specific hash possible.
1036 *
1037 * A driver cannot indicate a more specific hash than the layer at which a hash
1038 * was computed. For instance an L3 hash cannot be set as an L4 hash.
1039 *
1040 * A driver may indicate a hash level which is less specific than the
1041 * actual layer the hash was computed on. For instance, a hash computed
1042 * at L4 may be considered an L3 hash. This should only be done if the
1043 * driver can't unambiguously determine that the HW computed the hash at
1044 * the higher layer. Note that the "should" in the second property above
1045 * permits this.
1046 */
1047 enum pkt_hash_types {
1048 PKT_HASH_TYPE_NONE, /* Undefined type */
1049 PKT_HASH_TYPE_L2, /* Input: src_MAC, dest_MAC */
1050 PKT_HASH_TYPE_L3, /* Input: src_IP, dst_IP */
1051 PKT_HASH_TYPE_L4, /* Input: src_IP, dst_IP, src_port, dst_port */
1052 };
1053
/**
 * skb_clear_hash - invalidate the cached flow hash of an skb
 * @skb: buffer to clear
 *
 * Zeroes the hash value and drops both the software-computed and the
 * L4 markers, so a later skb_get_hash() recomputes from scratch.
 */
1054 static inline void skb_clear_hash(struct sk_buff *skb)
1055 {
1056 skb->hash = 0;
1057 skb->sw_hash = 0;
1058 skb->l4_hash = 0;
1059 }
1060
/**
 * skb_clear_hash_if_not_l4 - invalidate the cached hash unless it is L4
 * @skb: buffer to conditionally clear
 *
 * Clears the hash state only when the cached hash does not cover
 * layer-4 (port) information.
 */
1061 static inline void skb_clear_hash_if_not_l4(struct sk_buff *skb)
1062 {
1063 if (!skb->l4_hash)
1064 skb_clear_hash(skb);
1065 }
1066
/**
 * __skb_set_hash - store a hash value and its provenance on an skb
 * @skb: buffer to modify
 * @hash: hash value to store
 * @is_sw: true if the hash was computed in software
 * @is_l4: true if the hash covers layer-4 information
 */
1067 static inline void
1068 __skb_set_hash(struct sk_buff *skb, __u32 hash, bool is_sw, bool is_l4)
1069 {
1070 skb->l4_hash = is_l4;
1071 skb->sw_hash = is_sw;
1072 skb->hash = hash;
1073 }
1074
/**
 * skb_set_hash - record a hardware-computed hash on an skb
 * @skb: buffer to modify
 * @hash: hash value reported by the device
 * @type: pkt_hash_types level at which the hardware hashed
 *
 * Marks the hash as hardware-provided (is_sw = false); only
 * PKT_HASH_TYPE_L4 sets the L4 flag.
 */
1075 static inline void
1076 skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
1077 {
1078 /* Used by drivers to set hash from HW */
1079 __skb_set_hash(skb, hash, false, type == PKT_HASH_TYPE_L4);
1080 }
1081
/**
 * __skb_set_sw_hash - record a software-computed hash on an skb
 * @skb: buffer to modify
 * @hash: hash value to store
 * @is_l4: true if the hash covers layer-4 information
 */
1082 static inline void
1083 __skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4)
1084 {
1085 __skb_set_hash(skb, hash, true, is_l4);
1086 }
1087
1088 void __skb_get_hash(struct sk_buff *skb);
1089 u32 __skb_get_hash_symmetric(struct sk_buff *skb);
1090 u32 skb_get_poff(const struct sk_buff *skb);
1091 u32 __skb_get_poff(const struct sk_buff *skb, void *data,
1092 const struct flow_keys *keys, int hlen);
1093 __be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
1094 void *data, int hlen_proto);
1095
/**
 * skb_flow_get_ports - extract the port pair for a flow from skb data
 * @skb: buffer to inspect
 * @thoff: transport header offset
 * @ip_proto: IP protocol of the transport header
 *
 * Convenience wrapper around __skb_flow_get_ports() that reads from the
 * skb itself (no external data buffer).
 */
1096 static inline __be32 skb_flow_get_ports(const struct sk_buff *skb,
1097 int thoff, u8 ip_proto)
1098 {
1099 return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
1100 }
1101
1102 void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
1103 const struct flow_dissector_key *key,
1104 unsigned int key_count);
1105
1106 bool __skb_flow_dissect(const struct sk_buff *skb,
1107 struct flow_dissector *flow_dissector,
1108 void *target_container,
1109 void *data, __be16 proto, int nhoff, int hlen,
1110 unsigned int flags);
1111
/**
 * skb_flow_dissect - dissect an skb with a given flow dissector
 * @skb: buffer to dissect
 * @flow_dissector: dissector describing which keys to extract
 * @target_container: structure the extracted keys are written into
 * @flags: dissector control flags
 *
 * Wrapper around __skb_flow_dissect() reading from the skb itself
 * (no external data buffer, offsets start at 0).
 */
1112 static inline bool skb_flow_dissect(const struct sk_buff *skb,
1113 struct flow_dissector *flow_dissector,
1114 void *target_container, unsigned int flags)
1115 {
1116 return __skb_flow_dissect(skb, flow_dissector, target_container,
1117 NULL, 0, 0, 0, flags);
1118 }
1119
/**
 * skb_flow_dissect_flow_keys - dissect an skb into a flow_keys structure
 * @skb: buffer to dissect
 * @flow: flow_keys output; zeroed here first so unset fields read as 0
 * @flags: dissector control flags
 *
 * Uses the canonical flow_keys_dissector.
 */
1120 static inline bool skb_flow_dissect_flow_keys(const struct sk_buff *skb,
1121 struct flow_keys *flow,
1122 unsigned int flags)
1123 {
1124 memset(flow, 0, sizeof(*flow));
1125 return __skb_flow_dissect(skb, &flow_keys_dissector, flow,
1126 NULL, 0, 0, 0, flags);
1127 }
1128
/**
 * skb_flow_dissect_flow_keys_buf - dissect a raw buffer into flow_keys
 * @flow: flow_keys output; zeroed here first
 * @data: packet data to dissect (no skb involved)
 * @proto: protocol of the data
 * @nhoff: network header offset within @data
 * @hlen: length of @data
 * @flags: dissector control flags
 */
1129 static inline bool skb_flow_dissect_flow_keys_buf(struct flow_keys *flow,
1130 void *data, __be16 proto,
1131 int nhoff, int hlen,
1132 unsigned int flags)
1133 {
1134 memset(flow, 0, sizeof(*flow));
1135 return __skb_flow_dissect(NULL, &flow_keys_buf_dissector, flow,
1136 data, proto, nhoff, hlen, flags);
1137 }
1138
/**
 * skb_get_hash - return the flow hash of an skb, computing it on demand
 * @skb: buffer to hash
 *
 * Returns the cached hash when one was set by hardware or software;
 * otherwise calls __skb_get_hash() to compute and cache one first.
 */
1139 static inline __u32 skb_get_hash(struct sk_buff *skb)
1140 {
1141 if (!skb->l4_hash && !skb->sw_hash)
1142 __skb_get_hash(skb);
1143
1144 return skb->hash;
1145 }
1146
1147 __u32 __skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6);
1148
/**
 * skb_get_hash_flowi6 - return the flow hash, seeding it from an IPv6 flowi
 * @skb: buffer whose hash is wanted
 * @fl6: IPv6 flow description used when no hash is cached
 *
 * When no valid hash is cached, computes one from @fl6 and stores it
 * as a software hash (L4 flag taken from the dissected keys).
 */
1149 static inline __u32 skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6)
1150 {
1151 if (!skb->l4_hash && !skb->sw_hash) {
1152 struct flow_keys keys;
1153 __u32 hash = __get_hash_from_flowi6(fl6, &keys);
1154
1155 __skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
1156 }
1157
1158 return skb->hash;
1159 }
1160
1161 __u32 __skb_get_hash_flowi4(struct sk_buff *skb, const struct flowi4 *fl);
1162
/**
 * skb_get_hash_flowi4 - return the flow hash, seeding it from an IPv4 flowi
 * @skb: buffer whose hash is wanted
 * @fl4: IPv4 flow description used when no hash is cached
 *
 * When no valid hash is cached, computes one from @fl4 and stores it
 * as a software hash (L4 flag taken from the dissected keys).
 */
1163 static inline __u32 skb_get_hash_flowi4(struct sk_buff *skb, const struct flowi4 *fl4)
1164 {
1165 if (!skb->l4_hash && !skb->sw_hash) {
1166 struct flow_keys keys;
1167 __u32 hash = __get_hash_from_flowi4(fl4, &keys);
1168
1169 __skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
1170 }
1171
1172 return skb->hash;
1173 }
1174
1175 __u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb);
1176
/**
 * skb_get_hash_raw - return the cached hash without computing one
 * @skb: buffer to read
 *
 * Unlike skb_get_hash(), never triggers a software recomputation;
 * may return 0 if no hash has been set.
 */
1177 static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
1178 {
1179 return skb->hash;
1180 }
1181
/**
 * skb_copy_hash - copy flow hash state from one skb to another
 * @to: destination buffer
 * @from: source buffer
 *
 * Copies the hash value together with its provenance flags.
 * (Dropped the stray ';' that followed the closing brace — it was an
 * empty file-scope declaration.)
 */
1182 static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
1183 {
1184 to->hash = from->hash;
1185 to->sw_hash = from->sw_hash;
1186 to->l4_hash = from->l4_hash;
1187 }
1188
1189 #ifdef NET_SKBUFF_DATA_USES_OFFSET
1190 static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
1191 {
1192 return skb->head + skb->end;
1193 }
1194
1195 static inline unsigned int skb_end_offset(const struct sk_buff *skb)
1196 {
1197 return skb->end;
1198 }
1199 #else
1200 static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
1201 {
1202 return skb->end;
1203 }
1204
1205 static inline unsigned int skb_end_offset(const struct sk_buff *skb)
1206 {
1207 return skb->end - skb->head;
1208 }
1209 #endif
1210
1211 /* Internal */
1212 #define skb_shinfo(SKB) ((struct skb_shared_info *)(skb_end_pointer(SKB)))
1213
/* Return a pointer to the shared hardware-timestamp area of @skb
 * (lives in the skb_shared_info at the end of the data buffer).
 */
1214 static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
1215 {
1216 return &skb_shinfo(skb)->hwtstamps;
1217 }
1218
1219 /**
1220 * skb_queue_empty - check if a queue is empty
1221 * @list: queue head
1222 *
1223 * Returns true if the queue is empty, false otherwise.
1224 */
1225 static inline int skb_queue_empty(const struct sk_buff_head *list)
1226 {
1227 return list->next == (const struct sk_buff *) list;
1228 }
1229
1230 /**
1231 * skb_queue_is_last - check if skb is the last entry in the queue
1232 * @list: queue head
1233 * @skb: buffer
1234 *
1235 * Returns true if @skb is the last buffer on the list.
1236 */
1237 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
1238 const struct sk_buff *skb)
1239 {
1240 return skb->next == (const struct sk_buff *) list;
1241 }
1242
1243 /**
1244 * skb_queue_is_first - check if skb is the first entry in the queue
1245 * @list: queue head
1246 * @skb: buffer
1247 *
1248 * Returns true if @skb is the first buffer on the list.
1249 */
1250 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
1251 const struct sk_buff *skb)
1252 {
1253 return skb->prev == (const struct sk_buff *) list;
1254 }
1255
1256 /**
1257 * skb_queue_next - return the next packet in the queue
1258 * @list: queue head
1259 * @skb: current buffer
1260 *
1261 * Return the next packet in @list after @skb. It is only valid to
1262 * call this if skb_queue_is_last() evaluates to false.
1263 */
1264 static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
1265 const struct sk_buff *skb)
1266 {
1267 /* This BUG_ON may seem severe, but if we just return then we
1268 * are going to dereference garbage.
1269 */
1270 BUG_ON(skb_queue_is_last(list, skb));
1271 return skb->next;
1272 }
1273
1274 /**
1275 * skb_queue_prev - return the prev packet in the queue
1276 * @list: queue head
1277 * @skb: current buffer
1278 *
1279 * Return the prev packet in @list before @skb. It is only valid to
1280 * call this if skb_queue_is_first() evaluates to false.
1281 */
1282 static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
1283 const struct sk_buff *skb)
1284 {
1285 /* This BUG_ON may seem severe, but if we just return then we
1286 * are going to dereference garbage.
1287 */
1288 BUG_ON(skb_queue_is_first(list, skb));
1289 return skb->prev;
1290 }
1291
1292 /**
1293 * skb_get - reference buffer
1294 * @skb: buffer to reference
1295 *
1296 * Makes another reference to a socket buffer and returns a pointer
1297 * to the buffer.
1298 */
1299 static inline struct sk_buff *skb_get(struct sk_buff *skb)
1300 {
1301 atomic_inc(&skb->users);
1302 return skb;
1303 }
1304
1305 /*
1306 * If users == 1, we are the only owner and can avoid a redundant
1307 * atomic change.
1308 */
1309
1310 /**
1311 * skb_cloned - is the buffer a clone
1312 * @skb: buffer to check
1313 *
1314 * Returns true if the buffer was generated with skb_clone() and is
1315 * one of multiple shared copies of the buffer. Cloned buffers are
1316 * shared data so must not be written to under normal circumstances.
1317 */
1318 static inline int skb_cloned(const struct sk_buff *skb)
1319 {
1320 return skb->cloned &&
1321 (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
1322 }
1323
/**
 * skb_unclone - ensure an skb's data is not shared with a clone
 * @skb: buffer to operate on
 * @pri: GFP allocation mask for a possible copy
 *
 * When @skb is cloned, makes the data private via pskb_expand_head()
 * and returns its result; otherwise returns 0. May sleep if @pri
 * allows blocking.
 */
1324 static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
1325 {
1326 might_sleep_if(gfpflags_allow_blocking(pri));
1327
1328 if (skb_cloned(skb))
1329 return pskb_expand_head(skb, 0, 0, pri);
1330
1331 return 0;
1332 }
1333
1334 /**
1335 * skb_header_cloned - is the header a clone
1336 * @skb: buffer to check
1337 *
1338 * Returns true if modifying the header part of the buffer requires
1339 * the data to be copied.
1340 */
1341 static inline int skb_header_cloned(const struct sk_buff *skb)
1342 {
1343 int dataref;
1344
1345 if (!skb->cloned)
1346 return 0;
1347
1348 dataref = atomic_read(&skb_shinfo(skb)->dataref);
1349 dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
1350 return dataref != 1;
1351 }
1352
1353 static inline int skb_header_unclone(struct sk_buff *skb, gfp_t pri)
1354 {
1355 might_sleep_if(gfpflags_allow_blocking(pri));
1356
1357 if (skb_header_cloned(skb))
1358 return pskb_expand_head(skb, 0, 0, pri);
1359
1360 return 0;
1361 }
1362
1363 /**
1364 * skb_header_release - release reference to header
1365 * @skb: buffer to operate on
1366 *
1367 * Drop a reference to the header part of the buffer. This is done
1368 * by acquiring a payload reference. You must not read from the header
1369 * part of skb->data after this.
1370 * Note : Check if you can use __skb_header_release() instead.
1371 */
1372 static inline void skb_header_release(struct sk_buff *skb)
1373 {
1374 BUG_ON(skb->nohdr);
1375 skb->nohdr = 1;
1376 atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref);
1377 }
1378
1379 /**
1380 * __skb_header_release - release reference to header
1381 * @skb: buffer to operate on
1382 *
1383 * Variant of skb_header_release() assuming skb is private to caller.
1384 * We can avoid one atomic operation.
1385 */
1386 static inline void __skb_header_release(struct sk_buff *skb)
1387 {
1388 skb->nohdr = 1;
1389 atomic_set(&skb_shinfo(skb)->dataref, 1 + (1 << SKB_DATAREF_SHIFT));
1390 }
1391
1392
1393 /**
1394 * skb_shared - is the buffer shared
1395 * @skb: buffer to check
1396 *
1397 * Returns true if more than one person has a reference to this
1398 * buffer.
1399 */
1400 static inline int skb_shared(const struct sk_buff *skb)
1401 {
1402 return atomic_read(&skb->users) != 1;
1403 }
1404
1405 /**
1406 * skb_share_check - check if buffer is shared and if so clone it
1407 * @skb: buffer to check
1408 * @pri: priority for memory allocation
1409 *
1410 * If the buffer is shared the buffer is cloned and the old copy
1411 * drops a reference. A new clone with a single reference is returned.
1412 * If the buffer is not shared the original buffer is returned. When
1413 * being called from interrupt status or with spinlocks held pri must
1414 * be GFP_ATOMIC.
1415 *
1416 * NULL is returned on a memory allocation failure.
1417 */
1418 static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri)
1419 {
1420 might_sleep_if(gfpflags_allow_blocking(pri));
1421 if (skb_shared(skb)) {
1422 struct sk_buff *nskb = skb_clone(skb, pri);
1423
1424 if (likely(nskb))
1425 consume_skb(skb);
1426 else
1427 kfree_skb(skb);
1428 skb = nskb;
1429 }
1430 return skb;
1431 }
1432
1433 /*
1434 * Copy shared buffers into a new sk_buff. We effectively do COW on
1435 * packets to handle cases where we have a local reader and forward
1436 * and a couple of other messy ones. The normal one is tcpdumping
1437 * a packet that's being forwarded.
1438 */
1439
1440 /**
1441 * skb_unshare - make a copy of a shared buffer
1442 * @skb: buffer to check
1443 * @pri: priority for memory allocation
1444 *
1445 * If the socket buffer is a clone then this function creates a new
1446 * copy of the data, drops a reference count on the old copy and returns
1447 * the new copy with the reference count at 1. If the buffer is not a clone
1448 * the original buffer is returned. When called with a spinlock held or
1449 * from interrupt state @pri must be %GFP_ATOMIC
1450 *
1451 * %NULL is returned on a memory allocation failure.
1452 */
1453 static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
1454 gfp_t pri)
1455 {
1456 might_sleep_if(gfpflags_allow_blocking(pri));
1457 if (skb_cloned(skb)) {
1458 struct sk_buff *nskb = skb_copy(skb, pri);
1459
1460 /* Free our shared copy */
1461 if (likely(nskb))
1462 consume_skb(skb);
1463 else
1464 kfree_skb(skb);
1465 skb = nskb;
1466 }
1467 return skb;
1468 }
1469
1470 /**
1471 * skb_peek - peek at the head of an &sk_buff_head
1472 * @list_: list to peek at
1473 *
1474 * Peek an &sk_buff. Unlike most other operations you _MUST_
1475 * be careful with this one. A peek leaves the buffer on the
1476 * list and someone else may run off with it. You must hold
1477 * the appropriate locks or have a private queue to do this.
1478 *
1479 * Returns %NULL for an empty list or a pointer to the head element.
1480 * The reference count is not incremented and the reference is therefore
1481 * volatile. Use with caution.
1482 */
1483 static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_)
1484 {
1485 struct sk_buff *skb = list_->next;
1486
1487 if (skb == (struct sk_buff *)list_)
1488 skb = NULL;
1489 return skb;
1490 }
1491
1492 /**
1493 * skb_peek_next - peek skb following the given one from a queue
1494 * @skb: skb to start from
1495 * @list_: list to peek at
1496 *
1497 * Returns %NULL when the end of the list is met or a pointer to the
1498 * next element. The reference count is not incremented and the
1499 * reference is therefore volatile. Use with caution.
1500 */
1501 static inline struct sk_buff *skb_peek_next(struct sk_buff *skb,
1502 const struct sk_buff_head *list_)
1503 {
1504 struct sk_buff *next = skb->next;
1505
1506 if (next == (struct sk_buff *)list_)
1507 next = NULL;
1508 return next;
1509 }
1510
1511 /**
1512 * skb_peek_tail - peek at the tail of an &sk_buff_head
1513 * @list_: list to peek at
1514 *
1515 * Peek an &sk_buff. Unlike most other operations you _MUST_
1516 * be careful with this one. A peek leaves the buffer on the
1517 * list and someone else may run off with it. You must hold
1518 * the appropriate locks or have a private queue to do this.
1519 *
1520 * Returns %NULL for an empty list or a pointer to the tail element.
1521 * The reference count is not incremented and the reference is therefore
1522 * volatile. Use with caution.
1523 */
1524 static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_)
1525 {
1526 struct sk_buff *skb = list_->prev;
1527
1528 if (skb == (struct sk_buff *)list_)
1529 skb = NULL;
1530 return skb;
1531
1532 }
1533
1534 /**
1535 * skb_queue_len - get queue length
1536 * @list_: list to measure
1537 *
1538 * Return the length of an &sk_buff queue.
1539 */
1540 static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
1541 {
1542 return list_->qlen;
1543 }
1544
1545 /**
1546 * __skb_queue_head_init - initialize non-spinlock portions of sk_buff_head
1547 * @list: queue to initialize
1548 *
1549 * This initializes only the list and queue length aspects of
1550 * an sk_buff_head object. This allows to initialize the list
1551 * aspects of an sk_buff_head without reinitializing things like
1552 * the spinlock. It can also be used for on-stack sk_buff_head
1553 * objects where the spinlock is known to not be used.
1554 */
1555 static inline void __skb_queue_head_init(struct sk_buff_head *list)
1556 {
1557 list->prev = list->next = (struct sk_buff *)list;
1558 list->qlen = 0;
1559 }
1560
1561 /*
1562 * This function creates a split out lock class for each invocation;
1563 * this is needed for now since a whole lot of users of the skb-queue
1564 * infrastructure in drivers have different locking usage (in hardirq)
1565 * than the networking core (in softirq only). In the long run either the
1566 * network layer or drivers should need annotation to consolidate the
1567 * main types of usage into 3 classes.
1568 */
1569 static inline void skb_queue_head_init(struct sk_buff_head *list)
1570 {
1571 spin_lock_init(&list->lock);
1572 __skb_queue_head_init(list);
1573 }
1574
1575 static inline void skb_queue_head_init_class(struct sk_buff_head *list,
1576 struct lock_class_key *class)
1577 {
1578 skb_queue_head_init(list);
1579 lockdep_set_class(&list->lock, class);
1580 }
1581
1582 /*
1583 * Insert an sk_buff on a list.
1584 *
1585 * The "__skb_xxxx()" functions are the non-atomic ones that
1586 * can only be called with interrupts disabled.
1587 */
1588 void skb_insert(struct sk_buff *old, struct sk_buff *newsk,
1589 struct sk_buff_head *list);
/* Link @newsk between @prev and @next and bump @list's length.
 * Non-atomic helper: the caller must hold the queue lock or otherwise
 * own the list.
 */
1590 static inline void __skb_insert(struct sk_buff *newsk,
1591 struct sk_buff *prev, struct sk_buff *next,
1592 struct sk_buff_head *list)
1593 {
1594 newsk->next = next;
1595 newsk->prev = prev;
1596 next->prev = prev->next = newsk;
1597 list->qlen++;
1598 }
1599
/* Splice every buffer of @list between @prev and @next. The caller is
 * responsible for updating the destination qlen and, if needed, for
 * reinitialising @list (see the skb_queue_splice*() wrappers below).
 */
1600 static inline void __skb_queue_splice(const struct sk_buff_head *list,
1601 struct sk_buff *prev,
1602 struct sk_buff *next)
1603 {
1604 struct sk_buff *first = list->next;
1605 struct sk_buff *last = list->prev;
1606
1607 first->prev = prev;
1608 prev->next = first;
1609
1610 last->next = next;
1611 next->prev = last;
1612 }
1613
1614 /**
1615 * skb_queue_splice - join two skb lists, this is designed for stacks
1616 * @list: the new list to add
1617 * @head: the place to add it in the first list
1618 */
1619 static inline void skb_queue_splice(const struct sk_buff_head *list,
1620 struct sk_buff_head *head)
1621 {
1622 if (!skb_queue_empty(list)) {
1623 __skb_queue_splice(list, (struct sk_buff *) head, head->next);
1624 head->qlen += list->qlen;
1625 }
1626 }
1627
1628 /**
1629 * skb_queue_splice_init - join two skb lists and reinitialise the emptied list
1630 * @list: the new list to add
1631 * @head: the place to add it in the first list
1632 *
1633 * The list at @list is reinitialised
1634 */
1635 static inline void skb_queue_splice_init(struct sk_buff_head *list,
1636 struct sk_buff_head *head)
1637 {
1638 if (!skb_queue_empty(list)) {
1639 __skb_queue_splice(list, (struct sk_buff *) head, head->next);
1640 head->qlen += list->qlen;
1641 __skb_queue_head_init(list);
1642 }
1643 }
1644
1645 /**
1646 * skb_queue_splice_tail - join two skb lists, each list being a queue
1647 * @list: the new list to add
1648 * @head: the place to add it in the first list
1649 */
1650 static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
1651 struct sk_buff_head *head)
1652 {
1653 if (!skb_queue_empty(list)) {
1654 __skb_queue_splice(list, head->prev, (struct sk_buff *) head);
1655 head->qlen += list->qlen;
1656 }
1657 }
1658
1659 /**
1660 * skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list
1661 * @list: the new list to add
1662 * @head: the place to add it in the first list
1663 *
1664 * Each of the lists is a queue.
1665 * The list at @list is reinitialised
1666 */
1667 static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
1668 struct sk_buff_head *head)
1669 {
1670 if (!skb_queue_empty(list)) {
1671 __skb_queue_splice(list, head->prev, (struct sk_buff *) head);
1672 head->qlen += list->qlen;
1673 __skb_queue_head_init(list);
1674 }
1675 }
1676
1677 /**
1678 * __skb_queue_after - queue a buffer after the given buffer
1679 * @list: list to use
1680 * @prev: place after this buffer
1681 * @newsk: buffer to queue
1682 *
1683 * Queue a buffer in the middle of a list. This function takes no locks
1684 * and you must therefore hold required locks before calling it.
1685 *
1686 * A buffer cannot be placed on two lists at the same time.
1687 */
1688 static inline void __skb_queue_after(struct sk_buff_head *list,
1689 struct sk_buff *prev,
1690 struct sk_buff *newsk)
1691 {
1692 __skb_insert(newsk, prev, prev->next, list);
1693 }
1694
1695 void skb_append(struct sk_buff *old, struct sk_buff *newsk,
1696 struct sk_buff_head *list);
1697
/* Queue @newsk immediately before @next on @list. Takes no locks;
 * the caller must hold them (see __skb_insert()).
 */
1698 static inline void __skb_queue_before(struct sk_buff_head *list,
1699 struct sk_buff *next,
1700 struct sk_buff *newsk)
1701 {
1702 __skb_insert(newsk, next->prev, next, list);
1703 }
1704
1705 /**
1706 * __skb_queue_head - queue a buffer at the list head
1707 * @list: list to use
1708 * @newsk: buffer to queue
1709 *
1710 * Queue a buffer at the start of a list. This function takes no locks
1711 * and you must therefore hold required locks before calling it.
1712 *
1713 * A buffer cannot be placed on two lists at the same time.
1714 */
1715 void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
1716 static inline void __skb_queue_head(struct sk_buff_head *list,
1717 struct sk_buff *newsk)
1718 {
1719 __skb_queue_after(list, (struct sk_buff *)list, newsk);
1720 }
1721
1722 /**
1723 * __skb_queue_tail - queue a buffer at the list tail
1724 * @list: list to use
1725 * @newsk: buffer to queue
1726 *
1727 * Queue a buffer at the end of a list. This function takes no locks
1728 * and you must therefore hold required locks before calling it.
1729 *
1730 * A buffer cannot be placed on two lists at the same time.
1731 */
1732 void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
1733 static inline void __skb_queue_tail(struct sk_buff_head *list,
1734 struct sk_buff *newsk)
1735 {
1736 __skb_queue_before(list, (struct sk_buff *)list, newsk);
1737 }
1738
1739 /*
1740 * remove sk_buff from list. _Must_ be called atomically, and with
1741 * the list known..
1742 */
1743 void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
/* Detach @skb from @list without locking; the caller must hold the
 * queue lock. The skb's own list pointers are cleared so stale links
 * cannot be followed afterwards.
 */
1744 static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
1745 {
1746 struct sk_buff *next, *prev;
1747
1748 list->qlen--;
1749 next = skb->next;
1750 prev = skb->prev;
1751 skb->next = skb->prev = NULL;
1752 next->prev = prev;
1753 prev->next = next;
1754 }
1755
1756 /**
1757 * __skb_dequeue - remove from the head of the queue
1758 * @list: list to dequeue from
1759 *
1760 * Remove the head of the list. This function does not take any locks
1761 * so must be used with appropriate locks held only. The head item is
1762 * returned or %NULL if the list is empty.
1763 */
1764 struct sk_buff *skb_dequeue(struct sk_buff_head *list);
1765 static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
1766 {
1767 struct sk_buff *skb = skb_peek(list);
1768 if (skb)
1769 __skb_unlink(skb, list);
1770 return skb;
1771 }
1772
1773 /**
1774 * __skb_dequeue_tail - remove from the tail of the queue
1775 * @list: list to dequeue from
1776 *
1777 * Remove the tail of the list. This function does not take any locks
1778 * so must be used with appropriate locks held only. The tail item is
1779 * returned or %NULL if the list is empty.
1780 */
1781 struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
1782 static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
1783 {
1784 struct sk_buff *skb = skb_peek_tail(list);
1785 if (skb)
1786 __skb_unlink(skb, list);
1787 return skb;
1788 }
1789
1790
/* True when some payload lives outside the linear buffer
 * (data_len counts the non-linear part).
 */
1791 static inline bool skb_is_nonlinear(const struct sk_buff *skb)
1792 {
1793 return skb->data_len;
1794 }
1795
/* Bytes of data held in the linear buffer: total length minus the
 * non-linear (paged/frag-list) part.
 */
1796 static inline unsigned int skb_headlen(const struct sk_buff *skb)
1797 {
1798 return skb->len - skb->data_len;
1799 }
1800
/* Total length of the head plus all page fragments of @skb
 * (does not follow the frag_list chain).
 */
1801 static inline int skb_pagelen(const struct sk_buff *skb)
1802 {
1803 int i, len = 0;
1804
1805 for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--)
1806 len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
1807 return len + skb_headlen(skb);
1808 }
1809
1810 /**
1811 * __skb_fill_page_desc - initialise a paged fragment in an skb
1812 * @skb: buffer containing fragment to be initialised
1813 * @i: paged fragment index to initialise
1814 * @page: the page to use for this fragment
1815 * @off: the offset to the data within @page
1816 * @size: the length of the data
1817 *
1818 * Initialises the @i'th fragment of @skb to point to @size bytes at
1819 * offset @off within @page.
1820 *
1821 * Does not take any additional reference on the fragment.
1822 */
1823 static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
1824 struct page *page, int off, int size)
1825 {
1826 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1827
1828 /*
1829 * Propagate page pfmemalloc to the skb if we can. The problem is
1830 * that not all callers have unique ownership of the page but rely
1831 * on page_is_pfmemalloc doing the right thing(tm).
1832 */
1833 frag->page.p = page;
1834 frag->page_offset = off;
1835 skb_frag_size_set(frag, size);
1836
1837 page = compound_head(page);
1838 if (page_is_pfmemalloc(page))
1839 skb->pfmemalloc = true;
1840 }
1841
1842 /**
1843 * skb_fill_page_desc - initialise a paged fragment in an skb
1844 * @skb: buffer containing fragment to be initialised
1845 * @i: paged fragment index to initialise
1846 * @page: the page to use for this fragment
1847 * @off: the offset to the data within @page
1848 * @size: the length of the data
1849 *
1850 * As per __skb_fill_page_desc() -- initialises the @i'th fragment of
1851 * @skb to point to @size bytes at offset @off within @page. In
1852 * addition updates @skb such that @i is the last fragment.
1853 *
1854 * Does not take any additional reference on the fragment.
1855 */
1856 static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
1857 struct page *page, int off, int size)
1858 {
1859 __skb_fill_page_desc(skb, i, page, off, size);
1860 skb_shinfo(skb)->nr_frags = i + 1;
1861 }
1862
1863 void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
1864 int size, unsigned int truesize);
1865
1866 void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
1867 unsigned int truesize);
1868
1869 #define SKB_PAGE_ASSERT(skb) BUG_ON(skb_shinfo(skb)->nr_frags)
1870 #define SKB_FRAG_ASSERT(skb) BUG_ON(skb_has_frag_list(skb))
1871 #define SKB_LINEAR_ASSERT(skb) BUG_ON(skb_is_nonlinear(skb))
1872
1873 #ifdef NET_SKBUFF_DATA_USES_OFFSET
1874 static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
1875 {
1876 return skb->head + skb->tail;
1877 }
1878
1879 static inline void skb_reset_tail_pointer(struct sk_buff *skb)
1880 {
1881 skb->tail = skb->data - skb->head;
1882 }
1883
1884 static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
1885 {
1886 skb_reset_tail_pointer(skb);
1887 skb->tail += offset;
1888 }
1889
1890 #else /* NET_SKBUFF_DATA_USES_OFFSET */
1891 static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
1892 {
1893 return skb->tail;
1894 }
1895
1896 static inline void skb_reset_tail_pointer(struct sk_buff *skb)
1897 {
1898 skb->tail = skb->data;
1899 }
1900
1901 static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
1902 {
1903 skb->tail = skb->data + offset;
1904 }
1905
1906 #endif /* NET_SKBUFF_DATA_USES_OFFSET */
1907
1908 /*
1909 * Add data to an sk_buff
1910 */
1911 unsigned char *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len);
1912 unsigned char *skb_put(struct sk_buff *skb, unsigned int len);
/* Non-checking variant of skb_put(): extend the used data area by
 * @len bytes at the tail and return a pointer to the start of the new
 * space. Linear skbs only (asserted); the caller must guarantee
 * sufficient tailroom.
 */
1913 static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
1914 {
1915 unsigned char *tmp = skb_tail_pointer(skb);
1916 SKB_LINEAR_ASSERT(skb);
1917 skb->tail += len;
1918 skb->len += len;
1919 return tmp;
1920 }
1921
1922 unsigned char *skb_push(struct sk_buff *skb, unsigned int len);
/* Non-checking variant of skb_push(): extend the used data area by
 * @len bytes at the head and return the new data pointer. The caller
 * must guarantee sufficient headroom.
 */
1923 static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
1924 {
1925 skb->data -= len;
1926 skb->len += len;
1927 return skb->data;
1928 }
1929
1930 unsigned char *skb_pull(struct sk_buff *skb, unsigned int len);
/* Non-checking variant of skb_pull(): remove @len bytes from the head
 * and return the new data pointer. BUGs if the pull would eat into
 * the non-linear part (len must not drop below data_len).
 */
1931 static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
1932 {
1933 skb->len -= len;
1934 BUG_ON(skb->len < skb->data_len);
1935 return skb->data += len;
1936 }
1937
/* Checked pull: returns NULL when @len exceeds the buffer length,
 * otherwise behaves like __skb_pull().
 */
1938 static inline unsigned char *skb_pull_inline(struct sk_buff *skb, unsigned int len)
1939 {
1940 return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
1941 }
1942
1943 unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);
1944
/* Pull @len bytes from the head, linearising data from the fragments
 * via __pskb_pull_tail() when the linear header is too short.
 * Returns the new data pointer, or NULL if the tail pull failed.
 */
1945 static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
1946 {
1947 if (len > skb_headlen(skb) &&
1948 !__pskb_pull_tail(skb, len - skb_headlen(skb)))
1949 return NULL;
1950 skb->len -= len;
1951 return skb->data += len;
1952 }
1953
/* Checked variant of __pskb_pull(): returns NULL when @len exceeds
 * the total buffer length.
 */
1954 static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len)
1955 {
1956 return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
1957 }
1958
/* Ensure at least @len bytes are available in the linear header area,
 * pulling data in from the fragments if required. Returns 1 on
 * success, 0 when @len exceeds the buffer or the tail pull fails.
 */
1959 static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
1960 {
1961 if (likely(len <= skb_headlen(skb)))
1962 return 1;
1963 if (unlikely(len > skb->len))
1964 return 0;
1965 return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
1966 }
1967
1968 /**
1969 * skb_headroom - bytes at buffer head
1970 * @skb: buffer to check
1971 *
1972 * Return the number of bytes of free space at the head of an &sk_buff.
1973 */
1974 static inline unsigned int skb_headroom(const struct sk_buff *skb)
1975 {
1976 return skb->data - skb->head;
1977 }
1978
1979 /**
1980 * skb_tailroom - bytes at buffer end
1981 * @skb: buffer to check
1982 *
1983 * Return the number of bytes of free space at the tail of an sk_buff
1984 */
1985 static inline int skb_tailroom(const struct sk_buff *skb)
1986 {
1987 return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
1988 }
1989
1990 /**
1991 * skb_availroom - bytes at buffer end
1992 * @skb: buffer to check
1993 *
1994 * Return the number of bytes of free space at the tail of an sk_buff
1995 * allocated by sk_stream_alloc()
1996 */
1997 static inline int skb_availroom(const struct sk_buff *skb)
1998 {
1999 if (skb_is_nonlinear(skb))
2000 return 0;
2001
2002 return skb->end - skb->tail - skb->reserved_tailroom;
2003 }
2004
2005 /**
2006 * skb_reserve - adjust headroom
2007 * @skb: buffer to alter
2008 * @len: bytes to move
2009 *
2010 * Increase the headroom of an empty &sk_buff by reducing the tail
2011 * room. This is only allowed for an empty buffer.
2012 */
2013 static inline void skb_reserve(struct sk_buff *skb, int len)
2014 {
/* Moves both pointers; caller must ensure the buffer holds no data yet. */
2015 skb->data += len;
2016 skb->tail += len;
2017 }
2018
2019 /**
2020 * skb_tailroom_reserve - adjust reserved_tailroom
2021 * @skb: buffer to alter
2022 * @mtu: maximum amount of headlen permitted
2023 * @needed_tailroom: minimum amount of reserved_tailroom
2024 *
2025 * Set reserved_tailroom so that headlen can be as large as possible but
2026 * not larger than mtu and tailroom cannot be smaller than
2027 * needed_tailroom.
2028 * The required headroom should already have been reserved before using
2029 * this function.
2030 */
2031 static inline void skb_tailroom_reserve(struct sk_buff *skb, unsigned int mtu,
2032 unsigned int needed_tailroom)
2033 {
/* reserved_tailroom only applies to linear buffers. */
2034 SKB_LINEAR_ASSERT(skb);
2035 if (mtu < skb_tailroom(skb) - needed_tailroom)
2036 /* use at most mtu */
2037 skb->reserved_tailroom = skb_tailroom(skb) - mtu;
2038 else
2039 /* use up to all available space */
2040 skb->reserved_tailroom = needed_tailroom;
2041 }
2042
/* Discriminator values for skb->inner_protocol_type. */
2043 #define ENCAP_TYPE_ETHER 0
2044 #define ENCAP_TYPE_IPPROTO 1
2045
/* Record the inner (encapsulated) protocol as an Ethertype. */
2046 static inline void skb_set_inner_protocol(struct sk_buff *skb,
2047 __be16 protocol)
2048 {
2049 skb->inner_protocol = protocol;
2050 skb->inner_protocol_type = ENCAP_TYPE_ETHER;
2051 }
2052
/* Record the inner (encapsulated) protocol as an IP protocol number. */
2053 static inline void skb_set_inner_ipproto(struct sk_buff *skb,
2054 __u8 ipproto)
2055 {
2056 skb->inner_ipproto = ipproto;
2057 skb->inner_protocol_type = ENCAP_TYPE_IPPROTO;
2058 }
2059
/* Initialise all inner header offsets from the current outer offsets. */
2060 static inline void skb_reset_inner_headers(struct sk_buff *skb)
2061 {
2062 skb->inner_mac_header = skb->mac_header;
2063 skb->inner_network_header = skb->network_header;
2064 skb->inner_transport_header = skb->transport_header;
2065 }
2066
/* Recompute mac_len as the distance from MAC header to network header. */
2067 static inline void skb_reset_mac_len(struct sk_buff *skb)
2068 {
2069 skb->mac_len = skb->network_header - skb->mac_header;
2070 }
2071
/*
 * Header accessor family. Each header position is stored as an offset
 * from skb->head; "reset" records the current skb->data position and
 * "set" applies an additional signed @offset relative to skb->data.
 */
2072 static inline unsigned char *skb_inner_transport_header(const struct sk_buff
2073 *skb)
2074 {
2075 return skb->head + skb->inner_transport_header;
2076 }
2077
2078 static inline int skb_inner_transport_offset(const struct sk_buff *skb)
2079 {
2080 return skb_inner_transport_header(skb) - skb->data;
2081 }
2082
2083 static inline void skb_reset_inner_transport_header(struct sk_buff *skb)
2084 {
2085 skb->inner_transport_header = skb->data - skb->head;
2086 }
2087
2088 static inline void skb_set_inner_transport_header(struct sk_buff *skb,
2089 const int offset)
2090 {
2091 skb_reset_inner_transport_header(skb);
2092 skb->inner_transport_header += offset;
2093 }
2094
2095 static inline unsigned char *skb_inner_network_header(const struct sk_buff *skb)
2096 {
2097 return skb->head + skb->inner_network_header;
2098 }
2099
2100 static inline void skb_reset_inner_network_header(struct sk_buff *skb)
2101 {
2102 skb->inner_network_header = skb->data - skb->head;
2103 }
2104
2105 static inline void skb_set_inner_network_header(struct sk_buff *skb,
2106 const int offset)
2107 {
2108 skb_reset_inner_network_header(skb);
2109 skb->inner_network_header += offset;
2110 }
2111
2112 static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb)
2113 {
2114 return skb->head + skb->inner_mac_header;
2115 }
2116
2117 static inline void skb_reset_inner_mac_header(struct sk_buff *skb)
2118 {
2119 skb->inner_mac_header = skb->data - skb->head;
2120 }
2121
2122 static inline void skb_set_inner_mac_header(struct sk_buff *skb,
2123 const int offset)
2124 {
2125 skb_reset_inner_mac_header(skb);
2126 skb->inner_mac_header += offset;
2127 }
/* An all-ones offset is the sentinel for "header not yet set". */
2128 static inline bool skb_transport_header_was_set(const struct sk_buff *skb)
2129 {
2130 return skb->transport_header != (typeof(skb->transport_header))~0U;
2131 }
2132
2133 static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
2134 {
2135 return skb->head + skb->transport_header;
2136 }
2137
2138 static inline void skb_reset_transport_header(struct sk_buff *skb)
2139 {
2140 skb->transport_header = skb->data - skb->head;
2141 }
2142
2143 static inline void skb_set_transport_header(struct sk_buff *skb,
2144 const int offset)
2145 {
2146 skb_reset_transport_header(skb);
2147 skb->transport_header += offset;
2148 }
2149
2150 static inline unsigned char *skb_network_header(const struct sk_buff *skb)
2151 {
2152 return skb->head + skb->network_header;
2153 }
2154
2155 static inline void skb_reset_network_header(struct sk_buff *skb)
2156 {
2157 skb->network_header = skb->data - skb->head;
2158 }
2159
2160 static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
2161 {
2162 skb_reset_network_header(skb);
2163 skb->network_header += offset;
2164 }
2165
2166 static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
2167 {
2168 return skb->head + skb->mac_header;
2169 }
2170
/* Same ~0U "unset" sentinel convention as the transport header. */
2171 static inline int skb_mac_header_was_set(const struct sk_buff *skb)
2172 {
2173 return skb->mac_header != (typeof(skb->mac_header))~0U;
2174 }
2175
2176 static inline void skb_reset_mac_header(struct sk_buff *skb)
2177 {
2178 skb->mac_header = skb->data - skb->head;
2179 }
2180
2181 static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
2182 {
2183 skb_reset_mac_header(skb);
2184 skb->mac_header += offset;
2185 }
2186
/* Drop the MAC header by making it coincide with the network header. */
2187 static inline void skb_pop_mac_header(struct sk_buff *skb)
2188 {
2189 skb->mac_header = skb->network_header;
2190 }
2191
/*
 * Best-effort setting of the transport header: keep it if already set,
 * otherwise derive it from flow dissection, falling back to the
 * caller-supplied @offset_hint when dissection fails.
 */
2192 static inline void skb_probe_transport_header(struct sk_buff *skb,
2193 const int offset_hint)
2194 {
2195 struct flow_keys keys;
2196
2197 if (skb_transport_header_was_set(skb))
2198 return;
2199 else if (skb_flow_dissect_flow_keys(skb, &keys, 0))
2200 skb_set_transport_header(skb, keys.control.thoff);
2201 else
2202 skb_set_transport_header(skb, offset_hint);
2203 }
2204
/*
 * Re-anchor the MAC header mac_len bytes before the current skb->data
 * and move the header bytes there (memmove: regions may overlap).
 * No-op if the MAC header was never set.
 */
2205 static inline void skb_mac_header_rebuild(struct sk_buff *skb)
2206 {
2207 if (skb_mac_header_was_set(skb)) {
2208 const unsigned char *old_mac = skb_mac_header(skb);
2209
2210 skb_set_mac_header(skb, -skb->mac_len);
2211 memmove(skb_mac_header(skb), old_mac, skb->mac_len);
2212 }
2213 }
2214
/* Offset of the checksum start relative to skb->data (can be negative). */
2215 static inline int skb_checksum_start_offset(const struct sk_buff *skb)
2216 {
2217 return skb->csum_start - skb_headroom(skb);
2218 }
2219
/* csum_start is stored as an offset from skb->head. */
2220 static inline unsigned char *skb_checksum_start(const struct sk_buff *skb)
2221 {
2222 return skb->head + skb->csum_start;
2223 }
2224
2225 static inline int skb_transport_offset(const struct sk_buff *skb)
2226 {
2227 return skb_transport_header(skb) - skb->data;
2228 }
2229
/* Network (e.g. IP) header length: transport offset minus network offset. */
2230 static inline u32 skb_network_header_len(const struct sk_buff *skb)
2231 {
2232 return skb->transport_header - skb->network_header;
2233 }
2234
2235 static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
2236 {
2237 return skb->inner_transport_header - skb->inner_network_header;
2238 }
2239
2240 static inline int skb_network_offset(const struct sk_buff *skb)
2241 {
2242 return skb_network_header(skb) - skb->data;
2243 }
2244
2245 static inline int skb_inner_network_offset(const struct sk_buff *skb)
2246 {
2247 return skb_inner_network_header(skb) - skb->data;
2248 }
2249
/* pskb_may_pull() for @len bytes measured from the network header. */
2250 static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
2251 {
2252 return pskb_may_pull(skb, skb_network_offset(skb) + len);
2253 }
2254
2255 /*
2256 * CPUs often take a performance hit when accessing unaligned memory
2257 * locations. The actual performance hit varies, it can be small if the
2258 * hardware handles it or large if we have to take an exception and fix it
2259 * in software.
2260 *
2261 * Since an ethernet header is 14 bytes network drivers often end up with
2262 * the IP header at an unaligned offset. The IP header can be aligned by
2263 * shifting the start of the packet by 2 bytes. Drivers should do this
2264 * with:
2265 *
2266 * skb_reserve(skb, NET_IP_ALIGN);
2267 *
2268 * The downside to this alignment of the IP header is that the DMA is now
2269 * unaligned. On some architectures the cost of an unaligned DMA is high
2270 * and this cost outweighs the gains made by aligning the IP header.
2271 *
2272 * Since this trade off varies between architectures, we allow NET_IP_ALIGN
2273 * to be overridden.
2274 */
2275 #ifndef NET_IP_ALIGN
2276 #define NET_IP_ALIGN 2
2277 #endif
2278
2279 /*
2280 * The networking layer reserves some headroom in skb data (via
2281 * dev_alloc_skb). This is used to avoid having to reallocate skb data when
2282 * the header has to grow. In the default case, if the header has to grow
2283 * 32 bytes or less we avoid the reallocation.
2284 *
2285 * Unfortunately this headroom changes the DMA alignment of the resulting
2286 * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
2287 * on some architectures. An architecture can override this value,
2288 * perhaps setting it to a cacheline in size (since that will maintain
2289 * cacheline alignment of the DMA). It must be a power of 2.
2290 *
2291 * Various parts of the networking layer expect at least 32 bytes of
2292 * headroom, you should not reduce this.
2293 *
2294 * Using max(32, L1_CACHE_BYTES) makes sense (especially with RPS)
2295 * to reduce average number of cache lines per packet.
2296 * get_rps_cpus() for example only access one 64 bytes aligned block :
2297 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
2298 */
2299 #ifndef NET_SKB_PAD
2300 #define NET_SKB_PAD max(32, L1_CACHE_BYTES)
2301 #endif
2302
/* Out-of-line trim that also handles paged data and clones. */
2303 int ___pskb_trim(struct sk_buff *skb, unsigned int len);
2304
/* Set a linear skb's length, adjusting the tail pointer; warns and
 * bails out on non-linear skbs rather than corrupting them. */
2305 static inline void __skb_set_length(struct sk_buff *skb, unsigned int len)
2306 {
2307 if (unlikely(skb_is_nonlinear(skb))) {
2308 WARN_ON(1);
2309 return;
2310 }
2311 skb->len = len;
2312 skb_set_tail_pointer(skb, len);
2313 }
2314
/* Trim a linear buffer to @len bytes (no paged-data handling). */
2315 static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
2316 {
2317 __skb_set_length(skb, len);
2318 }
2319
2320 void skb_trim(struct sk_buff *skb, unsigned int len);
2321
/* Trim to @len, taking the slow path only when paged data is present. */
2322 static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
2323 {
2324 if (skb->data_len)
2325 return ___pskb_trim(skb, len);
2326 __skb_trim(skb, len);
2327 return 0;
2328 }
2329
/* No-op when @len would not shrink the buffer. */
2330 static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
2331 {
2332 return (len < skb->len) ? __pskb_trim(skb, len) : 0;
2333 }
2334
2335 /**
2336 * pskb_trim_unique - remove end from a paged unique (not cloned) buffer
2337 * @skb: buffer to alter
2338 * @len: new length
2339 *
2340 * This is identical to pskb_trim except that the caller knows that
2341 * the skb is not cloned so we should never get an error due to out-
2342 * of-memory.
2343 */
2344 static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
2345 {
2346 int err = pskb_trim(skb, len);
2347 BUG_ON(err);
2348 }
2349
/* Grow a linear skb to @len bytes, expanding the head if the tailroom
 * is insufficient. Returns 0 or the pskb_expand_head() error. Caller
 * must ensure @len >= skb->len (diff is unsigned). */
2350 static inline int __skb_grow(struct sk_buff *skb, unsigned int len)
2351 {
2352 unsigned int diff = len - skb->len;
2353
2354 if (skb_tailroom(skb) < diff) {
2355 int ret = pskb_expand_head(skb, 0, diff - skb_tailroom(skb),
2356 GFP_ATOMIC);
2357 if (ret)
2358 return ret;
2359 }
2360 __skb_set_length(skb, len);
2361 return 0;
2362 }
2363
2364 /**
2365 * skb_orphan - orphan a buffer
2366 * @skb: buffer to orphan
2367 *
2368 * If a buffer currently has an owner then we call the owner's
2369 * destructor function and make the @skb unowned. The buffer continues
2370 * to exist but is no longer charged to its former owner.
2371 */
2372 static inline void skb_orphan(struct sk_buff *skb)
2373 {
2374 if (skb->destructor) {
2375 skb->destructor(skb);
2376 skb->destructor = NULL;
2377 skb->sk = NULL;
2378 } else {
/* An skb with an owning socket must also carry a destructor. */
2379 BUG_ON(skb->sk);
2380 }
2381 }
2382
2383 /**
2384 * skb_orphan_frags - orphan the frags contained in a buffer
2385 * @skb: buffer to orphan frags from
2386 * @gfp_mask: allocation mask for replacement pages
2387 *
2388 * For each frag in the SKB which needs a destructor (i.e. has an
2389 * owner) create a copy of that frag and release the original
2390 * page by calling the destructor.
2391 */
2392 static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
2393 {
/* Only zerocopy frags have external owners; anything else is fine as-is. */
2394 if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)))
2395 return 0;
2396 return skb_copy_ubufs(skb, gfp_mask);
2397 }
2398
2399 /**
2400 * __skb_queue_purge - empty a list
2401 * @list: list to empty
2402 *
2403 * Delete all buffers on an &sk_buff list. Each buffer is removed from
2404 * the list and one reference dropped. This function does not take the
2405 * list lock and the caller must hold the relevant locks to use it.
2406 */
2407 void skb_queue_purge(struct sk_buff_head *list);
2408 static inline void __skb_queue_purge(struct sk_buff_head *list)
2409 {
2410 struct sk_buff *skb;
2411 while ((skb = __skb_dequeue(list)) != NULL)
2412 kfree_skb(skb);
2413 }
2414
2415 void skb_rbtree_purge(struct rb_root *root);
2416
2417 void *netdev_alloc_frag(unsigned int fragsz);
2418
2419 struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length,
2420 gfp_t gfp_mask);
2421
2422 /**
2423 * netdev_alloc_skb - allocate an skbuff for rx on a specific device
2424 * @dev: network device to receive on
2425 * @length: length to allocate
2426 *
2427 * Allocate a new &sk_buff and assign it a usage count of one. The
2428 * buffer has unspecified headroom built in. Users should allocate
2429 * the headroom they think they need without accounting for the
2430 * built in space. The built in space is used for optimisations.
2431 *
2432 * %NULL is returned if there is no free memory. Although this function
2433 * allocates memory it can be called from an interrupt.
2434 */
2435 static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
2436 unsigned int length)
2437 {
/* GFP_ATOMIC: safe to call from interrupt context. */
2438 return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
2439 }
2440
2441 /* legacy helper around __netdev_alloc_skb() */
2442 static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
2443 gfp_t gfp_mask)
2444 {
2445 return __netdev_alloc_skb(NULL, length, gfp_mask);
2446 }
2447
2448 /* legacy helper around netdev_alloc_skb() */
2449 static inline struct sk_buff *dev_alloc_skb(unsigned int length)
2450 {
2451 return netdev_alloc_skb(NULL, length);
2452 }
2453
2454
/* As __netdev_alloc_skb(), plus NET_IP_ALIGN headroom so that the IP
 * header lands on an aligned boundary after the 14-byte MAC header. */
2455 static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev,
2456 unsigned int length, gfp_t gfp)
2457 {
2458 struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp);
2459
/* NET_IP_ALIGN may be 0 on arches where unaligned DMA is preferred. */
2460 if (NET_IP_ALIGN && skb)
2461 skb_reserve(skb, NET_IP_ALIGN);
2462 return skb;
2463 }
2464
2465 static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
2466 unsigned int length)
2467 {
2468 return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
2469 }
2470
/* Free a buffer obtained from netdev_alloc_frag()/napi_alloc_frag(). */
2471 static inline void skb_free_frag(void *addr)
2472 {
2473 __free_page_frag(addr);
2474 }
2475
2476 void *napi_alloc_frag(unsigned int fragsz);
2477 struct sk_buff *__napi_alloc_skb(struct napi_struct *napi,
2478 unsigned int length, gfp_t gfp_mask);
/* Allocate an skb for RX inside NAPI context. */
2479 static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi,
2480 unsigned int length)
2481 {
2482 return __napi_alloc_skb(napi, length, GFP_ATOMIC);
2483 }
2484 void napi_consume_skb(struct sk_buff *skb, int budget);
2485
2486 void __kfree_skb_flush(void);
2487 void __kfree_skb_defer(struct sk_buff *skb);
2488
2489 /**
2490 * __dev_alloc_pages - allocate page for network Rx
2491 * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx
2492 * @order: size of the allocation
2493 *
2494 * Allocate a new page.
2495 *
2496 * %NULL is returned if there is no free memory.
2497 */
2498 static inline struct page *__dev_alloc_pages(gfp_t gfp_mask,
2499 unsigned int order)
2500 {
2501 /* This piece of code contains several assumptions.
2502 * 1. This is for device Rx, therefor a cold page is preferred.
2503 * 2. The expectation is the user wants a compound page.
2504 * 3. If requesting a order 0 page it will not be compound
2505 * due to the check to see if order has a value in prep_new_page
2506 * 4. __GFP_MEMALLOC is ignored if __GFP_NOMEMALLOC is set due to
2507 * code in gfp_to_alloc_flags that should be enforcing this.
2508 */
2509 gfp_mask |= __GFP_COLD | __GFP_COMP | __GFP_MEMALLOC;
2510
2511 return alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
2512 }
2513
/* Atomic-context wrapper; __GFP_NOWARN keeps OOM noise out of the log. */
2514 static inline struct page *dev_alloc_pages(unsigned int order)
2515 {
2516 return __dev_alloc_pages(GFP_ATOMIC | __GFP_NOWARN, order);
2517 }
2518
2519 /**
2520 * __dev_alloc_page - allocate a page for network Rx
2521 * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx
2522 *
2523 * Allocate a new page.
2524 *
2525 * %NULL is returned if there is no free memory.
2526 */
2527 static inline struct page *__dev_alloc_page(gfp_t gfp_mask)
2528 {
2529 return __dev_alloc_pages(gfp_mask, 0);
2530 }
2531
2532 static inline struct page *dev_alloc_page(void)
2533 {
2534 return dev_alloc_pages(0);
2535 }
2536
2537 /**
2538 * skb_propagate_pfmemalloc - Propagate pfmemalloc if skb is allocated after RX page
2539 * @page: The page that was allocated from skb_alloc_page
2540 * @skb: The skb that may need pfmemalloc set
2541 */
2542 static inline void skb_propagate_pfmemalloc(struct page *page,
2543 struct sk_buff *skb)
2544 {
2545 if (page_is_pfmemalloc(page))
2546 skb->pfmemalloc = true;
2547 }
2548
2549 /**
2550 * skb_frag_page - retrieve the page referred to by a paged fragment
2551 * @frag: the paged fragment
2552 *
2553 * Returns the &struct page associated with @frag.
2554 */
2555 static inline struct page *skb_frag_page(const skb_frag_t *frag)
2556 {
2557 return frag->page.p;
2558 }
2559
2560 /**
2561 * __skb_frag_ref - take an addition reference on a paged fragment.
2562 * @frag: the paged fragment
2563 *
2564 * Takes an additional reference on the paged fragment @frag.
2565 */
2566 static inline void __skb_frag_ref(skb_frag_t *frag)
2567 {
2568 get_page(skb_frag_page(frag));
2569 }
2570
2571 /**
2572 * skb_frag_ref - take an addition reference on a paged fragment of an skb.
2573 * @skb: the buffer
2574 * @f: the fragment offset.
2575 *
2576 * Takes an additional reference on the @f'th paged fragment of @skb.
2577 */
2578 static inline void skb_frag_ref(struct sk_buff *skb, int f)
2579 {
2580 __skb_frag_ref(&skb_shinfo(skb)->frags[f]);
2581 }
2582
2583 /**
2584 * __skb_frag_unref - release a reference on a paged fragment.
2585 * @frag: the paged fragment
2586 *
2587 * Releases a reference on the paged fragment @frag.
2588 */
2589 static inline void __skb_frag_unref(skb_frag_t *frag)
2590 {
2591 put_page(skb_frag_page(frag));
2592 }
2593
2594 /**
2595 * skb_frag_unref - release a reference on a paged fragment of an skb.
2596 * @skb: the buffer
2597 * @f: the fragment offset
2598 *
2599 * Releases a reference on the @f'th paged fragment of @skb.
2600 */
2601 static inline void skb_frag_unref(struct sk_buff *skb, int f)
2602 {
2603 __skb_frag_unref(&skb_shinfo(skb)->frags[f]);
2604 }
2605
2606 /**
2607 * skb_frag_address - gets the address of the data contained in a paged fragment
2608 * @frag: the paged fragment buffer
2609 *
2610 * Returns the address of the data within @frag. The page must already
2611 * be mapped.
2612 */
2613 static inline void *skb_frag_address(const skb_frag_t *frag)
2614 {
2615 return page_address(skb_frag_page(frag)) + frag->page_offset;
2616 }
2617
2618 /**
2619 * skb_frag_address_safe - gets the address of the data contained in a paged fragment
2620 * @frag: the paged fragment buffer
2621 *
2622 * Returns the address of the data within @frag. Checks that the page
2623 * is mapped and returns %NULL otherwise.
2624 */
2625 static inline void *skb_frag_address_safe(const skb_frag_t *frag)
2626 {
2627 void *ptr = page_address(skb_frag_page(frag));
2628 if (unlikely(!ptr))
2629 return NULL;
2630
2631 return ptr + frag->page_offset;
2632 }
2633
2634 /**
2635 * __skb_frag_set_page - sets the page contained in a paged fragment
2636 * @frag: the paged fragment
2637 * @page: the page to set
2638 *
2639 * Sets the fragment @frag to contain @page.
2640 */
2641 static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page)
2642 {
/* NOTE: does not take a page reference; caller manages refcounting. */
2643 frag->page.p = page;
2644 }
2645
2646 /**
2647 * skb_frag_set_page - sets the page contained in a paged fragment of an skb
2648 * @skb: the buffer
2649 * @f: the fragment offset
2650 * @page: the page to set
2651 *
2652 * Sets the @f'th fragment of @skb to contain @page.
2653 */
2654 static inline void skb_frag_set_page(struct sk_buff *skb, int f,
2655 struct page *page)
2656 {
2657 __skb_frag_set_page(&skb_shinfo(skb)->frags[f], page);
2658 }
2659
2660 bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio);
2661
2662 /**
2663 * skb_frag_dma_map - maps a paged fragment via the DMA API
2664 * @dev: the device to map the fragment to
2665 * @frag: the paged fragment to map
2666 * @offset: the offset within the fragment (starting at the
2667 * fragment's own offset)
2668 * @size: the number of bytes to map
2669 * @dir: the direction of the mapping (%PCI_DMA_*)
2670 *
2671 * Maps the page associated with @frag to @device.
2672 */
2673 static inline dma_addr_t skb_frag_dma_map(struct device *dev,
2674 const skb_frag_t *frag,
2675 size_t offset, size_t size,
2676 enum dma_data_direction dir)
2677 {
/* @offset is added on top of the fragment's own page_offset. */
2678 return dma_map_page(dev, skb_frag_page(frag),
2679 frag->page_offset + offset, size, dir);
2680 }
2681
/* Copy header (and keep frags shared) preserving current headroom. */
2682 static inline struct sk_buff *pskb_copy(struct sk_buff *skb,
2683 gfp_t gfp_mask)
2684 {
2685 return __pskb_copy(skb, skb_headroom(skb), gfp_mask);
2686 }
2687
2688
/* As pskb_copy(), but allocates from the fclone cache (copy will itself
 * be cloned shortly). */
2689 static inline struct sk_buff *pskb_copy_for_clone(struct sk_buff *skb,
2690 gfp_t gfp_mask)
2691 {
2692 return __pskb_copy_fclone(skb, skb_headroom(skb), gfp_mask, true);
2693 }
2694
2695
2696 /**
2697 * skb_clone_writable - is the header of a clone writable
2698 * @skb: buffer to check
2699 * @len: length up to which to write
2700 *
2701 * Returns true if modifying the header part of the cloned buffer
2702 * does not requires the data to be copied.
2703 */
2704 static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len)
2705 {
/* Writable if the header isn't shared and the write stays inside the
 * region reserved for headers (hdr_len). */
2706 return !skb_header_cloned(skb) &&
2707 skb_headroom(skb) + len <= skb->hdr_len;
2708 }
2709
/* Returns 0 when the first @write_len bytes are safe to modify,
 * otherwise the pskb_expand_head() result after un-sharing the data. */
2710 static inline int skb_try_make_writable(struct sk_buff *skb,
2711 unsigned int write_len)
2712 {
2713 return skb_cloned(skb) && !skb_clone_writable(skb, write_len) &&
2714 pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2715 }
2716
/* Common copy-on-write core: reallocate the head when more headroom is
 * needed or the data is shared (@cloned). Extra headroom is rounded up
 * to NET_SKB_PAD to limit repeated reallocations. */
2717 static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
2718 int cloned)
2719 {
2720 int delta = 0;
2721
2722 if (headroom > skb_headroom(skb))
2723 delta = headroom - skb_headroom(skb);
2724
2725 if (delta || cloned)
2726 return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
2727 GFP_ATOMIC);
2728 return 0;
2729 }
2730
2731 /**
2732 * skb_cow - copy header of skb when it is required
2733 * @skb: buffer to cow
2734 * @headroom: needed headroom
2735 *
2736 * If the skb passed lacks sufficient headroom or its data part
2737 * is shared, data is reallocated. If reallocation fails, an error
2738 * is returned and original skb is not changed.
2739 *
2740 * The result is skb with writable area skb->head...skb->tail
2741 * and at least @headroom of space at head.
2742 */
2743 static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
2744 {
2745 return __skb_cow(skb, headroom, skb_cloned(skb));
2746 }
2747
2748 /**
2749 * skb_cow_head - skb_cow but only making the head writable
2750 * @skb: buffer to cow
2751 * @headroom: needed headroom
2752 *
2753 * This function is identical to skb_cow except that we replace the
2754 * skb_cloned check by skb_header_cloned. It should be used when
2755 * you only need to push on some header and do not need to modify
2756 * the data.
2757 */
2758 static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
2759 {
2760 return __skb_cow(skb, headroom, skb_header_cloned(skb));
2761 }
2762
2763 /**
2764 * skb_padto - pad an skbuff up to a minimal size
2765 * @skb: buffer to pad
2766 * @len: minimal length
2767 *
2768 * Pads up a buffer to ensure the trailing bytes exist and are
2769 * blanked. If the buffer already contains sufficient data it
2770 * is untouched. Otherwise it is extended. Returns zero on
2771 * success. The skb is freed on error.
2772 */
2773 static inline int skb_padto(struct sk_buff *skb, unsigned int len)
2774 {
2775 unsigned int size = skb->len;
2776 if (likely(size >= len))
2777 return 0;
/* Padding is written to the tail but skb->len is NOT increased here;
 * see skb_put_padto() for the length-extending variant. */
2778 return skb_pad(skb, len - size);
2779 }
2780
2781 /**
2782 * skb_put_padto - increase size and pad an skbuff up to a minimal size
2783 * @skb: buffer to pad
2784 * @len: minimal length
2785 *
2786 * Pads up a buffer to ensure the trailing bytes exist and are
2787 * blanked. If the buffer already contains sufficient data it
2788 * is untouched. Otherwise it is extended. Returns zero on
2789 * success. The skb is freed on error.
2790 */
2791 static inline int skb_put_padto(struct sk_buff *skb, unsigned int len)
2792 {
2793 unsigned int size = skb->len;
2794
2795 if (unlikely(size < len)) {
2796 len -= size;
/* skb_pad() frees the skb itself on failure. */
2797 if (skb_pad(skb, len))
2798 return -ENOMEM;
/* Extend skb->len to cover the freshly zeroed padding. */
2799 __skb_put(skb, len);
2800 }
2801 return 0;
2802 }
2803
/* Append @copy bytes from a user iov iterator to the tail of @skb,
 * folding them into skb->csum when software checksumming is in use.
 * On a short copy the skb is trimmed back and -EFAULT returned. */
2804 static inline int skb_add_data(struct sk_buff *skb,
2805 struct iov_iter *from, int copy)
2806 {
2807 const int off = skb->len;
2808
2809 if (skb->ip_summed == CHECKSUM_NONE) {
2810 __wsum csum = 0;
/* Copy and checksum in one pass, then fold at the old tail offset. */
2811 if (csum_and_copy_from_iter(skb_put(skb, copy), copy,
2812 &csum, from) == copy) {
2813 skb->csum = csum_block_add(skb->csum, csum, off);
2814 return 0;
2815 }
2816 } else if (copy_from_iter(skb_put(skb, copy), copy, from) == copy)
2817 return 0;
2818
/* Short copy: undo the skb_put() so skb->len is consistent again. */
2819 __skb_trim(skb, off);
2820 return -EFAULT;
2821 }
2822
/* True when (@page, @off) continues exactly where the last fragment
 * ends, so the new data can be merged into it instead of consuming a
 * new frag slot. Always false for the first fragment. */
2823 static inline bool skb_can_coalesce(struct sk_buff *skb, int i,
2824 const struct page *page, int off)
2825 {
2826 if (i) {
2827 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
2828
2829 return page == skb_frag_page(frag) &&
2830 off == frag->page_offset + skb_frag_size(frag);
2831 }
2832 return false;
2833 }
2834
/* Pull all paged data (data_len bytes) into the linear area. */
2835 static inline int __skb_linearize(struct sk_buff *skb)
2836 {
2837 return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
2838 }
2839
2840 /**
2841 * skb_linearize - convert paged skb to linear one
2842 * @skb: buffer to linarize
2843 *
2844 * If there is no free memory -ENOMEM is returned, otherwise zero
2845 * is returned and the old skb data released.
2846 */
2847 static inline int skb_linearize(struct sk_buff *skb)
2848 {
2849 return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
2850 }
2851
2852 /**
2853 * skb_has_shared_frag - can any frag be overwritten
2854 * @skb: buffer to test
2855 *
2856 * Return true if the skb has at least one frag that might be modified
2857 * by an external entity (as in vmsplice()/sendfile())
2858 */
2859 static inline bool skb_has_shared_frag(const struct sk_buff *skb)
2860 {
2861 return skb_is_nonlinear(skb) &&
2862 skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG;
2863 }
2864
2865 /**
2866 * skb_linearize_cow - make sure skb is linear and writable
2867 * @skb: buffer to process
2868 *
2869 * If there is no free memory -ENOMEM is returned, otherwise zero
2870 * is returned and the old skb data released.
2871 */
2872 static inline int skb_linearize_cow(struct sk_buff *skb)
2873 {
/* Linearizing also un-shares cloned data, hence the skb_cloned() check. */
2873 return skb_is_nonlinear(skb) || skb_cloned(skb) ?
2875 __skb_linearize(skb) : 0;
2876 }
2877
/* Subtract the checksum of @len pulled bytes (starting at @start, at
 * block offset @off) from a CHECKSUM_COMPLETE csum; for CHECKSUM_PARTIAL
 * whose start was pulled past, fall back to CHECKSUM_NONE. */
2878 static __always_inline void
2879 __skb_postpull_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
2880 unsigned int off)
2881 {
2882 if (skb->ip_summed == CHECKSUM_COMPLETE)
2883 skb->csum = csum_block_sub(skb->csum,
2884 csum_partial(start, len, 0), off);
2885 else if (skb->ip_summed == CHECKSUM_PARTIAL &&
2886 skb_checksum_start_offset(skb) < 0)
2887 skb->ip_summed = CHECKSUM_NONE;
2888 }
2889
2890 /**
2891 * skb_postpull_rcsum - update checksum for received skb after pull
2892 * @skb: buffer to update
2893 * @start: start of data before pull
2894 * @len: length of data pulled
2895 *
2896 * After doing a pull on a received packet, you need to call this to
2897 * update the CHECKSUM_COMPLETE checksum, or set ip_summed to
2898 * CHECKSUM_NONE so that it can be recomputed from scratch.
2899 */
2900 static inline void skb_postpull_rcsum(struct sk_buff *skb,
2901 const void *start, unsigned int len)
2902 {
2903 __skb_postpull_rcsum(skb, start, len, 0);
2904 }
2905
/* Inverse of __skb_postpull_rcsum(): add the checksum of @len pushed
 * bytes into a CHECKSUM_COMPLETE csum. */
2906 static __always_inline void
2907 __skb_postpush_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
2908 unsigned int off)
2909 {
2910 if (skb->ip_summed == CHECKSUM_COMPLETE)
2911 skb->csum = csum_block_add(skb->csum,
2912 csum_partial(start, len, 0), off);
2913 }
2914
2915 /**
2916 * skb_postpush_rcsum - update checksum for received skb after push
2917 * @skb: buffer to update
2918 * @start: start of data after push
2919 * @len: length of data pushed
2920 *
2921 * After doing a push on a received packet, you need to call this to
2922 * update the CHECKSUM_COMPLETE checksum.
2923 */
2924 static inline void skb_postpush_rcsum(struct sk_buff *skb,
2925 const void *start, unsigned int len)
2926 {
2927 __skb_postpush_rcsum(skb, start, len, 0);
2928 }
2929
2930 unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
2931
2932 /**
2933 * skb_push_rcsum - push skb and update receive checksum
2934 * @skb: buffer to update
2935 * @len: length of data pulled
2936 *
2937 * This function performs an skb_push on the packet and updates
2938 * the CHECKSUM_COMPLETE checksum. It should be used on
2939 * receive path processing instead of skb_push unless you know
2940 * that the checksum difference is zero (e.g., a valid IP header)
2941 * or you are setting ip_summed to CHECKSUM_NONE.
2942 */
2943 static inline unsigned char *skb_push_rcsum(struct sk_buff *skb,
2944 unsigned int len)
2945 {
2946 skb_push(skb, len);
2947 skb_postpush_rcsum(skb, skb->data, len);
2948 return skb->data;
2949 }
2950
2951 /**
2952 * pskb_trim_rcsum - trim received skb and update checksum
2953 * @skb: buffer to trim
2954 * @len: new length
2955 *
2956 * This is exactly the same as pskb_trim except that it ensures the
2957 * checksum of received packets are still valid after the operation.
2958 */
2959
2960 static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
2961 {
2962 if (likely(len >= skb->len))
2963 return 0;
/* Trimming invalidates a full-packet checksum; force recomputation. */
2964 if (skb->ip_summed == CHECKSUM_COMPLETE)
2965 skb->ip_summed = CHECKSUM_NONE;
2966 return __pskb_trim(skb, len);
2967 }
2968
/* Unconditional (linear-only) trim variant with the same csum reset. */
2969 static inline int __skb_trim_rcsum(struct sk_buff *skb, unsigned int len)
2970 {
2971 if (skb->ip_summed == CHECKSUM_COMPLETE)
2972 skb->ip_summed = CHECKSUM_NONE;
2973 __skb_trim(skb, len);
2974 return 0;
2975 }
2976
/* Growing likewise invalidates a CHECKSUM_COMPLETE value. */
2977 static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len)
2978 {
2979 if (skb->ip_summed == CHECKSUM_COMPLETE)
2980 skb->ip_summed = CHECKSUM_NONE;
2981 return __skb_grow(skb, len);
2982 }
2983
2984 #define skb_queue_walk(queue, skb) \
2985 for (skb = (queue)->next; \
2986 skb != (struct sk_buff *)(queue); \
2987 skb = skb->next)
2988
2989 #define skb_queue_walk_safe(queue, skb, tmp) \
2990 for (skb = (queue)->next, tmp = skb->next; \
2991 skb != (struct sk_buff *)(queue); \
2992 skb = tmp, tmp = skb->next)
2993
2994 #define skb_queue_walk_from(queue, skb) \
2995 for (; skb != (struct sk_buff *)(queue); \
2996 skb = skb->next)
2997
2998 #define skb_queue_walk_from_safe(queue, skb, tmp) \
2999 for (tmp = skb->next; \
3000 skb != (struct sk_buff *)(queue); \
3001 skb = tmp, tmp = skb->next)
3002
3003 #define skb_queue_reverse_walk(queue, skb) \
3004 for (skb = (queue)->prev; \
3005 skb != (struct sk_buff *)(queue); \
3006 skb = skb->prev)
3007
3008 #define skb_queue_reverse_walk_safe(queue, skb, tmp) \
3009 for (skb = (queue)->prev, tmp = skb->prev; \
3010 skb != (struct sk_buff *)(queue); \
3011 skb = tmp, tmp = skb->prev)
3012
3013 #define skb_queue_reverse_walk_from_safe(queue, skb, tmp) \
3014 for (tmp = skb->prev; \
3015 skb != (struct sk_buff *)(queue); \
3016 skb = tmp, tmp = skb->prev)
3017
3018 static inline bool skb_has_frag_list(const struct sk_buff *skb)
3019 {
3020 return skb_shinfo(skb)->frag_list != NULL;
3021 }
3022
3023 static inline void skb_frag_list_init(struct sk_buff *skb)
3024 {
3025 skb_shinfo(skb)->frag_list = NULL;
3026 }
3027
/* Iterate over every skb chained on this skb's frag_list. */
#define skb_walk_frags(skb, iter) \
	for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
3030
3031
3032 int __skb_wait_for_more_packets(struct sock *sk, int *err, long *timeo_p,
3033 const struct sk_buff *skb);
3034 struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned flags,
3035 int *peeked, int *off, int *err,
3036 struct sk_buff **last);
3037 struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
3038 int *peeked, int *off, int *err);
3039 struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
3040 int *err);
3041 unsigned int datagram_poll(struct file *file, struct socket *sock,
3042 struct poll_table_struct *wait);
3043 int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
3044 struct iov_iter *to, int size);
3045 static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
3046 struct msghdr *msg, int size)
3047 {
3048 return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size);
3049 }
3050 int skb_copy_and_csum_datagram_msg(struct sk_buff *skb, int hlen,
3051 struct msghdr *msg);
3052 int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
3053 struct iov_iter *from, int len);
3054 int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm);
3055 void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
3056 void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len);
/* Free a received datagram while the socket lock is held; the zero length
 * means no additional receive-queue accounting adjustment is requested.
 */
static inline void skb_free_datagram_locked(struct sock *sk,
					    struct sk_buff *skb)
{
	__skb_free_datagram_locked(sk, skb, 0);
}
3062 int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
3063 int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
3064 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
3065 __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
3066 int len, __wsum csum);
3067 int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
3068 struct pipe_inode_info *pipe, unsigned int len,
3069 unsigned int flags);
3070 void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
3071 unsigned int skb_zerocopy_headlen(const struct sk_buff *from);
3072 int skb_zerocopy(struct sk_buff *to, struct sk_buff *from,
3073 int len, int hlen);
3074 void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
3075 int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
3076 void skb_scrub_packet(struct sk_buff *skb, bool xnet);
3077 unsigned int skb_gso_transport_seglen(const struct sk_buff *skb);
3078 bool skb_gso_validate_mtu(const struct sk_buff *skb, unsigned int mtu);
3079 struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
3080 struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
3081 int skb_ensure_writable(struct sk_buff *skb, int write_len);
3082 int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci);
3083 int skb_vlan_pop(struct sk_buff *skb);
3084 int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
3085 struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy,
3086 gfp_t gfp);
3087
3088 static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len)
3089 {
3090 return copy_from_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT;
3091 }
3092
3093 static inline int memcpy_to_msg(struct msghdr *msg, void *data, int len)
3094 {
3095 return copy_to_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT;
3096 }
3097
3098 struct skb_checksum_ops {
3099 __wsum (*update)(const void *mem, int len, __wsum wsum);
3100 __wsum (*combine)(__wsum csum, __wsum csum2, int offset, int len);
3101 };
3102
3103 __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
3104 __wsum csum, const struct skb_checksum_ops *ops);
3105 __wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
3106 __wsum csum);
3107
3108 static inline void * __must_check
3109 __skb_header_pointer(const struct sk_buff *skb, int offset,
3110 int len, void *data, int hlen, void *buffer)
3111 {
3112 if (hlen - offset >= len)
3113 return data + offset;
3114
3115 if (!skb ||
3116 skb_copy_bits(skb, offset, buffer, len) < 0)
3117 return NULL;
3118
3119 return buffer;
3120 }
3121
3122 static inline void * __must_check
3123 skb_header_pointer(const struct sk_buff *skb, int offset, int len, void *buffer)
3124 {
3125 return __skb_header_pointer(skb, offset, len, skb->data,
3126 skb_headlen(skb), buffer);
3127 }
3128
3129 /**
3130 * skb_needs_linearize - check if we need to linearize a given skb
3131 * depending on the given device features.
3132 * @skb: socket buffer to check
3133 * @features: net device features
3134 *
3135 * Returns true if either:
3136 * 1. skb has frag_list and the device doesn't support FRAGLIST, or
3137 * 2. skb is fragmented and the device does not support SG.
3138 */
3139 static inline bool skb_needs_linearize(struct sk_buff *skb,
3140 netdev_features_t features)
3141 {
3142 return skb_is_nonlinear(skb) &&
3143 ((skb_has_frag_list(skb) && !(features & NETIF_F_FRAGLIST)) ||
3144 (skb_shinfo(skb)->nr_frags && !(features & NETIF_F_SG)));
3145 }
3146
3147 static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
3148 void *to,
3149 const unsigned int len)
3150 {
3151 memcpy(to, skb->data, len);
3152 }
3153
3154 static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
3155 const int offset, void *to,
3156 const unsigned int len)
3157 {
3158 memcpy(to, skb->data + offset, len);
3159 }
3160
3161 static inline void skb_copy_to_linear_data(struct sk_buff *skb,
3162 const void *from,
3163 const unsigned int len)
3164 {
3165 memcpy(skb->data, from, len);
3166 }
3167
3168 static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
3169 const int offset,
3170 const void *from,
3171 const unsigned int len)
3172 {
3173 memcpy(skb->data + offset, from, len);
3174 }
3175
3176 void skb_init(void);
3177
3178 static inline ktime_t skb_get_ktime(const struct sk_buff *skb)
3179 {
3180 return skb->tstamp;
3181 }
3182
3183 /**
3184 * skb_get_timestamp - get timestamp from a skb
3185 * @skb: skb to get stamp from
3186 * @stamp: pointer to struct timeval to store stamp in
3187 *
3188 * Timestamps are stored in the skb as offsets to a base timestamp.
3189 * This function converts the offset back to a struct timeval and stores
3190 * it in stamp.
3191 */
3192 static inline void skb_get_timestamp(const struct sk_buff *skb,
3193 struct timeval *stamp)
3194 {
3195 *stamp = ktime_to_timeval(skb->tstamp);
3196 }
3197
3198 static inline void skb_get_timestampns(const struct sk_buff *skb,
3199 struct timespec *stamp)
3200 {
3201 *stamp = ktime_to_timespec(skb->tstamp);
3202 }
3203
3204 static inline void __net_timestamp(struct sk_buff *skb)
3205 {
3206 skb->tstamp = ktime_get_real();
3207 }
3208
3209 static inline ktime_t net_timedelta(ktime_t t)
3210 {
3211 return ktime_sub(ktime_get_real(), t);
3212 }
3213
3214 static inline ktime_t net_invalid_timestamp(void)
3215 {
3216 return ktime_set(0, 0);
3217 }
3218
3219 struct sk_buff *skb_clone_sk(struct sk_buff *skb);
3220
3221 #ifdef CONFIG_NETWORK_PHY_TIMESTAMPING
3222
3223 void skb_clone_tx_timestamp(struct sk_buff *skb);
3224 bool skb_defer_rx_timestamp(struct sk_buff *skb);
3225
3226 #else /* CONFIG_NETWORK_PHY_TIMESTAMPING */
3227
3228 static inline void skb_clone_tx_timestamp(struct sk_buff *skb)
3229 {
3230 }
3231
3232 static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
3233 {
3234 return false;
3235 }
3236
3237 #endif /* !CONFIG_NETWORK_PHY_TIMESTAMPING */
3238
3239 /**
3240 * skb_complete_tx_timestamp() - deliver cloned skb with tx timestamps
3241 *
3242 * PHY drivers may accept clones of transmitted packets for
3243 * timestamping via their phy_driver.txtstamp method. These drivers
3244 * must call this function to return the skb back to the stack with a
3245 * timestamp.
3246 *
3247 * @skb: clone of the original outgoing packet
3248 * @hwtstamps: hardware time stamps
3249 *
3250 */
3251 void skb_complete_tx_timestamp(struct sk_buff *skb,
3252 struct skb_shared_hwtstamps *hwtstamps);
3253
3254 void __skb_tstamp_tx(struct sk_buff *orig_skb,
3255 struct skb_shared_hwtstamps *hwtstamps,
3256 struct sock *sk, int tstype);
3257
3258 /**
3259 * skb_tstamp_tx - queue clone of skb with send time stamps
3260 * @orig_skb: the original outgoing packet
3261 * @hwtstamps: hardware time stamps, may be NULL if not available
3262 *
3263 * If the skb has a socket associated, then this function clones the
3264 * skb (thus sharing the actual data and optional structures), stores
3265 * the optional hardware time stamping information (if non NULL) or
3266 * generates a software time stamp (otherwise), then queues the clone
3267 * to the error queue of the socket. Errors are silently ignored.
3268 */
3269 void skb_tstamp_tx(struct sk_buff *orig_skb,
3270 struct skb_shared_hwtstamps *hwtstamps);
3271
3272 static inline void sw_tx_timestamp(struct sk_buff *skb)
3273 {
3274 if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP &&
3275 !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
3276 skb_tstamp_tx(skb, NULL);
3277 }
3278
3279 /**
3280 * skb_tx_timestamp() - Driver hook for transmit timestamping
3281 *
3282 * Ethernet MAC Drivers should call this function in their hard_xmit()
3283 * function immediately before giving the sk_buff to the MAC hardware.
3284 *
3285 * Specifically, one should make absolutely sure that this function is
3286 * called before TX completion of this packet can trigger. Otherwise
3287 * the packet could potentially already be freed.
3288 *
3289 * @skb: A socket buffer.
3290 */
3291 static inline void skb_tx_timestamp(struct sk_buff *skb)
3292 {
3293 skb_clone_tx_timestamp(skb);
3294 sw_tx_timestamp(skb);
3295 }
3296
3297 /**
3298 * skb_complete_wifi_ack - deliver skb with wifi status
3299 *
3300 * @skb: the original outgoing packet
3301 * @acked: ack status
3302 *
3303 */
3304 void skb_complete_wifi_ack(struct sk_buff *skb, bool acked);
3305
3306 __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
3307 __sum16 __skb_checksum_complete(struct sk_buff *skb);
3308
3309 static inline int skb_csum_unnecessary(const struct sk_buff *skb)
3310 {
3311 return ((skb->ip_summed == CHECKSUM_UNNECESSARY) ||
3312 skb->csum_valid ||
3313 (skb->ip_summed == CHECKSUM_PARTIAL &&
3314 skb_checksum_start_offset(skb) >= 0));
3315 }
3316
3317 /**
3318 * skb_checksum_complete - Calculate checksum of an entire packet
3319 * @skb: packet to process
3320 *
3321 * This function calculates the checksum over the entire packet plus
3322 * the value of skb->csum. The latter can be used to supply the
3323 * checksum of a pseudo header as used by TCP/UDP. It returns the
3324 * checksum.
3325 *
3326 * For protocols that contain complete checksums such as ICMP/TCP/UDP,
3327 * this function can be used to verify that checksum on received
3328 * packets. In that case the function should return zero if the
3329 * checksum is correct. In particular, this function will return zero
3330 * if skb->ip_summed is CHECKSUM_UNNECESSARY which indicates that the
3331 * hardware has already verified the correctness of the checksum.
3332 */
3333 static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
3334 {
3335 return skb_csum_unnecessary(skb) ?
3336 0 : __skb_checksum_complete(skb);
3337 }
3338
3339 static inline void __skb_decr_checksum_unnecessary(struct sk_buff *skb)
3340 {
3341 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
3342 if (skb->csum_level == 0)
3343 skb->ip_summed = CHECKSUM_NONE;
3344 else
3345 skb->csum_level--;
3346 }
3347 }
3348
3349 static inline void __skb_incr_checksum_unnecessary(struct sk_buff *skb)
3350 {
3351 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
3352 if (skb->csum_level < SKB_MAX_CSUM_LEVEL)
3353 skb->csum_level++;
3354 } else if (skb->ip_summed == CHECKSUM_NONE) {
3355 skb->ip_summed = CHECKSUM_UNNECESSARY;
3356 skb->csum_level = 0;
3357 }
3358 }
3359
3360 static inline void __skb_mark_checksum_bad(struct sk_buff *skb)
3361 {
3362 /* Mark current checksum as bad (typically called from GRO
3363 * path). In the case that ip_summed is CHECKSUM_NONE
3364 * this must be the first checksum encountered in the packet.
3365 * When ip_summed is CHECKSUM_UNNECESSARY, this is the first
3366 * checksum after the last one validated. For UDP, a zero
3367 * checksum can not be marked as bad.
3368 */
3369
3370 if (skb->ip_summed == CHECKSUM_NONE ||
3371 skb->ip_summed == CHECKSUM_UNNECESSARY)
3372 skb->csum_bad = 1;
3373 }
3374
3375 /* Check if we need to perform checksum complete validation.
3376 *
3377 * Returns true if checksum complete is needed, false otherwise
3378 * (either checksum is unnecessary or zero checksum is allowed).
3379 */
3380 static inline bool __skb_checksum_validate_needed(struct sk_buff *skb,
3381 bool zero_okay,
3382 __sum16 check)
3383 {
3384 if (skb_csum_unnecessary(skb) || (zero_okay && !check)) {
3385 skb->csum_valid = 1;
3386 __skb_decr_checksum_unnecessary(skb);
3387 return false;
3388 }
3389
3390 return true;
3391 }
3392
/* For packets of <= CHECKSUM_BREAK bytes it is cheaper to perform the
 * checksum-complete computation directly in checksum_init.
 */
#define CHECKSUM_BREAK 76
3397
3398 /* Unset checksum-complete
3399 *
3400 * Unset checksum complete can be done when packet is being modified
3401 * (uncompressed for instance) and checksum-complete value is
3402 * invalidated.
3403 */
3404 static inline void skb_checksum_complete_unset(struct sk_buff *skb)
3405 {
3406 if (skb->ip_summed == CHECKSUM_COMPLETE)
3407 skb->ip_summed = CHECKSUM_NONE;
3408 }
3409
3410 /* Validate (init) checksum based on checksum complete.
3411 *
3412 * Return values:
3413 * 0: checksum is validated or try to in skb_checksum_complete. In the latter
3414 * case the ip_summed will not be CHECKSUM_UNNECESSARY and the pseudo
3415 * checksum is stored in skb->csum for use in __skb_checksum_complete
3416 * non-zero: value of invalid checksum
3417 *
3418 */
3419 static inline __sum16 __skb_checksum_validate_complete(struct sk_buff *skb,
3420 bool complete,
3421 __wsum psum)
3422 {
3423 if (skb->ip_summed == CHECKSUM_COMPLETE) {
3424 if (!csum_fold(csum_add(psum, skb->csum))) {
3425 skb->csum_valid = 1;
3426 return 0;
3427 }
3428 } else if (skb->csum_bad) {
3429 /* ip_summed == CHECKSUM_NONE in this case */
3430 return (__force __sum16)1;
3431 }
3432
3433 skb->csum = psum;
3434
3435 if (complete || skb->len <= CHECKSUM_BREAK) {
3436 __sum16 csum;
3437
3438 csum = __skb_checksum_complete(skb);
3439 skb->csum_valid = !csum;
3440 return csum;
3441 }
3442
3443 return 0;
3444 }
3445
3446 static inline __wsum null_compute_pseudo(struct sk_buff *skb, int proto)
3447 {
3448 return 0;
3449 }
3450
/* Perform checksum validate (init).  This is a macro rather than a function
 * because compute_pseudo must only be evaluated when checksum-complete
 * validation is actually needed.  First we try to validate without any
 * computation (checksum unnecessary) and then calculate based on checksum
 * complete, calling the supplied function to compute the pseudo header.
 *
 * Return values:
 *   0: checksum is validated or try to in skb_checksum_complete
 *   non-zero: value of invalid checksum
 */
#define __skb_checksum_validate(skb, proto, complete,			\
				zero_okay, check, compute_pseudo)	\
({									\
	__sum16 __ret = 0;						\
	skb->csum_valid = 0;						\
	if (__skb_checksum_validate_needed(skb, zero_okay, check))	\
		__ret = __skb_checksum_validate_complete(skb,		\
				complete, compute_pseudo(skb, proto));	\
	__ret;								\
})
3471
/* Convenience front-ends over __skb_checksum_validate():
 * "init" variants defer the full computation where possible, "validate"
 * variants force it; "_zero_check" variants accept an all-zero checksum.
 */
#define skb_checksum_init(skb, proto, compute_pseudo)			\
	__skb_checksum_validate(skb, proto, false, false, 0, compute_pseudo)

#define skb_checksum_init_zero_check(skb, proto, check, compute_pseudo)	\
	__skb_checksum_validate(skb, proto, false, true, check, compute_pseudo)

#define skb_checksum_validate(skb, proto, compute_pseudo)		\
	__skb_checksum_validate(skb, proto, true, false, 0, compute_pseudo)

#define skb_checksum_validate_zero_check(skb, proto, check,		\
					 compute_pseudo)		\
	__skb_checksum_validate(skb, proto, true, true, check, compute_pseudo)

#define skb_checksum_simple_validate(skb)				\
	__skb_checksum_validate(skb, 0, true, false, 0, null_compute_pseudo)
3487
3488 static inline bool __skb_checksum_convert_check(struct sk_buff *skb)
3489 {
3490 return (skb->ip_summed == CHECKSUM_NONE &&
3491 skb->csum_valid && !skb->csum_bad);
3492 }
3493
3494 static inline void __skb_checksum_convert(struct sk_buff *skb,
3495 __sum16 check, __wsum pseudo)
3496 {
3497 skb->csum = ~pseudo;
3498 skb->ip_summed = CHECKSUM_COMPLETE;
3499 }
3500
/* Opportunistically convert to CHECKSUM_COMPLETE; compute_pseudo is only
 * evaluated when the conversion is actually permitted.
 */
#define skb_checksum_try_convert(skb, proto, check, compute_pseudo)	\
do {									\
	if (__skb_checksum_convert_check(skb))				\
		__skb_checksum_convert(skb, check,			\
				       compute_pseudo(skb, proto));	\
} while (0)
3507
3508 static inline void skb_remcsum_adjust_partial(struct sk_buff *skb, void *ptr,
3509 u16 start, u16 offset)
3510 {
3511 skb->ip_summed = CHECKSUM_PARTIAL;
3512 skb->csum_start = ((unsigned char *)ptr + start) - skb->head;
3513 skb->csum_offset = offset - start;
3514 }
3515
3516 /* Update skbuf and packet to reflect the remote checksum offload operation.
3517 * When called, ptr indicates the starting point for skb->csum when
3518 * ip_summed is CHECKSUM_COMPLETE. If we need create checksum complete
3519 * here, skb_postpull_rcsum is done so skb->csum start is ptr.
3520 */
3521 static inline void skb_remcsum_process(struct sk_buff *skb, void *ptr,
3522 int start, int offset, bool nopartial)
3523 {
3524 __wsum delta;
3525
3526 if (!nopartial) {
3527 skb_remcsum_adjust_partial(skb, ptr, start, offset);
3528 return;
3529 }
3530
3531 if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) {
3532 __skb_checksum_complete(skb);
3533 skb_postpull_rcsum(skb, skb->data, ptr - (void *)skb->data);
3534 }
3535
3536 delta = remcsum_adjust(ptr, skb->csum, start, offset);
3537
3538 /* Adjust skb->csum since we changed the packet */
3539 skb->csum = csum_add(skb->csum, delta);
3540 }
3541
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
void nf_conntrack_destroy(struct nf_conntrack *nfct);

/* Drop a conntrack reference, destroying the entry on the last put. */
static inline void nf_conntrack_put(struct nf_conntrack *nfct)
{
	if (nfct && atomic_dec_and_test(&nfct->use))
		nf_conntrack_destroy(nfct);
}

/* Take a conntrack reference; NULL is tolerated. */
static inline void nf_conntrack_get(struct nf_conntrack *nfct)
{
	if (nfct)
		atomic_inc(&nfct->use);
}
#endif
3555 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
3556 static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
3557 {
3558 if (nf_bridge && atomic_dec_and_test(&nf_bridge->use))
3559 kfree(nf_bridge);
3560 }
3561 static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
3562 {
3563 if (nf_bridge)
3564 atomic_inc(&nf_bridge->use);
3565 }
3566 #endif /* CONFIG_BRIDGE_NETFILTER */
3567 static inline void nf_reset(struct sk_buff *skb)
3568 {
3569 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
3570 nf_conntrack_put(skb->nfct);
3571 skb->nfct = NULL;
3572 #endif
3573 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
3574 nf_bridge_put(skb->nf_bridge);
3575 skb->nf_bridge = NULL;
3576 #endif
3577 }
3578
3579 static inline void nf_reset_trace(struct sk_buff *skb)
3580 {
3581 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
3582 skb->nf_trace = 0;
3583 #endif
3584 }
3585
3586 /* Note: This doesn't put any conntrack and bridge info in dst. */
3587 static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
3588 bool copy)
3589 {
3590 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
3591 dst->nfct = src->nfct;
3592 nf_conntrack_get(src->nfct);
3593 if (copy)
3594 dst->nfctinfo = src->nfctinfo;
3595 #endif
3596 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
3597 dst->nf_bridge = src->nf_bridge;
3598 nf_bridge_get(src->nf_bridge);
3599 #endif
3600 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
3601 if (copy)
3602 dst->nf_trace = src->nf_trace;
3603 #endif
3604 }
3605
3606 static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
3607 {
3608 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
3609 nf_conntrack_put(dst->nfct);
3610 #endif
3611 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
3612 nf_bridge_put(dst->nf_bridge);
3613 #endif
3614 __nf_copy(dst, src, true);
3615 }
3616
#ifdef CONFIG_NETWORK_SECMARK
/* Propagate the security mark from one skb to another. */
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{
	to->secmark = from->secmark;
}

/* Clear the skb's security mark. */
static inline void skb_init_secmark(struct sk_buff *skb)
{
	skb->secmark = 0;
}
#else
/* Secmark disabled: both helpers are no-ops. */
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{ }

static inline void skb_init_secmark(struct sk_buff *skb)
{ }
#endif
3634
3635 static inline bool skb_irq_freeable(const struct sk_buff *skb)
3636 {
3637 return !skb->destructor &&
3638 #if IS_ENABLED(CONFIG_XFRM)
3639 !skb->sp &&
3640 #endif
3641 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
3642 !skb->nfct &&
3643 #endif
3644 !skb->_skb_refdst &&
3645 !skb_has_frag_list(skb);
3646 }
3647
3648 static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
3649 {
3650 skb->queue_mapping = queue_mapping;
3651 }
3652
3653 static inline u16 skb_get_queue_mapping(const struct sk_buff *skb)
3654 {
3655 return skb->queue_mapping;
3656 }
3657
3658 static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
3659 {
3660 to->queue_mapping = from->queue_mapping;
3661 }
3662
3663 static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue)
3664 {
3665 skb->queue_mapping = rx_queue + 1;
3666 }
3667
3668 static inline u16 skb_get_rx_queue(const struct sk_buff *skb)
3669 {
3670 return skb->queue_mapping - 1;
3671 }
3672
3673 static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
3674 {
3675 return skb->queue_mapping != 0;
3676 }
3677
3678 static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
3679 {
3680 #ifdef CONFIG_XFRM
3681 return skb->sp;
3682 #else
3683 return NULL;
3684 #endif
3685 }
3686
3687 /* Keeps track of mac header offset relative to skb->head.
3688 * It is useful for TSO of Tunneling protocol. e.g. GRE.
3689 * For non-tunnel skb it points to skb_mac_header() and for
3690 * tunnel skb it points to outer mac header.
3691 * Keeps track of level of encapsulation of network headers.
3692 */
3693 struct skb_gso_cb {
3694 union {
3695 int mac_offset;
3696 int data_offset;
3697 };
3698 int encap_level;
3699 __wsum csum;
3700 __u16 csum_start;
3701 };
3702 #define SKB_SGO_CB_OFFSET 32
3703 #define SKB_GSO_CB(skb) ((struct skb_gso_cb *)((skb)->cb + SKB_SGO_CB_OFFSET))
3704
3705 static inline int skb_tnl_header_len(const struct sk_buff *inner_skb)
3706 {
3707 return (skb_mac_header(inner_skb) - inner_skb->head) -
3708 SKB_GSO_CB(inner_skb)->mac_offset;
3709 }
3710
3711 static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra)
3712 {
3713 int new_headroom, headroom;
3714 int ret;
3715
3716 headroom = skb_headroom(skb);
3717 ret = pskb_expand_head(skb, extra, 0, GFP_ATOMIC);
3718 if (ret)
3719 return ret;
3720
3721 new_headroom = skb_headroom(skb);
3722 SKB_GSO_CB(skb)->mac_offset += (new_headroom - headroom);
3723 return 0;
3724 }
3725
3726 static inline void gso_reset_checksum(struct sk_buff *skb, __wsum res)
3727 {
3728 /* Do not update partial checksums if remote checksum is enabled. */
3729 if (skb->remcsum_offload)
3730 return;
3731
3732 SKB_GSO_CB(skb)->csum = res;
3733 SKB_GSO_CB(skb)->csum_start = skb_checksum_start(skb) - skb->head;
3734 }
3735
3736 /* Compute the checksum for a gso segment. First compute the checksum value
3737 * from the start of transport header to SKB_GSO_CB(skb)->csum_start, and
3738 * then add in skb->csum (checksum from csum_start to end of packet).
3739 * skb->csum and csum_start are then updated to reflect the checksum of the
3740 * resultant packet starting from the transport header-- the resultant checksum
3741 * is in the res argument (i.e. normally zero or ~ of checksum of a pseudo
3742 * header.
3743 */
3744 static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res)
3745 {
3746 unsigned char *csum_start = skb_transport_header(skb);
3747 int plen = (skb->head + SKB_GSO_CB(skb)->csum_start) - csum_start;
3748 __wsum partial = SKB_GSO_CB(skb)->csum;
3749
3750 SKB_GSO_CB(skb)->csum = res;
3751 SKB_GSO_CB(skb)->csum_start = csum_start - skb->head;
3752
3753 return csum_fold(csum_partial(csum_start, plen, partial));
3754 }
3755
3756 static inline bool skb_is_gso(const struct sk_buff *skb)
3757 {
3758 return skb_shinfo(skb)->gso_size;
3759 }
3760
3761 /* Note: Should be called only if skb_is_gso(skb) is true */
3762 static inline bool skb_is_gso_v6(const struct sk_buff *skb)
3763 {
3764 return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
3765 }
3766
3767 static inline void skb_gso_reset(struct sk_buff *skb)
3768 {
3769 skb_shinfo(skb)->gso_size = 0;
3770 skb_shinfo(skb)->gso_segs = 0;
3771 skb_shinfo(skb)->gso_type = 0;
3772 }
3773
3774 void __skb_warn_lro_forwarding(const struct sk_buff *skb);
3775
3776 static inline bool skb_warn_if_lro(const struct sk_buff *skb)
3777 {
3778 /* LRO sets gso_size but not gso_type, whereas if GSO is really
3779 * wanted then gso_type will be set. */
3780 const struct skb_shared_info *shinfo = skb_shinfo(skb);
3781
3782 if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&
3783 unlikely(shinfo->gso_type == 0)) {
3784 __skb_warn_lro_forwarding(skb);
3785 return true;
3786 }
3787 return false;
3788 }
3789
3790 static inline void skb_forward_csum(struct sk_buff *skb)
3791 {
3792 /* Unfortunately we don't support this one. Any brave souls? */
3793 if (skb->ip_summed == CHECKSUM_COMPLETE)
3794 skb->ip_summed = CHECKSUM_NONE;
3795 }
3796
3797 /**
3798 * skb_checksum_none_assert - make sure skb ip_summed is CHECKSUM_NONE
3799 * @skb: skb to check
3800 *
3801 * fresh skbs have their ip_summed set to CHECKSUM_NONE.
3802 * Instead of forcing ip_summed to CHECKSUM_NONE, we can
3803 * use this helper, to document places where we make this assertion.
3804 */
3805 static inline void skb_checksum_none_assert(const struct sk_buff *skb)
3806 {
3807 #ifdef DEBUG
3808 BUG_ON(skb->ip_summed != CHECKSUM_NONE);
3809 #endif
3810 }
3811
3812 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
3813
3814 int skb_checksum_setup(struct sk_buff *skb, bool recalculate);
3815 struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
3816 unsigned int transport_len,
3817 __sum16(*skb_chkf)(struct sk_buff *skb));
3818
3819 /**
3820 * skb_head_is_locked - Determine if the skb->head is locked down
3821 * @skb: skb to check
3822 *
3823 * The head on skbs build around a head frag can be removed if they are
3824 * not cloned. This function returns true if the skb head is locked down
3825 * due to either being allocated via kmalloc, or by being a clone with
3826 * multiple references to the head.
3827 */
3828 static inline bool skb_head_is_locked(const struct sk_buff *skb)
3829 {
3830 return !skb->head_frag || skb_cloned(skb);
3831 }
3832
3833 /**
3834 * skb_gso_network_seglen - Return length of individual segments of a gso packet
3835 *
3836 * @skb: GSO skb
3837 *
3838 * skb_gso_network_seglen is used to determine the real size of the
3839 * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP).
3840 *
3841 * The MAC/L2 header is not accounted for.
3842 */
3843 static inline unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
3844 {
3845 unsigned int hdr_len = skb_transport_header(skb) -
3846 skb_network_header(skb);
3847 return hdr_len + skb_gso_transport_seglen(skb);
3848 }
3849
3850 /* Local Checksum Offload.
3851 * Compute outer checksum based on the assumption that the
3852 * inner checksum will be offloaded later.
3853 * See Documentation/networking/checksum-offloads.txt for
3854 * explanation of how this works.
3855 * Fill in outer checksum adjustment (e.g. with sum of outer
3856 * pseudo-header) before calling.
3857 * Also ensure that inner checksum is in linear data area.
3858 */
3859 static inline __wsum lco_csum(struct sk_buff *skb)
3860 {
3861 unsigned char *csum_start = skb_checksum_start(skb);
3862 unsigned char *l4_hdr = skb_transport_header(skb);
3863 __wsum partial;
3864
3865 /* Start with complement of inner checksum adjustment */
3866 partial = ~csum_unfold(*(__force __sum16 *)(csum_start +
3867 skb->csum_offset));
3868
3869 /* Add in checksum of our headers (incl. outer checksum
3870 * adjustment filled in by caller) and return result.
3871 */
3872 return csum_partial(l4_hdr, csum_start - l4_hdr, partial);
3873 }
3874
3875 #endif /* __KERNEL__ */
3876 #endif /* _LINUX_SKBUFF_H */ |
Here is an explanation of a rule violation that arose while checking your driver against the corresponding kernel.
Note that it may be a false positive, i.e. there may not be a real error. Please analyze the given error trace and the related source code to determine whether there is an error in your driver.
The error trace column contains the path on which the given rule is violated. You can expand/collapse some entity classes by clicking the corresponding checkboxes in the main menu or in the advanced Others menu. You can also expand/collapse each particular entity by clicking +/-. Hovering over some entities shows tips. The error trace is also bound to the related source code. Line numbers may be shown as links on the left; you can click them to open the corresponding lines in the source code.
The source code column contains the content of the files related to the error trace. These include the source code of your driver (note that there are some LDV modifications at the end), kernel headers, and the rule model. Tabs show the currently opened file and the other available files. Hovering over a tab shows its full file name. Clicking a tab shows the corresponding file's content.
Kernel | Module | Rule | Verifier | Verdict | Status | Timestamp | Bug report |
linux-4.9-rc1.tar.xz | drivers/net/wireless/admtek/adm8211.ko | 331_1a | CPAchecker | Bug | Fixed | 2016-12-03 00:55:13 | L0262 |
Comment
Reported: 3 Dec 2016
[Home]