Error Trace

[Home]

Bug # 159

Show/hide error trace
Error trace
Function bodies
Blocks
  • Others...
    Function bodies without model function calls
    Initialization function calls
    Initialization function bodies
    Entry point
    Entry point body
    Function calls
    Skipped function calls
    Formal parameter names
    Declarations
    Assumes
    Assume conditions
    Returns
    Return values
    DEG initialization
    DEG function calls
    Model function calls
    Model function bodies
    Model asserts
    Model state changes
    Model function function calls
    Model function function bodies
    Model returns
    Model others
    Indentation
    Line numbers
    Expand signs
-__CPAchecker_initialize()
{
19 typedef signed char __s8;
20 typedef unsigned char __u8;
23 typedef unsigned short __u16;
25 typedef int __s32;
26 typedef unsigned int __u32;
29 typedef long long __s64;
30 typedef unsigned long long __u64;
15 typedef signed char s8;
16 typedef unsigned char u8;
18 typedef short s16;
19 typedef unsigned short u16;
21 typedef int s32;
22 typedef unsigned int u32;
24 typedef long long s64;
25 typedef unsigned long long u64;
14 typedef long __kernel_long_t;
15 typedef unsigned long __kernel_ulong_t;
27 typedef int __kernel_pid_t;
48 typedef unsigned int __kernel_uid32_t;
49 typedef unsigned int __kernel_gid32_t;
71 typedef __kernel_ulong_t __kernel_size_t;
72 typedef __kernel_long_t __kernel_ssize_t;
87 typedef long long __kernel_loff_t;
88 typedef __kernel_long_t __kernel_time_t;
89 typedef __kernel_long_t __kernel_clock_t;
90 typedef int __kernel_timer_t;
91 typedef int __kernel_clockid_t;
33 typedef __u16 __be16;
35 typedef __u32 __be32;
39 typedef __u16 __sum16;
40 typedef __u32 __wsum;
280 struct kernel_symbol { unsigned long value; const char *name; } ;
34 struct module ;
12 typedef __u32 __kernel_dev_t;
15 typedef __kernel_dev_t dev_t;
18 typedef unsigned short umode_t;
21 typedef __kernel_pid_t pid_t;
26 typedef __kernel_clockid_t clockid_t;
29 typedef _Bool bool;
31 typedef __kernel_uid32_t uid_t;
32 typedef __kernel_gid32_t gid_t;
45 typedef __kernel_loff_t loff_t;
54 typedef __kernel_size_t size_t;
59 typedef __kernel_ssize_t ssize_t;
69 typedef __kernel_time_t time_t;
102 typedef __s32 int32_t;
106 typedef __u8 uint8_t;
108 typedef __u32 uint32_t;
111 typedef __u64 uint64_t;
133 typedef unsigned long sector_t;
134 typedef unsigned long blkcnt_t;
152 typedef u64 dma_addr_t;
157 typedef unsigned int gfp_t;
158 typedef unsigned int fmode_t;
161 typedef u64 phys_addr_t;
166 typedef phys_addr_t resource_size_t;
172 typedef unsigned long irq_hw_number_t;
176 struct __anonstruct_atomic_t_6 { int counter; } ;
176 typedef struct __anonstruct_atomic_t_6 atomic_t;
181 struct __anonstruct_atomic64_t_7 { long counter; } ;
181 typedef struct __anonstruct_atomic64_t_7 atomic64_t;
182 struct list_head { struct list_head *next; struct list_head *prev; } ;
187 struct hlist_node ;
187 struct hlist_head { struct hlist_node *first; } ;
191 struct hlist_node { struct hlist_node *next; struct hlist_node **pprev; } ;
202 struct callback_head { struct callback_head *next; void (*func)(struct callback_head *); } ;
115 typedef void (*ctor_fn_t)();
68 struct ctl_table ;
259 struct _ddebug { const char *modname; const char *function; const char *filename; const char *format; unsigned int lineno; unsigned char flags; } ;
58 struct device ;
64 struct net_device ;
450 struct file_operations ;
462 struct completion ;
463 struct pt_regs ;
557 struct task_struct ;
27 union __anonunion___u_9 { struct list_head *__val; char __c[1U]; } ;
20 struct qspinlock { atomic_t val; } ;
33 typedef struct qspinlock arch_spinlock_t;
34 struct qrwlock { atomic_t cnts; arch_spinlock_t wait_lock; } ;
14 typedef struct qrwlock arch_rwlock_t;
15 struct lockdep_map ;
26 union __anonunion___u_25 { int __val; char __c[1U]; } ;
23 typedef atomic64_t atomic_long_t;
242 struct bug_entry { int bug_addr_disp; int file_disp; unsigned short line; unsigned short flags; } ;
55 struct stack_trace { unsigned int nr_entries; unsigned int max_entries; unsigned long *entries; int skip; } ;
28 struct lockdep_subclass_key { char __one_byte; } ;
53 struct lock_class_key { struct lockdep_subclass_key subkeys[8U]; } ;
59 struct lock_class { struct hlist_node hash_entry; struct list_head lock_entry; struct lockdep_subclass_key *key; unsigned int subclass; unsigned int dep_gen_id; unsigned long usage_mask; struct stack_trace usage_traces[13U]; struct list_head locks_after; struct list_head locks_before; unsigned int version; unsigned long ops; const char *name; int name_version; unsigned long contention_point[4U]; unsigned long contending_point[4U]; } ;
144 struct lockdep_map { struct lock_class_key *key; struct lock_class *class_cache[2U]; const char *name; int cpu; unsigned long ip; } ;
207 struct held_lock { u64 prev_chain_key; unsigned long acquire_ip; struct lockdep_map *instance; struct lockdep_map *nest_lock; u64 waittime_stamp; u64 holdtime_stamp; unsigned short class_idx; unsigned char irq_context; unsigned char trylock; unsigned char read; unsigned char check; unsigned char hardirqs_off; unsigned short references; unsigned int pin_count; } ;
572 struct raw_spinlock { arch_spinlock_t raw_lock; unsigned int magic; unsigned int owner_cpu; void *owner; struct lockdep_map dep_map; } ;
32 typedef struct raw_spinlock raw_spinlock_t;
33 struct __anonstruct____missing_field_name_37 { u8 __padding[24U]; struct lockdep_map dep_map; } ;
33 union __anonunion____missing_field_name_36 { struct raw_spinlock rlock; struct __anonstruct____missing_field_name_37 __annonCompField4; } ;
33 struct spinlock { union __anonunion____missing_field_name_36 __annonCompField5; } ;
76 typedef struct spinlock spinlock_t;
23 struct __anonstruct_rwlock_t_38 { arch_rwlock_t raw_lock; unsigned int magic; unsigned int owner_cpu; void *owner; struct lockdep_map dep_map; } ;
23 typedef struct __anonstruct_rwlock_t_38 rwlock_t;
23 struct mm_struct ;
72 struct pt_regs { unsigned long r15; unsigned long r14; unsigned long r13; unsigned long r12; unsigned long bp; unsigned long bx; unsigned long r11; unsigned long r10; unsigned long r9; unsigned long r8; unsigned long ax; unsigned long cx; unsigned long dx; unsigned long si; unsigned long di; unsigned long orig_ax; unsigned long ip; unsigned long cs; unsigned long flags; unsigned long sp; unsigned long ss; } ;
66 struct __anonstruct____missing_field_name_40 { unsigned int a; unsigned int b; } ;
66 struct __anonstruct____missing_field_name_41 { u16 limit0; u16 base0; unsigned char base1; unsigned char type; unsigned char s; unsigned char dpl; unsigned char p; unsigned char limit; unsigned char avl; unsigned char l; unsigned char d; unsigned char g; unsigned char base2; } ;
66 union __anonunion____missing_field_name_39 { struct __anonstruct____missing_field_name_40 __annonCompField6; struct __anonstruct____missing_field_name_41 __annonCompField7; } ;
66 struct desc_struct { union __anonunion____missing_field_name_39 __annonCompField8; } ;
13 typedef unsigned long pteval_t;
14 typedef unsigned long pmdval_t;
16 typedef unsigned long pgdval_t;
17 typedef unsigned long pgprotval_t;
19 struct __anonstruct_pte_t_42 { pteval_t pte; } ;
19 typedef struct __anonstruct_pte_t_42 pte_t;
21 struct pgprot { pgprotval_t pgprot; } ;
256 typedef struct pgprot pgprot_t;
258 struct __anonstruct_pgd_t_43 { pgdval_t pgd; } ;
258 typedef struct __anonstruct_pgd_t_43 pgd_t;
297 struct __anonstruct_pmd_t_45 { pmdval_t pmd; } ;
297 typedef struct __anonstruct_pmd_t_45 pmd_t;
423 struct page ;
423 typedef struct page *pgtable_t;
434 struct file ;
445 struct seq_file ;
481 struct thread_struct ;
483 struct cpumask ;
484 struct paravirt_callee_save { void *func; } ;
181 struct pv_irq_ops { struct paravirt_callee_save save_fl; struct paravirt_callee_save restore_fl; struct paravirt_callee_save irq_disable; struct paravirt_callee_save irq_enable; void (*safe_halt)(); void (*halt)(); void (*adjust_exception_frame)(); } ;
247 struct math_emu_info { long ___orig_eip; struct pt_regs *regs; } ;
83 struct static_key { atomic_t enabled; } ;
359 struct cpumask { unsigned long bits[128U]; } ;
15 typedef struct cpumask cpumask_t;
654 typedef struct cpumask *cpumask_var_t;
22 struct tracepoint_func { void *func; void *data; int prio; } ;
28 struct tracepoint { const char *name; struct static_key key; void (*regfunc)(); void (*unregfunc)(); struct tracepoint_func *funcs; } ;
246 struct fregs_state { u32 cwd; u32 swd; u32 twd; u32 fip; u32 fcs; u32 foo; u32 fos; u32 st_space[20U]; u32 status; } ;
26 struct __anonstruct____missing_field_name_58 { u64 rip; u64 rdp; } ;
26 struct __anonstruct____missing_field_name_59 { u32 fip; u32 fcs; u32 foo; u32 fos; } ;
26 union __anonunion____missing_field_name_57 { struct __anonstruct____missing_field_name_58 __annonCompField14; struct __anonstruct____missing_field_name_59 __annonCompField15; } ;
26 union __anonunion____missing_field_name_60 { u32 padding1[12U]; u32 sw_reserved[12U]; } ;
26 struct fxregs_state { u16 cwd; u16 swd; u16 twd; u16 fop; union __anonunion____missing_field_name_57 __annonCompField16; u32 mxcsr; u32 mxcsr_mask; u32 st_space[32U]; u32 xmm_space[64U]; u32 padding[12U]; union __anonunion____missing_field_name_60 __annonCompField17; } ;
66 struct swregs_state { u32 cwd; u32 swd; u32 twd; u32 fip; u32 fcs; u32 foo; u32 fos; u32 st_space[20U]; u8 ftop; u8 changed; u8 lookahead; u8 no_update; u8 rm; u8 alimit; struct math_emu_info *info; u32 entry_eip; } ;
227 struct xstate_header { u64 xfeatures; u64 xcomp_bv; u64 reserved[6U]; } ;
233 struct xregs_state { struct fxregs_state i387; struct xstate_header header; u8 extended_state_area[0U]; } ;
254 union fpregs_state { struct fregs_state fsave; struct fxregs_state fxsave; struct swregs_state soft; struct xregs_state xsave; u8 __padding[4096U]; } ;
271 struct fpu { unsigned int last_cpu; unsigned char fpstate_active; unsigned char fpregs_active; unsigned char counter; union fpregs_state state; } ;
169 struct seq_operations ;
372 struct perf_event ;
377 struct __anonstruct_mm_segment_t_72 { unsigned long seg; } ;
377 typedef struct __anonstruct_mm_segment_t_72 mm_segment_t;
378 struct thread_struct { struct desc_struct tls_array[3U]; unsigned long sp0; unsigned long sp; unsigned short es; unsigned short ds; unsigned short fsindex; unsigned short gsindex; u32 status; unsigned long fsbase; unsigned long gsbase; struct perf_event *ptrace_bps[4U]; unsigned long debugreg6; unsigned long ptrace_dr7; unsigned long cr2; unsigned long trap_nr; unsigned long error_code; unsigned long *io_bitmap_ptr; unsigned long iopl; unsigned int io_bitmap_max; mm_segment_t addr_limit; unsigned char sig_on_uaccess_err; unsigned char uaccess_err; struct fpu fpu; } ;
13 struct optimistic_spin_queue { atomic_t tail; } ;
39 struct mutex { atomic_t count; spinlock_t wait_lock; struct list_head wait_list; struct task_struct *owner; void *magic; struct lockdep_map dep_map; } ;
67 struct mutex_waiter { struct list_head list; struct task_struct *task; void *magic; } ;
177 struct timespec ;
178 struct compat_timespec ;
179 struct thread_info { unsigned long flags; } ;
20 struct __anonstruct_futex_74 { u32 *uaddr; u32 val; u32 flags; u32 bitset; u64 time; u32 *uaddr2; } ;
20 struct __anonstruct_nanosleep_75 { clockid_t clockid; struct timespec *rmtp; struct compat_timespec *compat_rmtp; u64 expires; } ;
20 struct pollfd ;
20 struct __anonstruct_poll_76 { struct pollfd *ufds; int nfds; int has_timeout; unsigned long tv_sec; unsigned long tv_nsec; } ;
20 union __anonunion____missing_field_name_73 { struct __anonstruct_futex_74 futex; struct __anonstruct_nanosleep_75 nanosleep; struct __anonstruct_poll_76 poll; } ;
20 struct restart_block { long int (*fn)(struct restart_block *); union __anonunion____missing_field_name_73 __annonCompField20; } ;
416 struct rw_semaphore ;
417 struct rw_semaphore { atomic_long_t count; struct list_head wait_list; raw_spinlock_t wait_lock; struct optimistic_spin_queue osq; struct task_struct *owner; struct lockdep_map dep_map; } ;
178 struct seqcount { unsigned int sequence; struct lockdep_map dep_map; } ;
52 typedef struct seqcount seqcount_t;
407 struct __anonstruct_seqlock_t_91 { struct seqcount seqcount; spinlock_t lock; } ;
407 typedef struct __anonstruct_seqlock_t_91 seqlock_t;
38 struct __wait_queue_head { spinlock_t lock; struct list_head task_list; } ;
43 typedef struct __wait_queue_head wait_queue_head_t;
1225 struct completion { unsigned int done; wait_queue_head_t wait; } ;
108 struct timespec { __kernel_time_t tv_sec; long tv_nsec; } ;
7 typedef __s64 time64_t;
450 union ktime { s64 tv64; } ;
41 typedef union ktime ktime_t;
1145 struct timer_list { struct hlist_node entry; unsigned long expires; void (*function)(unsigned long); unsigned long data; u32 flags; int start_pid; void *start_site; char start_comm[16U]; struct lockdep_map lockdep_map; } ;
254 struct hrtimer ;
255 enum hrtimer_restart ;
256 struct rb_node { unsigned long __rb_parent_color; struct rb_node *rb_right; struct rb_node *rb_left; } ;
41 struct rb_root { struct rb_node *rb_node; } ;
83 struct user_namespace ;
22 struct __anonstruct_kuid_t_96 { uid_t val; } ;
22 typedef struct __anonstruct_kuid_t_96 kuid_t;
27 struct __anonstruct_kgid_t_97 { gid_t val; } ;
27 typedef struct __anonstruct_kgid_t_97 kgid_t;
835 struct nsproxy ;
836 struct ctl_table_root ;
837 struct ctl_table_header ;
838 struct ctl_dir ;
39 typedef int proc_handler(struct ctl_table *, int, void *, size_t *, loff_t *);
61 struct ctl_table_poll { atomic_t event; wait_queue_head_t wait; } ;
100 struct ctl_table { const char *procname; void *data; int maxlen; umode_t mode; struct ctl_table *child; proc_handler *proc_handler; struct ctl_table_poll *poll; void *extra1; void *extra2; } ;
121 struct ctl_node { struct rb_node node; struct ctl_table_header *header; } ;
126 struct __anonstruct____missing_field_name_99 { struct ctl_table *ctl_table; int used; int count; int nreg; } ;
126 union __anonunion____missing_field_name_98 { struct __anonstruct____missing_field_name_99 __annonCompField21; struct callback_head rcu; } ;
126 struct ctl_table_set ;
126 struct ctl_table_header { union __anonunion____missing_field_name_98 __annonCompField22; struct completion *unregistering; struct ctl_table *ctl_table_arg; struct ctl_table_root *root; struct ctl_table_set *set; struct ctl_dir *parent; struct ctl_node *node; } ;
147 struct ctl_dir { struct ctl_table_header header; struct rb_root root; } ;
153 struct ctl_table_set { int (*is_seen)(struct ctl_table_set *); struct ctl_dir dir; } ;
158 struct ctl_table_root { struct ctl_table_set default_set; struct ctl_table_set * (*lookup)(struct ctl_table_root *); void (*set_ownership)(struct ctl_table_header *, struct ctl_table *, kuid_t *, kgid_t *); int (*permissions)(struct ctl_table_header *, struct ctl_table *); } ;
278 struct workqueue_struct ;
279 struct work_struct ;
54 struct work_struct { atomic_long_t data; struct list_head entry; void (*func)(struct work_struct *); struct lockdep_map lockdep_map; } ;
107 struct delayed_work { struct work_struct work; struct timer_list timer; struct workqueue_struct *wq; int cpu; } ;
268 struct notifier_block ;
53 struct notifier_block { int (*notifier_call)(struct notifier_block *, unsigned long, void *); struct notifier_block *next; int priority; } ;
215 struct clk ;
503 struct device_node ;
135 struct kstat { u64 ino; dev_t dev; umode_t mode; unsigned int nlink; kuid_t uid; kgid_t gid; dev_t rdev; loff_t size; struct timespec atime; struct timespec mtime; struct timespec ctime; unsigned long blksize; unsigned long long blocks; } ;
36 struct vm_area_struct ;
97 struct __anonstruct_nodemask_t_100 { unsigned long bits[16U]; } ;
97 typedef struct __anonstruct_nodemask_t_100 nodemask_t;
249 typedef unsigned int isolate_mode_t;
777 struct resource ;
66 struct resource { resource_size_t start; resource_size_t end; const char *name; unsigned long flags; unsigned long desc; struct resource *parent; struct resource *sibling; struct resource *child; } ;
58 struct pm_message { int event; } ;
64 typedef struct pm_message pm_message_t;
65 struct dev_pm_ops { int (*prepare)(struct device *); void (*complete)(struct device *); int (*suspend)(struct device *); int (*resume)(struct device *); int (*freeze)(struct device *); int (*thaw)(struct device *); int (*poweroff)(struct device *); int (*restore)(struct device *); int (*suspend_late)(struct device *); int (*resume_early)(struct device *); int (*freeze_late)(struct device *); int (*thaw_early)(struct device *); int (*poweroff_late)(struct device *); int (*restore_early)(struct device *); int (*suspend_noirq)(struct device *); int (*resume_noirq)(struct device *); int (*freeze_noirq)(struct device *); int (*thaw_noirq)(struct device *); int (*poweroff_noirq)(struct device *); int (*restore_noirq)(struct device *); int (*runtime_suspend)(struct device *); int (*runtime_resume)(struct device *); int (*runtime_idle)(struct device *); } ;
320 enum rpm_status { RPM_ACTIVE = 0, RPM_RESUMING = 1, RPM_SUSPENDED = 2, RPM_SUSPENDING = 3 } ;
327 enum rpm_request { RPM_REQ_NONE = 0, RPM_REQ_IDLE = 1, RPM_REQ_SUSPEND = 2, RPM_REQ_AUTOSUSPEND = 3, RPM_REQ_RESUME = 4 } ;
335 struct wakeup_source ;
336 struct wake_irq ;
337 struct pm_domain_data ;
338 struct pm_subsys_data { spinlock_t lock; unsigned int refcount; struct list_head clock_list; struct pm_domain_data *domain_data; } ;
556 struct dev_pm_qos ;
556 struct dev_pm_info { pm_message_t power_state; unsigned char can_wakeup; unsigned char async_suspend; bool is_prepared; bool is_suspended; bool is_noirq_suspended; bool is_late_suspended; bool early_init; bool direct_complete; spinlock_t lock; struct list_head entry; struct completion completion; struct wakeup_source *wakeup; bool wakeup_path; bool syscore; bool no_pm_callbacks; struct timer_list suspend_timer; unsigned long timer_expires; struct work_struct work; wait_queue_head_t wait_queue; struct wake_irq *wakeirq; atomic_t usage_count; atomic_t child_count; unsigned char disable_depth; unsigned char idle_notification; unsigned char request_pending; unsigned char deferred_resume; unsigned char run_wake; unsigned char runtime_auto; bool ignore_children; unsigned char no_callbacks; unsigned char irq_safe; unsigned char use_autosuspend; unsigned char timer_autosuspends; unsigned char memalloc_noio; enum rpm_request request; enum rpm_status runtime_status; int runtime_error; int autosuspend_delay; unsigned long last_busy; unsigned long active_jiffies; unsigned long suspended_jiffies; unsigned long accounting_timestamp; struct pm_subsys_data *subsys_data; void (*set_latency_tolerance)(struct device *, s32 ); struct dev_pm_qos *qos; } ;
616 struct dev_pm_domain { struct dev_pm_ops ops; void (*detach)(struct device *, bool ); int (*activate)(struct device *); void (*sync)(struct device *); void (*dismiss)(struct device *); } ;
34 struct ldt_struct ;
34 struct vdso_image ;
34 struct __anonstruct_mm_context_t_165 { struct ldt_struct *ldt; unsigned short ia32_compat; struct mutex lock; void *vdso; const struct vdso_image *vdso_image; atomic_t perf_rdpmc_allowed; u16 pkey_allocation_map; s16 execute_only_pkey; } ;
34 typedef struct __anonstruct_mm_context_t_165 mm_context_t;
22 struct bio_vec ;
1290 struct llist_node ;
64 struct llist_node { struct llist_node *next; } ;
37 struct cred ;
19 struct inode ;
58 struct arch_uprobe_task { unsigned long saved_scratch_register; unsigned int saved_trap_nr; unsigned int saved_tf; } ;
66 enum uprobe_task_state { UTASK_RUNNING = 0, UTASK_SSTEP = 1, UTASK_SSTEP_ACK = 2, UTASK_SSTEP_TRAPPED = 3 } ;
73 struct __anonstruct____missing_field_name_211 { struct arch_uprobe_task autask; unsigned long vaddr; } ;
73 struct __anonstruct____missing_field_name_212 { struct callback_head dup_xol_work; unsigned long dup_xol_addr; } ;
73 union __anonunion____missing_field_name_210 { struct __anonstruct____missing_field_name_211 __annonCompField35; struct __anonstruct____missing_field_name_212 __annonCompField36; } ;
73 struct uprobe ;
73 struct return_instance ;
73 struct uprobe_task { enum uprobe_task_state state; union __anonunion____missing_field_name_210 __annonCompField37; struct uprobe *active_uprobe; unsigned long xol_vaddr; struct return_instance *return_instances; unsigned int depth; } ;
94 struct return_instance { struct uprobe *uprobe; unsigned long func; unsigned long stack; unsigned long orig_ret_vaddr; bool chained; struct return_instance *next; } ;
110 struct xol_area ;
111 struct uprobes_state { struct xol_area *xol_area; } ;
150 struct address_space ;
151 struct mem_cgroup ;
152 union __anonunion____missing_field_name_213 { struct address_space *mapping; void *s_mem; atomic_t compound_mapcount; } ;
152 union __anonunion____missing_field_name_214 { unsigned long index; void *freelist; } ;
152 struct __anonstruct____missing_field_name_218 { unsigned short inuse; unsigned short objects; unsigned char frozen; } ;
152 union __anonunion____missing_field_name_217 { atomic_t _mapcount; unsigned int active; struct __anonstruct____missing_field_name_218 __annonCompField40; int units; } ;
152 struct __anonstruct____missing_field_name_216 { union __anonunion____missing_field_name_217 __annonCompField41; atomic_t _refcount; } ;
152 union __anonunion____missing_field_name_215 { unsigned long counters; struct __anonstruct____missing_field_name_216 __annonCompField42; } ;
152 struct dev_pagemap ;
152 struct __anonstruct____missing_field_name_220 { struct page *next; int pages; int pobjects; } ;
152 struct __anonstruct____missing_field_name_221 { unsigned long compound_head; unsigned int compound_dtor; unsigned int compound_order; } ;
152 struct __anonstruct____missing_field_name_222 { unsigned long __pad; pgtable_t pmd_huge_pte; } ;
152 union __anonunion____missing_field_name_219 { struct list_head lru; struct dev_pagemap *pgmap; struct __anonstruct____missing_field_name_220 __annonCompField44; struct callback_head callback_head; struct __anonstruct____missing_field_name_221 __annonCompField45; struct __anonstruct____missing_field_name_222 __annonCompField46; } ;
152 struct kmem_cache ;
152 union __anonunion____missing_field_name_223 { unsigned long private; spinlock_t *ptl; struct kmem_cache *slab_cache; } ;
152 struct page { unsigned long flags; union __anonunion____missing_field_name_213 __annonCompField38; union __anonunion____missing_field_name_214 __annonCompField39; union __anonunion____missing_field_name_215 __annonCompField43; union __anonunion____missing_field_name_219 __annonCompField47; union __anonunion____missing_field_name_223 __annonCompField48; struct mem_cgroup *mem_cgroup; } ;
197 struct page_frag { struct page *page; __u32 offset; __u32 size; } ;
282 struct userfaultfd_ctx ;
282 struct vm_userfaultfd_ctx { struct userfaultfd_ctx *ctx; } ;
289 struct __anonstruct_shared_224 { struct rb_node rb; unsigned long rb_subtree_last; } ;
289 struct anon_vma ;
289 struct vm_operations_struct ;
289 struct mempolicy ;
289 struct vm_area_struct { unsigned long vm_start; unsigned long vm_end; struct vm_area_struct *vm_next; struct vm_area_struct *vm_prev; struct rb_node vm_rb; unsigned long rb_subtree_gap; struct mm_struct *vm_mm; pgprot_t vm_page_prot; unsigned long vm_flags; struct __anonstruct_shared_224 shared; struct list_head anon_vma_chain; struct anon_vma *anon_vma; const struct vm_operations_struct *vm_ops; unsigned long vm_pgoff; struct file *vm_file; void *vm_private_data; struct mempolicy *vm_policy; struct vm_userfaultfd_ctx vm_userfaultfd_ctx; } ;
362 struct core_thread { struct task_struct *task; struct core_thread *next; } ;
367 struct core_state { atomic_t nr_threads; struct core_thread dumper; struct completion startup; } ;
381 struct task_rss_stat { int events; int count[4U]; } ;
389 struct mm_rss_stat { atomic_long_t count[4U]; } ;
394 struct kioctx_table ;
395 struct linux_binfmt ;
395 struct mmu_notifier_mm ;
395 struct mm_struct { struct vm_area_struct *mmap; struct rb_root mm_rb; u32 vmacache_seqnum; unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); unsigned long mmap_base; unsigned long mmap_legacy_base; unsigned long task_size; unsigned long highest_vm_end; pgd_t *pgd; atomic_t mm_users; atomic_t mm_count; atomic_long_t nr_ptes; atomic_long_t nr_pmds; int map_count; spinlock_t page_table_lock; struct rw_semaphore mmap_sem; struct list_head mmlist; unsigned long hiwater_rss; unsigned long hiwater_vm; unsigned long total_vm; unsigned long locked_vm; unsigned long pinned_vm; unsigned long data_vm; unsigned long exec_vm; unsigned long stack_vm; unsigned long def_flags; unsigned long start_code; unsigned long end_code; unsigned long start_data; unsigned long end_data; unsigned long start_brk; unsigned long brk; unsigned long start_stack; unsigned long arg_start; unsigned long arg_end; unsigned long env_start; unsigned long env_end; unsigned long saved_auxv[46U]; struct mm_rss_stat rss_stat; struct linux_binfmt *binfmt; cpumask_var_t cpu_vm_mask_var; mm_context_t context; unsigned long flags; struct core_state *core_state; spinlock_t ioctx_lock; struct kioctx_table *ioctx_table; struct task_struct *owner; struct file *exe_file; struct mmu_notifier_mm *mmu_notifier_mm; struct cpumask cpumask_allocation; unsigned long numa_next_scan; unsigned long numa_scan_offset; int numa_scan_seq; bool tlb_flush_pending; struct uprobes_state uprobes_state; void *bd_addr; atomic_long_t hugetlb_usage; struct work_struct async_put_work; } ;
563 struct vm_fault ;
617 struct vdso_image { void *data; unsigned long size; unsigned long alt; unsigned long alt_len; long sym_vvar_start; long sym_vvar_page; long sym_hpet_page; long sym_pvclock_page; long sym_VDSO32_NOTE_MASK; long sym___kernel_sigreturn; long sym___kernel_rt_sigreturn; long sym___kernel_vsyscall; long sym_int80_landing_pad; } ;
15 typedef __u64 Elf64_Addr;
16 typedef __u16 Elf64_Half;
18 typedef __u64 Elf64_Off;
20 typedef __u32 Elf64_Word;
21 typedef __u64 Elf64_Xword;
190 struct elf64_sym { Elf64_Word st_name; unsigned char st_info; unsigned char st_other; Elf64_Half st_shndx; Elf64_Addr st_value; Elf64_Xword st_size; } ;
198 typedef struct elf64_sym Elf64_Sym;
219 struct elf64_hdr { unsigned char e_ident[16U]; Elf64_Half e_type; Elf64_Half e_machine; Elf64_Word e_version; Elf64_Addr e_entry; Elf64_Off e_phoff; Elf64_Off e_shoff; Elf64_Word e_flags; Elf64_Half e_ehsize; Elf64_Half e_phentsize; Elf64_Half e_phnum; Elf64_Half e_shentsize; Elf64_Half e_shnum; Elf64_Half e_shstrndx; } ;
235 typedef struct elf64_hdr Elf64_Ehdr;
314 struct elf64_shdr { Elf64_Word sh_name; Elf64_Word sh_type; Elf64_Xword sh_flags; Elf64_Addr sh_addr; Elf64_Off sh_offset; Elf64_Xword sh_size; Elf64_Word sh_link; Elf64_Word sh_info; Elf64_Xword sh_addralign; Elf64_Xword sh_entsize; } ;
326 typedef struct elf64_shdr Elf64_Shdr;
53 union __anonunion____missing_field_name_229 { unsigned long bitmap[4U]; struct callback_head callback_head; } ;
53 struct idr_layer { int prefix; int layer; struct idr_layer *ary[256U]; int count; union __anonunion____missing_field_name_229 __annonCompField49; } ;
41 struct idr { struct idr_layer *hint; struct idr_layer *top; int layers; int cur; spinlock_t lock; int id_free_cnt; struct idr_layer *id_free; } ;
124 struct ida_bitmap { long nr_busy; unsigned long bitmap[15U]; } ;
167 struct ida { struct idr idr; struct ida_bitmap *free_bitmap; } ;
199 struct dentry ;
200 struct iattr ;
201 struct super_block ;
202 struct file_system_type ;
203 struct kernfs_open_node ;
204 struct kernfs_iattrs ;
227 struct kernfs_root ;
227 struct kernfs_elem_dir { unsigned long subdirs; struct rb_root children; struct kernfs_root *root; } ;
85 struct kernfs_node ;
85 struct kernfs_elem_symlink { struct kernfs_node *target_kn; } ;
89 struct kernfs_ops ;
89 struct kernfs_elem_attr { const struct kernfs_ops *ops; struct kernfs_open_node *open; loff_t size; struct kernfs_node *notify_next; } ;
96 union __anonunion____missing_field_name_234 { struct kernfs_elem_dir dir; struct kernfs_elem_symlink symlink; struct kernfs_elem_attr attr; } ;
96 struct kernfs_node { atomic_t count; atomic_t active; struct lockdep_map dep_map; struct kernfs_node *parent; const char *name; struct rb_node rb; const void *ns; unsigned int hash; union __anonunion____missing_field_name_234 __annonCompField50; void *priv; unsigned short flags; umode_t mode; unsigned int ino; struct kernfs_iattrs *iattr; } ;
138 struct kernfs_syscall_ops { int (*remount_fs)(struct kernfs_root *, int *, char *); int (*show_options)(struct seq_file *, struct kernfs_root *); int (*mkdir)(struct kernfs_node *, const char *, umode_t ); int (*rmdir)(struct kernfs_node *); int (*rename)(struct kernfs_node *, struct kernfs_node *, const char *); int (*show_path)(struct seq_file *, struct kernfs_node *, struct kernfs_root *); } ;
157 struct kernfs_root { struct kernfs_node *kn; unsigned int flags; struct ida ino_ida; struct kernfs_syscall_ops *syscall_ops; struct list_head supers; wait_queue_head_t deactivate_waitq; } ;
173 struct kernfs_open_file { struct kernfs_node *kn; struct file *file; void *priv; struct mutex mutex; struct mutex prealloc_mutex; int event; struct list_head list; char *prealloc_buf; size_t atomic_write_len; bool mmapped; const struct vm_operations_struct *vm_ops; } ;
191 struct kernfs_ops { int (*seq_show)(struct seq_file *, void *); void * (*seq_start)(struct seq_file *, loff_t *); void * (*seq_next)(struct seq_file *, void *, loff_t *); void (*seq_stop)(struct seq_file *, void *); ssize_t (*read)(struct kernfs_open_file *, char *, size_t , loff_t ); size_t atomic_write_len; bool prealloc; ssize_t (*write)(struct kernfs_open_file *, char *, size_t , loff_t ); int (*mmap)(struct kernfs_open_file *, struct vm_area_struct *); struct lock_class_key lockdep_key; } ;
511 struct sock ;
512 struct kobject ;
513 enum kobj_ns_type { KOBJ_NS_TYPE_NONE = 0, KOBJ_NS_TYPE_NET = 1, KOBJ_NS_TYPES = 2 } ;
519 struct kobj_ns_type_operations { enum kobj_ns_type type; bool (*current_may_mount)(); void * (*grab_current_ns)(); const void * (*netlink_ns)(struct sock *); const void * (*initial_ns)(); void (*drop_ns)(void *); } ;
59 struct bin_attribute ;
60 struct attribute { const char *name; umode_t mode; bool ignore_lockdep; struct lock_class_key *key; struct lock_class_key skey; } ;
37 struct attribute_group { const char *name; umode_t (*is_visible)(struct kobject *, struct attribute *, int); umode_t (*is_bin_visible)(struct kobject *, struct bin_attribute *, int); struct attribute **attrs; struct bin_attribute **bin_attrs; } ;
92 struct bin_attribute { struct attribute attr; size_t size; void *private; ssize_t (*read)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t , size_t ); ssize_t (*write)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t , size_t ); int (*mmap)(struct file *, struct kobject *, struct bin_attribute *, struct vm_area_struct *); } ;
165 struct sysfs_ops { ssize_t (*show)(struct kobject *, struct attribute *, char *); ssize_t (*store)(struct kobject *, struct attribute *, const char *, size_t ); } ;
530 struct kref { atomic_t refcount; } ;
52 struct kset ;
52 struct kobj_type ;
52 struct kobject { const char *name; struct list_head entry; struct kobject *parent; struct kset *kset; struct kobj_type *ktype; struct kernfs_node *sd; struct kref kref; struct delayed_work release; unsigned char state_initialized; unsigned char state_in_sysfs; unsigned char state_add_uevent_sent; unsigned char state_remove_uevent_sent; unsigned char uevent_suppress; } ;
115 struct kobj_type { void (*release)(struct kobject *); const struct sysfs_ops *sysfs_ops; struct attribute **default_attrs; const struct kobj_ns_type_operations * (*child_ns_type)(struct kobject *); const void * (*namespace)(struct kobject *); } ;
123 struct kobj_uevent_env { char *argv[3U]; char *envp[32U]; int envp_idx; char buf[2048U]; int buflen; } ;
131 struct kset_uevent_ops { const int (*filter)(struct kset *, struct kobject *); const const char * (*name)(struct kset *, struct kobject *); const int (*uevent)(struct kset *, struct kobject *, struct kobj_uevent_env *); } ;
148 struct kset { struct list_head list; spinlock_t list_lock; struct kobject kobj; const struct kset_uevent_ops *uevent_ops; } ;
223 struct kernel_param ;
228 struct kernel_param_ops { unsigned int flags; int (*set)(const char *, const struct kernel_param *); int (*get)(char *, const struct kernel_param *); void (*free)(void *); } ;
62 struct kparam_string ;
62 struct kparam_array ;
62 union __anonunion____missing_field_name_237 { void *arg; const struct kparam_string *str; const struct kparam_array *arr; } ;
62 struct kernel_param { const char *name; struct module *mod; const struct kernel_param_ops *ops; const u16 perm; s8 level; u8 flags; union __anonunion____missing_field_name_237 __annonCompField51; } ;
83 struct kparam_string { unsigned int maxlen; char *string; } ;
89 struct kparam_array { unsigned int max; unsigned int elemsize; unsigned int *num; const struct kernel_param_ops *ops; void *elem; } ;
470 struct exception_table_entry ;
24 struct latch_tree_node { struct rb_node node[2U]; } ;
211 struct mod_arch_specific { } ;
39 struct module_param_attrs ;
39 struct module_kobject { struct kobject kobj; struct module *mod; struct kobject *drivers_dir; struct module_param_attrs *mp; struct completion *kobj_completion; } ;
50 struct module_attribute { struct attribute attr; ssize_t (*show)(struct module_attribute *, struct module_kobject *, char *); ssize_t (*store)(struct module_attribute *, struct module_kobject *, const char *, size_t ); void (*setup)(struct module *, const char *); int (*test)(struct module *); void (*free)(struct module *); } ;
277 enum module_state { MODULE_STATE_LIVE = 0, MODULE_STATE_COMING = 1, MODULE_STATE_GOING = 2, MODULE_STATE_UNFORMED = 3 } ;
284 struct mod_tree_node { struct module *mod; struct latch_tree_node node; } ;
291 struct module_layout { void *base; unsigned int size; unsigned int text_size; unsigned int ro_size; unsigned int ro_after_init_size; struct mod_tree_node mtn; } ;
307 struct mod_kallsyms { Elf64_Sym *symtab; unsigned int num_symtab; char *strtab; } ;
321 struct klp_modinfo { Elf64_Ehdr hdr; Elf64_Shdr *sechdrs; char *secstrings; unsigned int symndx; } ;
329 struct module_sect_attrs ;
329 struct module_notes_attrs ;
329 struct trace_event_call ;
329 struct trace_enum_map ;
329 struct module { enum module_state state; struct list_head list; char name[56U]; struct module_kobject mkobj; struct module_attribute *modinfo_attrs; const char *version; const char *srcversion; struct kobject *holders_dir; const struct kernel_symbol *syms; const unsigned long *crcs; unsigned int num_syms; struct mutex param_lock; struct kernel_param *kp; unsigned int num_kp; unsigned int num_gpl_syms; const struct kernel_symbol *gpl_syms; const unsigned long *gpl_crcs; const struct kernel_symbol *unused_syms; const unsigned long *unused_crcs; unsigned int num_unused_syms; unsigned int num_unused_gpl_syms; const struct kernel_symbol *unused_gpl_syms; const unsigned long *unused_gpl_crcs; bool sig_ok; bool async_probe_requested; const struct kernel_symbol *gpl_future_syms; const unsigned long *gpl_future_crcs; unsigned int num_gpl_future_syms; unsigned int num_exentries; struct exception_table_entry *extable; int (*init)(); struct module_layout core_layout; struct module_layout init_layout; struct mod_arch_specific arch; unsigned int taints; unsigned int num_bugs; struct list_head bug_list; struct bug_entry *bug_table; struct mod_kallsyms *kallsyms; struct mod_kallsyms core_kallsyms; struct module_sect_attrs *sect_attrs; struct module_notes_attrs *notes_attrs; char *args; void *percpu; unsigned int percpu_size; unsigned int num_tracepoints; const struct tracepoint **tracepoints_ptrs; unsigned int num_trace_bprintk_fmt; const char **trace_bprintk_fmt_start; struct trace_event_call **trace_events; unsigned int num_trace_events; struct trace_enum_map **trace_enums; unsigned int num_trace_enums; bool klp; bool klp_alive; struct klp_modinfo *klp_info; struct list_head source_list; struct list_head target_list; void (*exit)(); atomic_t refcnt; ctor_fn_t (**ctors)(); unsigned int num_ctors; } ;
22 struct kernel_cap_struct { __u32 cap[2U]; } ;
25 typedef struct kernel_cap_struct kernel_cap_t;
84 struct plist_node { int prio; struct list_head prio_list; struct list_head node_list; } ;
4 typedef unsigned long cputime_t;
26 struct sem_undo_list ;
26 struct sysv_sem { struct sem_undo_list *undo_list; } ;
78 struct user_struct ;
26 struct sysv_shm { struct list_head shm_clist; } ;
24 struct __anonstruct_sigset_t_245 { unsigned long sig[1U]; } ;
24 typedef struct __anonstruct_sigset_t_245 sigset_t;
25 struct siginfo ;
17 typedef void __signalfn_t(int);
18 typedef __signalfn_t *__sighandler_t;
20 typedef void __restorefn_t();
21 typedef __restorefn_t *__sigrestore_t;
38 union sigval { int sival_int; void *sival_ptr; } ;
10 typedef union sigval sigval_t;
11 struct __anonstruct__kill_247 { __kernel_pid_t _pid; __kernel_uid32_t _uid; } ;
11 struct __anonstruct__timer_248 { __kernel_timer_t _tid; int _overrun; char _pad[0U]; sigval_t _sigval; int _sys_private; } ;
11 struct __anonstruct__rt_249 { __kernel_pid_t _pid; __kernel_uid32_t _uid; sigval_t _sigval; } ;
11 struct __anonstruct__sigchld_250 { __kernel_pid_t _pid; __kernel_uid32_t _uid; int _status; __kernel_clock_t _utime; __kernel_clock_t _stime; } ;
11 struct __anonstruct__addr_bnd_253 { void *_lower; void *_upper; } ;
11 union __anonunion____missing_field_name_252 { struct __anonstruct__addr_bnd_253 _addr_bnd; __u32 _pkey; } ;
11 struct __anonstruct__sigfault_251 { void *_addr; short _addr_lsb; union __anonunion____missing_field_name_252 __annonCompField52; } ;
11 struct __anonstruct__sigpoll_254 { long _band; int _fd; } ;
11 struct __anonstruct__sigsys_255 { void *_call_addr; int _syscall; unsigned int _arch; } ;
11 union __anonunion__sifields_246 { int _pad[28U]; struct __anonstruct__kill_247 _kill; struct __anonstruct__timer_248 _timer; struct __anonstruct__rt_249 _rt; struct __anonstruct__sigchld_250 _sigchld; struct __anonstruct__sigfault_251 _sigfault; struct __anonstruct__sigpoll_254 _sigpoll; struct __anonstruct__sigsys_255 _sigsys; } ;
11 struct siginfo { int si_signo; int si_errno; int si_code; union __anonunion__sifields_246 _sifields; } ;
118 typedef struct siginfo siginfo_t;
22 struct sigpending { struct list_head list; sigset_t signal; } ;
257 struct sigaction { __sighandler_t sa_handler; unsigned long sa_flags; __sigrestore_t sa_restorer; sigset_t sa_mask; } ;
271 struct k_sigaction { struct sigaction sa; } ;
457 enum pid_type { PIDTYPE_PID = 0, PIDTYPE_PGID = 1, PIDTYPE_SID = 2, PIDTYPE_MAX = 3 } ;
464 struct pid_namespace ;
464 struct upid { int nr; struct pid_namespace *ns; struct hlist_node pid_chain; } ;
56 struct pid { atomic_t count; unsigned int level; struct hlist_head tasks[3U]; struct callback_head rcu; struct upid numbers[1U]; } ;
68 struct pid_link { struct hlist_node node; struct pid *pid; } ;
43 struct seccomp_filter ;
44 struct seccomp { int mode; struct seccomp_filter *filter; } ;
40 struct rt_mutex_waiter ;
41 struct rlimit { __kernel_ulong_t rlim_cur; __kernel_ulong_t rlim_max; } ;
11 struct timerqueue_node { struct rb_node node; ktime_t expires; } ;
12 struct timerqueue_head { struct rb_root head; struct timerqueue_node *next; } ;
50 struct hrtimer_clock_base ;
51 struct hrtimer_cpu_base ;
60 enum hrtimer_restart { HRTIMER_NORESTART = 0, HRTIMER_RESTART = 1 } ;
65 struct hrtimer { struct timerqueue_node node; ktime_t _softexpires; enum hrtimer_restart (*function)(struct hrtimer *); struct hrtimer_clock_base *base; u8 state; u8 is_rel; int start_pid; void *start_site; char start_comm[16U]; } ;
125 struct hrtimer_clock_base { struct hrtimer_cpu_base *cpu_base; int index; clockid_t clockid; struct timerqueue_head active; ktime_t (*get_time)(); ktime_t offset; } ;
158 struct hrtimer_cpu_base { raw_spinlock_t lock; seqcount_t seq; struct hrtimer *running; unsigned int cpu; unsigned int active_bases; unsigned int clock_was_set_seq; bool migration_enabled; bool nohz_active; unsigned char in_hrtirq; unsigned char hres_active; unsigned char hang_detected; ktime_t expires_next; struct hrtimer *next_timer; unsigned int nr_events; unsigned int nr_retries; unsigned int nr_hangs; unsigned int max_hang_time; struct hrtimer_clock_base clock_base[4U]; } ;
12 enum kcov_mode { KCOV_MODE_DISABLED = 0, KCOV_MODE_TRACE = 1 } ;
17 struct task_io_accounting { u64 rchar; u64 wchar; u64 syscr; u64 syscw; u64 read_bytes; u64 write_bytes; u64 cancelled_write_bytes; } ;
45 struct latency_record { unsigned long backtrace[12U]; unsigned int count; unsigned long time; unsigned long max; } ;
41 struct assoc_array_ptr ;
41 struct assoc_array { struct assoc_array_ptr *root; unsigned long nr_leaves_on_tree; } ;
31 typedef int32_t key_serial_t;
34 typedef uint32_t key_perm_t;
35 struct key ;
36 struct signal_struct ;
37 struct key_type ;
41 struct keyring_index_key { struct key_type *type; const char *description; size_t desc_len; } ;
91 union key_payload { void *rcu_data0; void *data[4U]; } ;
128 union __anonunion____missing_field_name_290 { struct list_head graveyard_link; struct rb_node serial_node; } ;
128 struct key_user ;
128 union __anonunion____missing_field_name_291 { time_t expiry; time_t revoked_at; } ;
128 struct __anonstruct____missing_field_name_293 { struct key_type *type; char *description; } ;
128 union __anonunion____missing_field_name_292 { struct keyring_index_key index_key; struct __anonstruct____missing_field_name_293 __annonCompField55; } ;
128 struct __anonstruct____missing_field_name_295 { struct list_head name_link; struct assoc_array keys; } ;
128 union __anonunion____missing_field_name_294 { union key_payload payload; struct __anonstruct____missing_field_name_295 __annonCompField57; int reject_error; } ;
128 struct key { atomic_t usage; key_serial_t serial; union __anonunion____missing_field_name_290 __annonCompField53; struct rw_semaphore sem; struct key_user *user; void *security; union __anonunion____missing_field_name_291 __annonCompField54; time_t last_used_at; kuid_t uid; kgid_t gid; key_perm_t perm; unsigned short quotalen; unsigned short datalen; unsigned long flags; union __anonunion____missing_field_name_292 __annonCompField56; union __anonunion____missing_field_name_294 __annonCompField58; int (*restrict_link)(struct key *, const struct key_type *, const union key_payload *); } ;
377 struct audit_context ;
27 struct group_info { atomic_t usage; int ngroups; kgid_t gid[0U]; } ;
85 struct cred { atomic_t usage; atomic_t subscribers; void *put_addr; unsigned int magic; kuid_t uid; kgid_t gid; kuid_t suid; kgid_t sgid; kuid_t euid; kgid_t egid; kuid_t fsuid; kgid_t fsgid; unsigned int securebits; kernel_cap_t cap_inheritable; kernel_cap_t cap_permitted; kernel_cap_t cap_effective; kernel_cap_t cap_bset; kernel_cap_t cap_ambient; unsigned char jit_keyring; struct key *session_keyring; struct key *process_keyring; struct key *thread_keyring; struct key *request_key_auth; void *security; struct user_struct *user; struct user_namespace *user_ns; struct group_info *group_info; struct callback_head rcu; } ;
368 struct percpu_ref ;
55 typedef void percpu_ref_func_t(struct percpu_ref *);
68 struct percpu_ref { atomic_long_t count; unsigned long percpu_count_ptr; percpu_ref_func_t *release; percpu_ref_func_t *confirm_switch; bool force_atomic; struct callback_head rcu; } ;
325 enum rcu_sync_type { RCU_SYNC = 0, RCU_SCHED_SYNC = 1, RCU_BH_SYNC = 2 } ;
331 struct rcu_sync { int gp_state; int gp_count; wait_queue_head_t gp_wait; int cb_state; struct callback_head cb_head; enum rcu_sync_type gp_type; } ;
66 struct percpu_rw_semaphore { struct rcu_sync rss; unsigned int *read_count; struct rw_semaphore rw_sem; wait_queue_head_t writer; int readers_block; } ;
144 struct cgroup ;
145 struct cgroup_root ;
146 struct cgroup_subsys ;
147 struct cgroup_taskset ;
191 struct cgroup_file { struct kernfs_node *kn; } ;
90 struct cgroup_subsys_state { struct cgroup *cgroup; struct cgroup_subsys *ss; struct percpu_ref refcnt; struct cgroup_subsys_state *parent; struct list_head sibling; struct list_head children; int id; unsigned int flags; u64 serial_nr; atomic_t online_cnt; struct callback_head callback_head; struct work_struct destroy_work; } ;
141 struct css_set { atomic_t refcount; struct hlist_node hlist; struct list_head tasks; struct list_head mg_tasks; struct list_head cgrp_links; struct cgroup *dfl_cgrp; struct cgroup_subsys_state *subsys[13U]; struct list_head mg_preload_node; struct list_head mg_node; struct cgroup *mg_src_cgrp; struct cgroup *mg_dst_cgrp; struct css_set *mg_dst_cset; struct list_head e_cset_node[13U]; struct list_head task_iters; bool dead; struct callback_head callback_head; } ;
221 struct cgroup { struct cgroup_subsys_state self; unsigned long flags; int id; int level; int populated_cnt; struct kernfs_node *kn; struct cgroup_file procs_file; struct cgroup_file events_file; u16 subtree_control; u16 subtree_ss_mask; u16 old_subtree_control; u16 old_subtree_ss_mask; struct cgroup_subsys_state *subsys[13U]; struct cgroup_root *root; struct list_head cset_links; struct list_head e_csets[13U]; struct list_head pidlists; struct mutex pidlist_mutex; wait_queue_head_t offline_waitq; struct work_struct release_agent_work; int ancestor_ids[]; } ;
306 struct cgroup_root { struct kernfs_root *kf_root; unsigned int subsys_mask; int hierarchy_id; struct cgroup cgrp; int cgrp_ancestor_id_storage; atomic_t nr_cgrps; struct list_head root_list; unsigned int flags; struct idr cgroup_idr; char release_agent_path[4096U]; char name[64U]; } ;
345 struct cftype { char name[64U]; unsigned long private; size_t max_write_len; unsigned int flags; unsigned int file_offset; struct cgroup_subsys *ss; struct list_head node; struct kernfs_ops *kf_ops; u64 (*read_u64)(struct cgroup_subsys_state *, struct cftype *); s64 (*read_s64)(struct cgroup_subsys_state *, struct cftype *); int (*seq_show)(struct seq_file *, void *); void * (*seq_start)(struct seq_file *, loff_t *); void * (*seq_next)(struct seq_file *, void *, loff_t *); void (*seq_stop)(struct seq_file *, void *); int (*write_u64)(struct cgroup_subsys_state *, struct cftype *, u64 ); int (*write_s64)(struct cgroup_subsys_state *, struct cftype *, s64 ); ssize_t (*write)(struct kernfs_open_file *, char *, size_t , loff_t ); struct lock_class_key lockdep_key; } ;
430 struct cgroup_subsys { struct cgroup_subsys_state * (*css_alloc)(struct cgroup_subsys_state *); int (*css_online)(struct cgroup_subsys_state *); void (*css_offline)(struct cgroup_subsys_state *); void (*css_released)(struct cgroup_subsys_state *); void (*css_free)(struct cgroup_subsys_state *); void (*css_reset)(struct cgroup_subsys_state *); int (*can_attach)(struct cgroup_taskset *); void (*cancel_attach)(struct cgroup_taskset *); void (*attach)(struct cgroup_taskset *); void (*post_attach)(); int (*can_fork)(struct task_struct *); void (*cancel_fork)(struct task_struct *); void (*fork)(struct task_struct *); void (*exit)(struct task_struct *); void (*free)(struct task_struct *); void (*bind)(struct cgroup_subsys_state *); bool early_init; bool implicit_on_dfl; bool broken_hierarchy; bool warned_broken_hierarchy; int id; const char *name; const char *legacy_name; struct cgroup_root *root; struct idr css_idr; struct list_head cfts; struct cftype *dfl_cftypes; struct cftype *legacy_cftypes; unsigned int depends_on; } ;
128 struct futex_pi_state ;
129 struct robust_list_head ;
130 struct bio_list ;
131 struct fs_struct ;
132 struct perf_event_context ;
133 struct blk_plug ;
135 struct nameidata ;
188 struct cfs_rq ;
189 struct task_group ;
495 struct sighand_struct { atomic_t count; struct k_sigaction action[64U]; spinlock_t siglock; wait_queue_head_t signalfd_wqh; } ;
539 struct pacct_struct { int ac_flag; long ac_exitcode; unsigned long ac_mem; cputime_t ac_utime; cputime_t ac_stime; unsigned long ac_minflt; unsigned long ac_majflt; } ;
547 struct cpu_itimer { cputime_t expires; cputime_t incr; u32 error; u32 incr_error; } ;
554 struct prev_cputime { cputime_t utime; cputime_t stime; raw_spinlock_t lock; } ;
579 struct task_cputime { cputime_t utime; cputime_t stime; unsigned long long sum_exec_runtime; } ;
595 struct task_cputime_atomic { atomic64_t utime; atomic64_t stime; atomic64_t sum_exec_runtime; } ;
617 struct thread_group_cputimer { struct task_cputime_atomic cputime_atomic; bool running; bool checking_timer; } ;
662 struct autogroup ;
663 struct tty_struct ;
663 struct taskstats ;
663 struct tty_audit_buf ;
663 struct signal_struct { atomic_t sigcnt; atomic_t live; int nr_threads; struct list_head thread_head; wait_queue_head_t wait_chldexit; struct task_struct *curr_target; struct sigpending shared_pending; int group_exit_code; int notify_count; struct task_struct *group_exit_task; int group_stop_count; unsigned int flags; unsigned char is_child_subreaper; unsigned char has_child_subreaper; int posix_timer_id; struct list_head posix_timers; struct hrtimer real_timer; struct pid *leader_pid; ktime_t it_real_incr; struct cpu_itimer it[2U]; struct thread_group_cputimer cputimer; struct task_cputime cputime_expires; struct list_head cpu_timers[3U]; struct pid *tty_old_pgrp; int leader; struct tty_struct *tty; struct autogroup *autogroup; seqlock_t stats_lock; cputime_t utime; cputime_t stime; cputime_t cutime; cputime_t cstime; cputime_t gtime; cputime_t cgtime; struct prev_cputime prev_cputime; unsigned long nvcsw; unsigned long nivcsw; unsigned long cnvcsw; unsigned long cnivcsw; unsigned long min_flt; unsigned long maj_flt; unsigned long cmin_flt; unsigned long cmaj_flt; unsigned long inblock; unsigned long oublock; unsigned long cinblock; unsigned long coublock; unsigned long maxrss; unsigned long cmaxrss; struct task_io_accounting ioac; unsigned long long sum_sched_runtime; struct rlimit rlim[16U]; struct pacct_struct pacct; struct taskstats *stats; unsigned int audit_tty; struct tty_audit_buf *tty_audit_buf; bool oom_flag_origin; short oom_score_adj; short oom_score_adj_min; struct mm_struct *oom_mm; struct mutex cred_guard_mutex; } ;
839 struct user_struct { atomic_t __count; atomic_t processes; atomic_t sigpending; atomic_t inotify_watches; atomic_t inotify_devs; atomic_t fanotify_listeners; atomic_long_t epoll_watches; unsigned long mq_bytes; unsigned long locked_shm; unsigned long unix_inflight; atomic_long_t pipe_bufs; struct key *uid_keyring; struct key *session_keyring; struct hlist_node uidhash_node; kuid_t uid; atomic_long_t locked_vm; } ;
884 struct backing_dev_info ;
885 struct reclaim_state ;
886 struct sched_info { unsigned long pcount; unsigned long long run_delay; unsigned long long last_arrival; unsigned long long last_queued; } ;
900 struct task_delay_info { spinlock_t lock; unsigned int flags; u64 blkio_start; u64 blkio_delay; u64 swapin_delay; u32 blkio_count; u32 swapin_count; u64 freepages_start; u64 freepages_delay; u32 freepages_count; } ;
957 struct wake_q_node { struct wake_q_node *next; } ;
1200 struct io_context ;
1234 struct pipe_inode_info ;
1235 struct uts_namespace ;
1236 struct load_weight { unsigned long weight; u32 inv_weight; } ;
1243 struct sched_avg { u64 last_update_time; u64 load_sum; u32 util_sum; u32 period_contrib; unsigned long load_avg; unsigned long util_avg; } ;
1301 struct sched_statistics { u64 wait_start; u64 wait_max; u64 wait_count; u64 wait_sum; u64 iowait_count; u64 iowait_sum; u64 sleep_start; u64 sleep_max; s64 sum_sleep_runtime; u64 block_start; u64 block_max; u64 exec_max; u64 slice_max; u64 nr_migrations_cold; u64 nr_failed_migrations_affine; u64 nr_failed_migrations_running; u64 nr_failed_migrations_hot; u64 nr_forced_migrations; u64 nr_wakeups; u64 nr_wakeups_sync; u64 nr_wakeups_migrate; u64 nr_wakeups_local; u64 nr_wakeups_remote; u64 nr_wakeups_affine; u64 nr_wakeups_affine_attempts; u64 nr_wakeups_passive; u64 nr_wakeups_idle; } ;
1336 struct sched_entity { struct load_weight load; struct rb_node run_node; struct list_head group_node; unsigned int on_rq; u64 exec_start; u64 sum_exec_runtime; u64 vruntime; u64 prev_sum_exec_runtime; u64 nr_migrations; struct sched_statistics statistics; int depth; struct sched_entity *parent; struct cfs_rq *cfs_rq; struct cfs_rq *my_q; struct sched_avg avg; } ;
1373 struct rt_rq ;
1373 struct sched_rt_entity { struct list_head run_list; unsigned long timeout; unsigned long watchdog_stamp; unsigned int time_slice; unsigned short on_rq; unsigned short on_list; struct sched_rt_entity *back; struct sched_rt_entity *parent; struct rt_rq *rt_rq; struct rt_rq *my_q; } ;
1391 struct sched_dl_entity { struct rb_node rb_node; u64 dl_runtime; u64 dl_deadline; u64 dl_period; u64 dl_bw; s64 runtime; u64 deadline; unsigned int flags; int dl_throttled; int dl_boosted; int dl_yielded; struct hrtimer dl_timer; } ;
1455 struct tlbflush_unmap_batch { struct cpumask cpumask; bool flush_required; bool writable; } ;
1474 struct sched_class ;
1474 struct files_struct ;
1474 struct compat_robust_list_head ;
1474 struct numa_group ;
1474 struct kcov ;
1474 struct task_struct { struct thread_info thread_info; volatile long state; void *stack; atomic_t usage; unsigned int flags; unsigned int ptrace; struct llist_node wake_entry; int on_cpu; unsigned int cpu; unsigned int wakee_flips; unsigned long wakee_flip_decay_ts; struct task_struct *last_wakee; int wake_cpu; int on_rq; int prio; int static_prio; int normal_prio; unsigned int rt_priority; const struct sched_class *sched_class; struct sched_entity se; struct sched_rt_entity rt; struct task_group *sched_task_group; struct sched_dl_entity dl; struct hlist_head preempt_notifiers; unsigned int policy; int nr_cpus_allowed; cpumask_t cpus_allowed; unsigned long rcu_tasks_nvcsw; bool rcu_tasks_holdout; struct list_head rcu_tasks_holdout_list; int rcu_tasks_idle_cpu; struct sched_info sched_info; struct list_head tasks; struct plist_node pushable_tasks; struct rb_node pushable_dl_tasks; struct mm_struct *mm; struct mm_struct *active_mm; u32 vmacache_seqnum; struct vm_area_struct *vmacache[4U]; struct task_rss_stat rss_stat; int exit_state; int exit_code; int exit_signal; int pdeath_signal; unsigned long jobctl; unsigned int personality; unsigned char sched_reset_on_fork; unsigned char sched_contributes_to_load; unsigned char sched_migrated; unsigned char sched_remote_wakeup; unsigned char; unsigned char in_execve; unsigned char in_iowait; unsigned char restore_sigmask; unsigned char memcg_may_oom; unsigned char memcg_kmem_skip_account; unsigned char brk_randomized; unsigned long atomic_flags; struct restart_block restart_block; pid_t pid; pid_t tgid; struct task_struct *real_parent; struct task_struct *parent; struct list_head children; struct list_head sibling; struct task_struct *group_leader; struct list_head ptraced; struct list_head ptrace_entry; struct pid_link pids[3U]; struct list_head thread_group; struct list_head thread_node; struct completion *vfork_done; int *set_child_tid; int *clear_child_tid; cputime_t utime; cputime_t stime; cputime_t utimescaled; 
cputime_t stimescaled; cputime_t gtime; struct prev_cputime prev_cputime; unsigned long nvcsw; unsigned long nivcsw; u64 start_time; u64 real_start_time; unsigned long min_flt; unsigned long maj_flt; struct task_cputime cputime_expires; struct list_head cpu_timers[3U]; const struct cred *real_cred; const struct cred *cred; char comm[16U]; struct nameidata *nameidata; struct sysv_sem sysvsem; struct sysv_shm sysvshm; unsigned long last_switch_count; struct fs_struct *fs; struct files_struct *files; struct nsproxy *nsproxy; struct signal_struct *signal; struct sighand_struct *sighand; sigset_t blocked; sigset_t real_blocked; sigset_t saved_sigmask; struct sigpending pending; unsigned long sas_ss_sp; size_t sas_ss_size; unsigned int sas_ss_flags; struct callback_head *task_works; struct audit_context *audit_context; kuid_t loginuid; unsigned int sessionid; struct seccomp seccomp; u32 parent_exec_id; u32 self_exec_id; spinlock_t alloc_lock; raw_spinlock_t pi_lock; struct wake_q_node wake_q; struct rb_root pi_waiters; struct rb_node *pi_waiters_leftmost; struct rt_mutex_waiter *pi_blocked_on; struct mutex_waiter *blocked_on; unsigned int irq_events; unsigned long hardirq_enable_ip; unsigned long hardirq_disable_ip; unsigned int hardirq_enable_event; unsigned int hardirq_disable_event; int hardirqs_enabled; int hardirq_context; unsigned long softirq_disable_ip; unsigned long softirq_enable_ip; unsigned int softirq_disable_event; unsigned int softirq_enable_event; int softirqs_enabled; int softirq_context; u64 curr_chain_key; int lockdep_depth; unsigned int lockdep_recursion; struct held_lock held_locks[48U]; gfp_t lockdep_reclaim_gfp; unsigned int in_ubsan; void *journal_info; struct bio_list *bio_list; struct blk_plug *plug; struct reclaim_state *reclaim_state; struct backing_dev_info *backing_dev_info; struct io_context *io_context; unsigned long ptrace_message; siginfo_t *last_siginfo; struct task_io_accounting ioac; u64 acct_rss_mem1; u64 acct_vm_mem1; cputime_t 
acct_timexpd; nodemask_t mems_allowed; seqcount_t mems_allowed_seq; int cpuset_mem_spread_rotor; int cpuset_slab_spread_rotor; struct css_set *cgroups; struct list_head cg_list; struct robust_list_head *robust_list; struct compat_robust_list_head *compat_robust_list; struct list_head pi_state_list; struct futex_pi_state *pi_state_cache; struct perf_event_context *perf_event_ctxp[2U]; struct mutex perf_event_mutex; struct list_head perf_event_list; struct mempolicy *mempolicy; short il_next; short pref_node_fork; int numa_scan_seq; unsigned int numa_scan_period; unsigned int numa_scan_period_max; int numa_preferred_nid; unsigned long numa_migrate_retry; u64 node_stamp; u64 last_task_numa_placement; u64 last_sum_exec_runtime; struct callback_head numa_work; struct list_head numa_entry; struct numa_group *numa_group; unsigned long *numa_faults; unsigned long total_numa_faults; unsigned long numa_faults_locality[3U]; unsigned long numa_pages_migrated; struct tlbflush_unmap_batch tlb_ubc; struct callback_head rcu; struct pipe_inode_info *splice_pipe; struct page_frag task_frag; struct task_delay_info *delays; int make_it_fail; int nr_dirtied; int nr_dirtied_pause; unsigned long dirty_paused_when; int latency_record_count; struct latency_record latency_record[32U]; u64 timer_slack_ns; u64 default_timer_slack_ns; unsigned int kasan_depth; unsigned long trace; unsigned long trace_recursion; enum kcov_mode kcov_mode; unsigned int kcov_size; void *kcov_area; struct kcov *kcov; struct mem_cgroup *memcg_in_oom; gfp_t memcg_oom_gfp_mask; int memcg_oom_order; unsigned int memcg_nr_pages_over_high; struct uprobe_task *utask; unsigned int sequential_io; unsigned int sequential_io_avg; unsigned long task_state_change; int pagefault_disabled; struct task_struct *oom_reaper_list; atomic_t stack_refcount; struct thread_struct thread; } ;
13 typedef unsigned long kernel_ulong_t;
186 struct acpi_device_id { __u8 id[9U]; kernel_ulong_t driver_data; __u32 cls; __u32 cls_msk; } ;
229 struct of_device_id { char name[32U]; char type[32U]; char compatible[128U]; const void *data; } ;
484 struct platform_device_id { char name[20U]; kernel_ulong_t driver_data; } ;
674 enum fwnode_type { FWNODE_INVALID = 0, FWNODE_OF = 1, FWNODE_ACPI = 2, FWNODE_ACPI_DATA = 3, FWNODE_PDATA = 4, FWNODE_IRQCHIP = 5 } ;
683 struct fwnode_handle { enum fwnode_type type; struct fwnode_handle *secondary; } ;
32 typedef u32 phandle;
34 struct property { char *name; int length; void *value; struct property *next; unsigned long _flags; unsigned int unique_id; struct bin_attribute attr; } ;
44 struct device_node { const char *name; const char *type; phandle phandle; const char *full_name; struct fwnode_handle fwnode; struct property *properties; struct property *deadprops; struct device_node *parent; struct device_node *child; struct device_node *sibling; struct kobject kobj; unsigned long _flags; void *data; } ;
1275 struct klist_node ;
37 struct klist_node { void *n_klist; struct list_head n_node; struct kref n_ref; } ;
93 struct hlist_bl_node ;
93 struct hlist_bl_head { struct hlist_bl_node *first; } ;
36 struct hlist_bl_node { struct hlist_bl_node *next; struct hlist_bl_node **pprev; } ;
114 struct __anonstruct____missing_field_name_343 { spinlock_t lock; int count; } ;
114 union __anonunion____missing_field_name_342 { struct __anonstruct____missing_field_name_343 __annonCompField65; } ;
114 struct lockref { union __anonunion____missing_field_name_342 __annonCompField66; } ;
77 struct path ;
78 struct vfsmount ;
79 struct __anonstruct____missing_field_name_345 { u32 hash; u32 len; } ;
79 union __anonunion____missing_field_name_344 { struct __anonstruct____missing_field_name_345 __annonCompField67; u64 hash_len; } ;
79 struct qstr { union __anonunion____missing_field_name_344 __annonCompField68; const unsigned char *name; } ;
65 struct dentry_operations ;
65 union __anonunion____missing_field_name_346 { struct list_head d_lru; wait_queue_head_t *d_wait; } ;
65 union __anonunion_d_u_347 { struct hlist_node d_alias; struct hlist_bl_node d_in_lookup_hash; struct callback_head d_rcu; } ;
65 struct dentry { unsigned int d_flags; seqcount_t d_seq; struct hlist_bl_node d_hash; struct dentry *d_parent; struct qstr d_name; struct inode *d_inode; unsigned char d_iname[32U]; struct lockref d_lockref; const struct dentry_operations *d_op; struct super_block *d_sb; unsigned long d_time; void *d_fsdata; union __anonunion____missing_field_name_346 __annonCompField69; struct list_head d_child; struct list_head d_subdirs; union __anonunion_d_u_347 d_u; } ;
121 struct dentry_operations { int (*d_revalidate)(struct dentry *, unsigned int); int (*d_weak_revalidate)(struct dentry *, unsigned int); int (*d_hash)(const struct dentry *, struct qstr *); int (*d_compare)(const struct dentry *, unsigned int, const char *, const struct qstr *); int (*d_delete)(const struct dentry *); int (*d_init)(struct dentry *); void (*d_release)(struct dentry *); void (*d_prune)(struct dentry *); void (*d_iput)(struct dentry *, struct inode *); char * (*d_dname)(struct dentry *, char *, int); struct vfsmount * (*d_automount)(struct path *); int (*d_manage)(struct dentry *, bool ); struct dentry * (*d_real)(struct dentry *, const struct inode *, unsigned int); } ;
592 struct path { struct vfsmount *mnt; struct dentry *dentry; } ;
19 struct shrink_control { gfp_t gfp_mask; unsigned long nr_to_scan; int nid; struct mem_cgroup *memcg; } ;
27 struct shrinker { unsigned long int (*count_objects)(struct shrinker *, struct shrink_control *); unsigned long int (*scan_objects)(struct shrinker *, struct shrink_control *); int seeks; long batch; unsigned long flags; struct list_head list; atomic_long_t *nr_deferred; } ;
80 struct list_lru_one { struct list_head list; long nr_items; } ;
32 struct list_lru_memcg { struct list_lru_one *lru[0U]; } ;
37 struct list_lru_node { spinlock_t lock; struct list_lru_one lru; struct list_lru_memcg *memcg_lrus; } ;
47 struct list_lru { struct list_lru_node *node; struct list_head list; } ;
63 struct __anonstruct____missing_field_name_349 { struct radix_tree_node *parent; void *private_data; } ;
63 union __anonunion____missing_field_name_348 { struct __anonstruct____missing_field_name_349 __annonCompField70; struct callback_head callback_head; } ;
63 struct radix_tree_node { unsigned char shift; unsigned char offset; unsigned int count; union __anonunion____missing_field_name_348 __annonCompField71; struct list_head private_list; void *slots[64U]; unsigned long tags[3U][1U]; } ;
106 struct radix_tree_root { gfp_t gfp_mask; struct radix_tree_node *rnode; } ;
45 struct fiemap_extent { __u64 fe_logical; __u64 fe_physical; __u64 fe_length; __u64 fe_reserved64[2U]; __u32 fe_flags; __u32 fe_reserved[3U]; } ;
38 enum migrate_mode { MIGRATE_ASYNC = 0, MIGRATE_SYNC_LIGHT = 1, MIGRATE_SYNC = 2 } ;
44 struct bio_vec { struct page *bv_page; unsigned int bv_len; unsigned int bv_offset; } ;
87 struct block_device ;
273 struct delayed_call { void (*fn)(void *); void *arg; } ;
264 struct bdi_writeback ;
265 struct export_operations ;
267 struct iovec ;
268 struct kiocb ;
269 struct poll_table_struct ;
270 struct kstatfs ;
271 struct swap_info_struct ;
272 struct iov_iter ;
273 struct fscrypt_info ;
274 struct fscrypt_operations ;
76 struct iattr { unsigned int ia_valid; umode_t ia_mode; kuid_t ia_uid; kgid_t ia_gid; loff_t ia_size; struct timespec ia_atime; struct timespec ia_mtime; struct timespec ia_ctime; struct file *ia_file; } ;
262 struct percpu_counter { raw_spinlock_t lock; s64 count; struct list_head list; s32 *counters; } ;
213 struct dquot ;
214 struct kqid ;
19 typedef __kernel_uid32_t projid_t;
23 struct __anonstruct_kprojid_t_357 { projid_t val; } ;
23 typedef struct __anonstruct_kprojid_t_357 kprojid_t;
181 enum quota_type { USRQUOTA = 0, GRPQUOTA = 1, PRJQUOTA = 2 } ;
66 typedef long long qsize_t;
67 union __anonunion____missing_field_name_358 { kuid_t uid; kgid_t gid; kprojid_t projid; } ;
67 struct kqid { union __anonunion____missing_field_name_358 __annonCompField73; enum quota_type type; } ;
194 struct mem_dqblk { qsize_t dqb_bhardlimit; qsize_t dqb_bsoftlimit; qsize_t dqb_curspace; qsize_t dqb_rsvspace; qsize_t dqb_ihardlimit; qsize_t dqb_isoftlimit; qsize_t dqb_curinodes; time64_t dqb_btime; time64_t dqb_itime; } ;
216 struct quota_format_type ;
217 struct mem_dqinfo { struct quota_format_type *dqi_format; int dqi_fmt_id; struct list_head dqi_dirty_list; unsigned long dqi_flags; unsigned int dqi_bgrace; unsigned int dqi_igrace; qsize_t dqi_max_spc_limit; qsize_t dqi_max_ino_limit; void *dqi_priv; } ;
282 struct dquot { struct hlist_node dq_hash; struct list_head dq_inuse; struct list_head dq_free; struct list_head dq_dirty; struct mutex dq_lock; atomic_t dq_count; wait_queue_head_t dq_wait_unused; struct super_block *dq_sb; struct kqid dq_id; loff_t dq_off; unsigned long dq_flags; struct mem_dqblk dq_dqb; } ;
309 struct quota_format_ops { int (*check_quota_file)(struct super_block *, int); int (*read_file_info)(struct super_block *, int); int (*write_file_info)(struct super_block *, int); int (*free_file_info)(struct super_block *, int); int (*read_dqblk)(struct dquot *); int (*commit_dqblk)(struct dquot *); int (*release_dqblk)(struct dquot *); int (*get_next_id)(struct super_block *, struct kqid *); } ;
321 struct dquot_operations { int (*write_dquot)(struct dquot *); struct dquot * (*alloc_dquot)(struct super_block *, int); void (*destroy_dquot)(struct dquot *); int (*acquire_dquot)(struct dquot *); int (*release_dquot)(struct dquot *); int (*mark_dirty)(struct dquot *); int (*write_info)(struct super_block *, int); qsize_t * (*get_reserved_space)(struct inode *); int (*get_projid)(struct inode *, kprojid_t *); int (*get_next_id)(struct super_block *, struct kqid *); } ;
338 struct qc_dqblk { int d_fieldmask; u64 d_spc_hardlimit; u64 d_spc_softlimit; u64 d_ino_hardlimit; u64 d_ino_softlimit; u64 d_space; u64 d_ino_count; s64 d_ino_timer; s64 d_spc_timer; int d_ino_warns; int d_spc_warns; u64 d_rt_spc_hardlimit; u64 d_rt_spc_softlimit; u64 d_rt_space; s64 d_rt_spc_timer; int d_rt_spc_warns; } ;
361 struct qc_type_state { unsigned int flags; unsigned int spc_timelimit; unsigned int ino_timelimit; unsigned int rt_spc_timelimit; unsigned int spc_warnlimit; unsigned int ino_warnlimit; unsigned int rt_spc_warnlimit; unsigned long long ino; blkcnt_t blocks; blkcnt_t nextents; } ;
407 struct qc_state { unsigned int s_incoredqs; struct qc_type_state s_state[3U]; } ;
418 struct qc_info { int i_fieldmask; unsigned int i_flags; unsigned int i_spc_timelimit; unsigned int i_ino_timelimit; unsigned int i_rt_spc_timelimit; unsigned int i_spc_warnlimit; unsigned int i_ino_warnlimit; unsigned int i_rt_spc_warnlimit; } ;
431 struct quotactl_ops { int (*quota_on)(struct super_block *, int, int, struct path *); int (*quota_off)(struct super_block *, int); int (*quota_enable)(struct super_block *, unsigned int); int (*quota_disable)(struct super_block *, unsigned int); int (*quota_sync)(struct super_block *, int); int (*set_info)(struct super_block *, int, struct qc_info *); int (*get_dqblk)(struct super_block *, struct kqid , struct qc_dqblk *); int (*get_nextdqblk)(struct super_block *, struct kqid *, struct qc_dqblk *); int (*set_dqblk)(struct super_block *, struct kqid , struct qc_dqblk *); int (*get_state)(struct super_block *, struct qc_state *); int (*rm_xquota)(struct super_block *, unsigned int); } ;
447 struct quota_format_type { int qf_fmt_id; const struct quota_format_ops *qf_ops; struct module *qf_owner; struct quota_format_type *qf_next; } ;
511 struct quota_info { unsigned int flags; struct mutex dqio_mutex; struct mutex dqonoff_mutex; struct inode *files[3U]; struct mem_dqinfo info[3U]; const struct quota_format_ops *ops[3U]; } ;
541 struct writeback_control ;
542 struct kiocb { struct file *ki_filp; loff_t ki_pos; void (*ki_complete)(struct kiocb *, long, long); void *private; int ki_flags; } ;
368 struct address_space_operations { int (*writepage)(struct page *, struct writeback_control *); int (*readpage)(struct file *, struct page *); int (*writepages)(struct address_space *, struct writeback_control *); int (*set_page_dirty)(struct page *); int (*readpages)(struct file *, struct address_space *, struct list_head *, unsigned int); int (*write_begin)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page **, void **); int (*write_end)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page *, void *); sector_t (*bmap)(struct address_space *, sector_t ); void (*invalidatepage)(struct page *, unsigned int, unsigned int); int (*releasepage)(struct page *, gfp_t ); void (*freepage)(struct page *); ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *); int (*migratepage)(struct address_space *, struct page *, struct page *, enum migrate_mode ); bool (*isolate_page)(struct page *, isolate_mode_t ); void (*putback_page)(struct page *); int (*launder_page)(struct page *); int (*is_partially_uptodate)(struct page *, unsigned long, unsigned long); void (*is_dirty_writeback)(struct page *, bool *, bool *); int (*error_remove_page)(struct address_space *, struct page *); int (*swap_activate)(struct swap_info_struct *, struct file *, sector_t *); void (*swap_deactivate)(struct file *); } ;
427 struct address_space { struct inode *host; struct radix_tree_root page_tree; spinlock_t tree_lock; atomic_t i_mmap_writable; struct rb_root i_mmap; struct rw_semaphore i_mmap_rwsem; unsigned long nrpages; unsigned long nrexceptional; unsigned long writeback_index; const struct address_space_operations *a_ops; unsigned long flags; spinlock_t private_lock; gfp_t gfp_mask; struct list_head private_list; void *private_data; } ;
449 struct request_queue ;
450 struct hd_struct ;
450 struct gendisk ;
450 struct block_device { dev_t bd_dev; int bd_openers; struct inode *bd_inode; struct super_block *bd_super; struct mutex bd_mutex; void *bd_claiming; void *bd_holder; int bd_holders; bool bd_write_holder; struct list_head bd_holder_disks; struct block_device *bd_contains; unsigned int bd_block_size; struct hd_struct *bd_part; unsigned int bd_part_count; int bd_invalidated; struct gendisk *bd_disk; struct request_queue *bd_queue; struct list_head bd_list; unsigned long bd_private; int bd_fsfreeze_count; struct mutex bd_fsfreeze_mutex; } ;
565 struct posix_acl ;
592 struct inode_operations ;
592 union __anonunion____missing_field_name_363 { const unsigned int i_nlink; unsigned int __i_nlink; } ;
592 union __anonunion____missing_field_name_364 { struct hlist_head i_dentry; struct callback_head i_rcu; } ;
592 struct file_lock_context ;
592 struct cdev ;
592 union __anonunion____missing_field_name_365 { struct pipe_inode_info *i_pipe; struct block_device *i_bdev; struct cdev *i_cdev; char *i_link; unsigned int i_dir_seq; } ;
592 struct inode { umode_t i_mode; unsigned short i_opflags; kuid_t i_uid; kgid_t i_gid; unsigned int i_flags; struct posix_acl *i_acl; struct posix_acl *i_default_acl; const struct inode_operations *i_op; struct super_block *i_sb; struct address_space *i_mapping; void *i_security; unsigned long i_ino; union __anonunion____missing_field_name_363 __annonCompField74; dev_t i_rdev; loff_t i_size; struct timespec i_atime; struct timespec i_mtime; struct timespec i_ctime; spinlock_t i_lock; unsigned short i_bytes; unsigned int i_blkbits; blkcnt_t i_blocks; unsigned long i_state; struct rw_semaphore i_rwsem; unsigned long dirtied_when; unsigned long dirtied_time_when; struct hlist_node i_hash; struct list_head i_io_list; struct bdi_writeback *i_wb; int i_wb_frn_winner; u16 i_wb_frn_avg_time; u16 i_wb_frn_history; struct list_head i_lru; struct list_head i_sb_list; struct list_head i_wb_list; union __anonunion____missing_field_name_364 __annonCompField75; u64 i_version; atomic_t i_count; atomic_t i_dio_count; atomic_t i_writecount; atomic_t i_readcount; const struct file_operations *i_fop; struct file_lock_context *i_flctx; struct address_space i_data; struct list_head i_devices; union __anonunion____missing_field_name_365 __annonCompField76; __u32 i_generation; __u32 i_fsnotify_mask; struct hlist_head i_fsnotify_marks; struct fscrypt_info *i_crypt_info; void *i_private; } ;
847 struct fown_struct { rwlock_t lock; struct pid *pid; enum pid_type pid_type; kuid_t uid; kuid_t euid; int signum; } ;
855 struct file_ra_state { unsigned long start; unsigned int size; unsigned int async_size; unsigned int ra_pages; unsigned int mmap_miss; loff_t prev_pos; } ;
878 union __anonunion_f_u_366 { struct llist_node fu_llist; struct callback_head fu_rcuhead; } ;
878 struct file { union __anonunion_f_u_366 f_u; struct path f_path; struct inode *f_inode; const struct file_operations *f_op; spinlock_t f_lock; atomic_long_t f_count; unsigned int f_flags; fmode_t f_mode; struct mutex f_pos_lock; loff_t f_pos; struct fown_struct f_owner; const struct cred *f_cred; struct file_ra_state f_ra; u64 f_version; void *f_security; void *private_data; struct list_head f_ep_links; struct list_head f_tfile_llink; struct address_space *f_mapping; } ;
963 typedef void *fl_owner_t;
964 struct file_lock ;
965 struct file_lock_operations { void (*fl_copy_lock)(struct file_lock *, struct file_lock *); void (*fl_release_private)(struct file_lock *); } ;
971 struct lock_manager_operations { int (*lm_compare_owner)(struct file_lock *, struct file_lock *); unsigned long int (*lm_owner_key)(struct file_lock *); fl_owner_t (*lm_get_owner)(fl_owner_t ); void (*lm_put_owner)(fl_owner_t ); void (*lm_notify)(struct file_lock *); int (*lm_grant)(struct file_lock *, int); bool (*lm_break)(struct file_lock *); int (*lm_change)(struct file_lock *, int, struct list_head *); void (*lm_setup)(struct file_lock *, void **); } ;
992 struct net ;
998 struct nlm_lockowner ;
999 struct nfs_lock_info { u32 state; struct nlm_lockowner *owner; struct list_head list; } ;
14 struct nfs4_lock_state ;
15 struct nfs4_lock_info { struct nfs4_lock_state *owner; } ;
19 struct fasync_struct ;
19 struct __anonstruct_afs_368 { struct list_head link; int state; } ;
19 union __anonunion_fl_u_367 { struct nfs_lock_info nfs_fl; struct nfs4_lock_info nfs4_fl; struct __anonstruct_afs_368 afs; } ;
19 struct file_lock { struct file_lock *fl_next; struct list_head fl_list; struct hlist_node fl_link; struct list_head fl_block; fl_owner_t fl_owner; unsigned int fl_flags; unsigned char fl_type; unsigned int fl_pid; int fl_link_cpu; struct pid *fl_nspid; wait_queue_head_t fl_wait; struct file *fl_file; loff_t fl_start; loff_t fl_end; struct fasync_struct *fl_fasync; unsigned long fl_break_time; unsigned long fl_downgrade_time; const struct file_lock_operations *fl_ops; const struct lock_manager_operations *fl_lmops; union __anonunion_fl_u_367 fl_u; } ;
1051 struct file_lock_context { spinlock_t flc_lock; struct list_head flc_flock; struct list_head flc_posix; struct list_head flc_lease; } ;
1271 struct fasync_struct { spinlock_t fa_lock; int magic; int fa_fd; struct fasync_struct *fa_next; struct file *fa_file; struct callback_head fa_rcu; } ;
1306 struct sb_writers { int frozen; wait_queue_head_t wait_unfrozen; struct percpu_rw_semaphore rw_sem[3U]; } ;
1336 struct super_operations ;
1336 struct xattr_handler ;
1336 struct mtd_info ;
1336 struct super_block { struct list_head s_list; dev_t s_dev; unsigned char s_blocksize_bits; unsigned long s_blocksize; loff_t s_maxbytes; struct file_system_type *s_type; const struct super_operations *s_op; const struct dquot_operations *dq_op; const struct quotactl_ops *s_qcop; const struct export_operations *s_export_op; unsigned long s_flags; unsigned long s_iflags; unsigned long s_magic; struct dentry *s_root; struct rw_semaphore s_umount; int s_count; atomic_t s_active; void *s_security; const struct xattr_handler **s_xattr; const struct fscrypt_operations *s_cop; struct hlist_bl_head s_anon; struct list_head s_mounts; struct block_device *s_bdev; struct backing_dev_info *s_bdi; struct mtd_info *s_mtd; struct hlist_node s_instances; unsigned int s_quota_types; struct quota_info s_dquot; struct sb_writers s_writers; char s_id[32U]; u8 s_uuid[16U]; void *s_fs_info; unsigned int s_max_links; fmode_t s_mode; u32 s_time_gran; struct mutex s_vfs_rename_mutex; char *s_subtype; char *s_options; const struct dentry_operations *s_d_op; int cleancache_poolid; struct shrinker s_shrink; atomic_long_t s_remove_count; int s_readonly_remount; struct workqueue_struct *s_dio_done_wq; struct hlist_head s_pins; struct user_namespace *s_user_ns; struct list_lru s_dentry_lru; struct list_lru s_inode_lru; struct callback_head rcu; struct work_struct destroy_work; struct mutex s_sync_lock; int s_stack_depth; spinlock_t s_inode_list_lock; struct list_head s_inodes; spinlock_t s_inode_wblist_lock; struct list_head s_inodes_wb; } ;
1620 struct fiemap_extent_info { unsigned int fi_flags; unsigned int fi_extents_mapped; unsigned int fi_extents_max; struct fiemap_extent *fi_extents_start; } ;
1633 struct dir_context ;
1658 struct dir_context { int (*actor)(struct dir_context *, const char *, int, loff_t , u64 , unsigned int); loff_t pos; } ;
1665 struct file_operations { struct module *owner; loff_t (*llseek)(struct file *, loff_t , int); ssize_t (*read)(struct file *, char *, size_t , loff_t *); ssize_t (*write)(struct file *, const char *, size_t , loff_t *); ssize_t (*read_iter)(struct kiocb *, struct iov_iter *); ssize_t (*write_iter)(struct kiocb *, struct iov_iter *); int (*iterate)(struct file *, struct dir_context *); int (*iterate_shared)(struct file *, struct dir_context *); unsigned int (*poll)(struct file *, struct poll_table_struct *); long int (*unlocked_ioctl)(struct file *, unsigned int, unsigned long); long int (*compat_ioctl)(struct file *, unsigned int, unsigned long); int (*mmap)(struct file *, struct vm_area_struct *); int (*open)(struct inode *, struct file *); int (*flush)(struct file *, fl_owner_t ); int (*release)(struct inode *, struct file *); int (*fsync)(struct file *, loff_t , loff_t , int); int (*aio_fsync)(struct kiocb *, int); int (*fasync)(int, struct file *, int); int (*lock)(struct file *, int, struct file_lock *); ssize_t (*sendpage)(struct file *, struct page *, int, size_t , loff_t *, int); unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); int (*check_flags)(int); int (*flock)(struct file *, int, struct file_lock *); ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t , unsigned int); ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t , unsigned int); int (*setlease)(struct file *, long, struct file_lock **, void **); long int (*fallocate)(struct file *, int, loff_t , loff_t ); void (*show_fdinfo)(struct seq_file *, struct file *); ssize_t (*copy_file_range)(struct file *, loff_t , struct file *, loff_t , size_t , unsigned int); int (*clone_file_range)(struct file *, loff_t , struct file *, loff_t , u64 ); ssize_t (*dedupe_file_range)(struct file *, u64 , u64 , struct file *, u64 ); } ;
1734 struct inode_operations { struct dentry * (*lookup)(struct inode *, struct dentry *, unsigned int); const char * (*get_link)(struct dentry *, struct inode *, struct delayed_call *); int (*permission)(struct inode *, int); struct posix_acl * (*get_acl)(struct inode *, int); int (*readlink)(struct dentry *, char *, int); int (*create)(struct inode *, struct dentry *, umode_t , bool ); int (*link)(struct dentry *, struct inode *, struct dentry *); int (*unlink)(struct inode *, struct dentry *); int (*symlink)(struct inode *, struct dentry *, const char *); int (*mkdir)(struct inode *, struct dentry *, umode_t ); int (*rmdir)(struct inode *, struct dentry *); int (*mknod)(struct inode *, struct dentry *, umode_t , dev_t ); int (*rename)(struct inode *, struct dentry *, struct inode *, struct dentry *, unsigned int); int (*setattr)(struct dentry *, struct iattr *); int (*getattr)(struct vfsmount *, struct dentry *, struct kstat *); ssize_t (*listxattr)(struct dentry *, char *, size_t ); int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 , u64 ); int (*update_time)(struct inode *, struct timespec *, int); int (*atomic_open)(struct inode *, struct dentry *, struct file *, unsigned int, umode_t , int *); int (*tmpfile)(struct inode *, struct dentry *, umode_t ); int (*set_acl)(struct inode *, struct posix_acl *, int); } ;
1784 struct super_operations { struct inode * (*alloc_inode)(struct super_block *); void (*destroy_inode)(struct inode *); void (*dirty_inode)(struct inode *, int); int (*write_inode)(struct inode *, struct writeback_control *); int (*drop_inode)(struct inode *); void (*evict_inode)(struct inode *); void (*put_super)(struct super_block *); int (*sync_fs)(struct super_block *, int); int (*freeze_super)(struct super_block *); int (*freeze_fs)(struct super_block *); int (*thaw_super)(struct super_block *); int (*unfreeze_fs)(struct super_block *); int (*statfs)(struct dentry *, struct kstatfs *); int (*remount_fs)(struct super_block *, int *, char *); void (*umount_begin)(struct super_block *); int (*show_options)(struct seq_file *, struct dentry *); int (*show_devname)(struct seq_file *, struct dentry *); int (*show_path)(struct seq_file *, struct dentry *); int (*show_stats)(struct seq_file *, struct dentry *); ssize_t (*quota_read)(struct super_block *, int, char *, size_t , loff_t ); ssize_t (*quota_write)(struct super_block *, int, const char *, size_t , loff_t ); struct dquot ** (*get_dquots)(struct inode *); int (*bdev_try_to_free_page)(struct super_block *, struct page *, gfp_t ); long int (*nr_cached_objects)(struct super_block *, struct shrink_control *); long int (*free_cached_objects)(struct super_block *, struct shrink_control *); } ;
2027 struct file_system_type { const char *name; int fs_flags; struct dentry * (*mount)(struct file_system_type *, int, const char *, void *); void (*kill_sb)(struct super_block *); struct module *owner; struct file_system_type *next; struct hlist_head fs_supers; struct lock_class_key s_lock_key; struct lock_class_key s_umount_key; struct lock_class_key s_vfs_rename_key; struct lock_class_key s_writers_key[3U]; struct lock_class_key i_lock_key; struct lock_class_key i_mutex_key; struct lock_class_key i_mutex_dir_key; } ;
3211 struct seq_file { char *buf; size_t size; size_t from; size_t count; size_t pad_until; loff_t index; loff_t read_pos; u64 version; struct mutex lock; const struct seq_operations *op; int poll_event; const struct file *file; void *private; } ;
30 struct seq_operations { void * (*start)(struct seq_file *, loff_t *); void (*stop)(struct seq_file *, void *); void * (*next)(struct seq_file *, void *, loff_t *); int (*show)(struct seq_file *, void *); } ;
222 struct pinctrl ;
223 struct pinctrl_state ;
194 struct dev_pin_info { struct pinctrl *p; struct pinctrl_state *default_state; struct pinctrl_state *init_state; struct pinctrl_state *sleep_state; struct pinctrl_state *idle_state; } ;
76 struct dma_map_ops ;
76 struct dev_archdata { struct dma_map_ops *dma_ops; void *iommu; } ;
21 struct pdev_archdata { } ;
24 struct device_private ;
25 struct device_driver ;
26 struct driver_private ;
27 struct class ;
28 struct subsys_private ;
29 struct bus_type ;
30 struct iommu_ops ;
31 struct iommu_group ;
32 struct iommu_fwspec ;
62 struct device_attribute ;
62 struct bus_type { const char *name; const char *dev_name; struct device *dev_root; struct device_attribute *dev_attrs; const struct attribute_group **bus_groups; const struct attribute_group **dev_groups; const struct attribute_group **drv_groups; int (*match)(struct device *, struct device_driver *); int (*uevent)(struct device *, struct kobj_uevent_env *); int (*probe)(struct device *); int (*remove)(struct device *); void (*shutdown)(struct device *); int (*online)(struct device *); int (*offline)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct dev_pm_ops *pm; const struct iommu_ops *iommu_ops; struct subsys_private *p; struct lock_class_key lock_key; } ;
143 struct device_type ;
202 enum probe_type { PROBE_DEFAULT_STRATEGY = 0, PROBE_PREFER_ASYNCHRONOUS = 1, PROBE_FORCE_SYNCHRONOUS = 2 } ;
208 struct device_driver { const char *name; struct bus_type *bus; struct module *owner; const char *mod_name; bool suppress_bind_attrs; enum probe_type probe_type; const struct of_device_id *of_match_table; const struct acpi_device_id *acpi_match_table; int (*probe)(struct device *); int (*remove)(struct device *); void (*shutdown)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct attribute_group **groups; const struct dev_pm_ops *pm; struct driver_private *p; } ;
358 struct class_attribute ;
358 struct class { const char *name; struct module *owner; struct class_attribute *class_attrs; const struct attribute_group **dev_groups; struct kobject *dev_kobj; int (*dev_uevent)(struct device *, struct kobj_uevent_env *); char * (*devnode)(struct device *, umode_t *); void (*class_release)(struct class *); void (*dev_release)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct kobj_ns_type_operations *ns_type; const void * (*namespace)(struct device *); const struct dev_pm_ops *pm; struct subsys_private *p; } ;
451 struct class_attribute { struct attribute attr; ssize_t (*show)(struct class *, struct class_attribute *, char *); ssize_t (*store)(struct class *, struct class_attribute *, const char *, size_t ); } ;
519 struct device_type { const char *name; const struct attribute_group **groups; int (*uevent)(struct device *, struct kobj_uevent_env *); char * (*devnode)(struct device *, umode_t *, kuid_t *, kgid_t *); void (*release)(struct device *); const struct dev_pm_ops *pm; } ;
547 struct device_attribute { struct attribute attr; ssize_t (*show)(struct device *, struct device_attribute *, char *); ssize_t (*store)(struct device *, struct device_attribute *, const char *, size_t ); } ;
700 struct device_dma_parameters { unsigned int max_segment_size; unsigned long segment_boundary_mask; } ;
709 struct irq_domain ;
709 struct dma_coherent_mem ;
709 struct cma ;
709 struct device { struct device *parent; struct device_private *p; struct kobject kobj; const char *init_name; const struct device_type *type; struct mutex mutex; struct bus_type *bus; struct device_driver *driver; void *platform_data; void *driver_data; struct dev_pm_info power; struct dev_pm_domain *pm_domain; struct irq_domain *msi_domain; struct dev_pin_info *pins; struct list_head msi_list; int numa_node; u64 *dma_mask; u64 coherent_dma_mask; unsigned long dma_pfn_offset; struct device_dma_parameters *dma_parms; struct list_head dma_pools; struct dma_coherent_mem *dma_mem; struct cma *cma_area; struct dev_archdata archdata; struct device_node *of_node; struct fwnode_handle *fwnode; dev_t devt; u32 id; spinlock_t devres_lock; struct list_head devres_head; struct klist_node knode_class; struct class *class; const struct attribute_group **groups; void (*release)(struct device *); struct iommu_group *iommu_group; struct iommu_fwspec *iommu_fwspec; bool offline_disabled; bool offline; } ;
865 struct wakeup_source { const char *name; struct list_head entry; spinlock_t lock; struct wake_irq *wakeirq; struct timer_list timer; unsigned long timer_expires; ktime_t total_time; ktime_t max_time; ktime_t last_time; ktime_t start_prevent_time; ktime_t prevent_sleep_time; unsigned long event_count; unsigned long active_count; unsigned long relax_count; unsigned long expire_count; unsigned long wakeup_count; bool active; bool autosleep_enabled; } ;
1330 struct irq_desc ;
1331 struct irq_data ;
13 enum irqreturn { IRQ_NONE = 0, IRQ_HANDLED = 1, IRQ_WAKE_THREAD = 2 } ;
16 typedef enum irqreturn irqreturn_t;
30 struct msi_msg ;
31 enum irqchip_irq_state ;
63 struct msi_desc ;
64 struct irq_common_data { unsigned int state_use_accessors; unsigned int node; void *handler_data; struct msi_desc *msi_desc; cpumask_var_t affinity; } ;
151 struct irq_chip ;
151 struct irq_data { u32 mask; unsigned int irq; unsigned long hwirq; struct irq_common_data *common; struct irq_chip *chip; struct irq_domain *domain; struct irq_data *parent_data; void *chip_data; } ;
321 struct irq_chip { struct device *parent_device; const char *name; unsigned int (*irq_startup)(struct irq_data *); void (*irq_shutdown)(struct irq_data *); void (*irq_enable)(struct irq_data *); void (*irq_disable)(struct irq_data *); void (*irq_ack)(struct irq_data *); void (*irq_mask)(struct irq_data *); void (*irq_mask_ack)(struct irq_data *); void (*irq_unmask)(struct irq_data *); void (*irq_eoi)(struct irq_data *); int (*irq_set_affinity)(struct irq_data *, const struct cpumask *, bool ); int (*irq_retrigger)(struct irq_data *); int (*irq_set_type)(struct irq_data *, unsigned int); int (*irq_set_wake)(struct irq_data *, unsigned int); void (*irq_bus_lock)(struct irq_data *); void (*irq_bus_sync_unlock)(struct irq_data *); void (*irq_cpu_online)(struct irq_data *); void (*irq_cpu_offline)(struct irq_data *); void (*irq_suspend)(struct irq_data *); void (*irq_resume)(struct irq_data *); void (*irq_pm_shutdown)(struct irq_data *); void (*irq_calc_mask)(struct irq_data *); void (*irq_print_chip)(struct irq_data *, struct seq_file *); int (*irq_request_resources)(struct irq_data *); void (*irq_release_resources)(struct irq_data *); void (*irq_compose_msi_msg)(struct irq_data *, struct msi_msg *); void (*irq_write_msi_msg)(struct irq_data *, struct msi_msg *); int (*irq_get_irqchip_state)(struct irq_data *, enum irqchip_irq_state , bool *); int (*irq_set_irqchip_state)(struct irq_data *, enum irqchip_irq_state , bool ); int (*irq_set_vcpu_affinity)(struct irq_data *, void *); void (*ipi_send_single)(struct irq_data *, unsigned int); void (*ipi_send_mask)(struct irq_data *, const struct cpumask *); unsigned long flags; } ;
422 struct irq_affinity_notify ;
423 struct proc_dir_entry ;
424 struct irqaction ;
424 struct irq_desc { struct irq_common_data irq_common_data; struct irq_data irq_data; unsigned int *kstat_irqs; void (*handle_irq)(struct irq_desc *); struct irqaction *action; unsigned int status_use_accessors; unsigned int core_internal_state__do_not_mess_with_it; unsigned int depth; unsigned int wake_depth; unsigned int irq_count; unsigned long last_unhandled; unsigned int irqs_unhandled; atomic_t threads_handled; int threads_handled_last; raw_spinlock_t lock; struct cpumask *percpu_enabled; const struct cpumask *percpu_affinity; const struct cpumask *affinity_hint; struct irq_affinity_notify *affinity_notify; cpumask_var_t pending_mask; unsigned long threads_oneshot; atomic_t threads_active; wait_queue_head_t wait_for_threads; unsigned int nr_actions; unsigned int no_suspend_depth; unsigned int cond_suspend_depth; unsigned int force_resume_depth; struct proc_dir_entry *dir; struct callback_head rcu; struct kobject kobj; int parent_irq; struct module *owner; const char *name; } ;
130 struct exception_table_entry { int insn; int fixup; int handler; } ;
739 struct irq_chip_regs { unsigned long enable; unsigned long disable; unsigned long mask; unsigned long ack; unsigned long eoi; unsigned long type; unsigned long polarity; } ;
778 struct irq_chip_type { struct irq_chip chip; struct irq_chip_regs regs; void (*handler)(struct irq_desc *); u32 type; u32 mask_cache_priv; u32 *mask_cache; } ;
800 struct irq_chip_generic { raw_spinlock_t lock; void *reg_base; u32 (*reg_readl)(void *); void (*reg_writel)(u32 , void *); void (*suspend)(struct irq_chip_generic *); void (*resume)(struct irq_chip_generic *); unsigned int irq_base; unsigned int irq_cnt; u32 mask_cache; u32 type_cache; u32 polarity_cache; u32 wake_enabled; u32 wake_active; unsigned int num_ct; void *private; unsigned long installed; unsigned long unused; struct irq_domain *domain; struct list_head list; struct irq_chip_type chip_types[0U]; } ;
856 enum irq_gc_flags { IRQ_GC_INIT_MASK_CACHE = 1, IRQ_GC_INIT_NESTED_LOCK = 2, IRQ_GC_MASK_CACHE_PER_TYPE = 4, IRQ_GC_NO_MASK = 8, IRQ_GC_BE_IO = 16 } ;
864 struct irq_domain_chip_generic { unsigned int irqs_per_chip; unsigned int num_chips; unsigned int irq_flags_to_clear; unsigned int irq_flags_to_set; enum irq_gc_flags gc_flags; struct irq_chip_generic *gc[0U]; } ;
51 struct irq_fwspec { struct fwnode_handle *fwnode; int param_count; u32 param[16U]; } ;
64 enum irq_domain_bus_token { DOMAIN_BUS_ANY = 0, DOMAIN_BUS_WIRED = 1, DOMAIN_BUS_PCI_MSI = 2, DOMAIN_BUS_PLATFORM_MSI = 3, DOMAIN_BUS_NEXUS = 4, DOMAIN_BUS_IPI = 5, DOMAIN_BUS_FSL_MC_MSI = 6 } ;
74 struct irq_domain_ops { int (*match)(struct irq_domain *, struct device_node *, enum irq_domain_bus_token ); int (*select)(struct irq_domain *, struct irq_fwspec *, enum irq_domain_bus_token ); int (*map)(struct irq_domain *, unsigned int, irq_hw_number_t ); void (*unmap)(struct irq_domain *, unsigned int); int (*xlate)(struct irq_domain *, struct device_node *, const u32 *, unsigned int, unsigned long *, unsigned int *); int (*alloc)(struct irq_domain *, unsigned int, unsigned int, void *); void (*free)(struct irq_domain *, unsigned int, unsigned int); void (*activate)(struct irq_domain *, struct irq_data *); void (*deactivate)(struct irq_domain *, struct irq_data *); int (*translate)(struct irq_domain *, struct irq_fwspec *, unsigned long *, unsigned int *); } ;
122 struct irq_domain { struct list_head link; const char *name; const struct irq_domain_ops *ops; void *host_data; unsigned int flags; struct fwnode_handle *fwnode; enum irq_domain_bus_token bus_token; struct irq_domain_chip_generic *gc; struct irq_domain *parent; irq_hw_number_t hwirq_max; unsigned int revmap_direct_max_irq; unsigned int revmap_size; struct radix_tree_root revmap_tree; unsigned int linear_revmap[]; } ;
184 struct gpio_desc ;
93 struct irqaction { irqreturn_t (*handler)(int, void *); void *dev_id; void *percpu_dev_id; struct irqaction *next; irqreturn_t (*thread_fn)(int, void *); struct task_struct *thread; struct irqaction *secondary; unsigned int irq; unsigned int flags; unsigned long thread_flags; unsigned long thread_mask; const char *name; struct proc_dir_entry *dir; } ;
214 struct irq_affinity_notify { unsigned int irq; struct kref kref; struct work_struct work; void (*notify)(struct irq_affinity_notify *, const cpumask_t *); void (*release)(struct kref *); } ;
392 enum irqchip_irq_state { IRQCHIP_STATE_PENDING = 0, IRQCHIP_STATE_ACTIVE = 1, IRQCHIP_STATE_MASKED = 2, IRQCHIP_STATE_LINE_LEVEL = 3 } ;
494 struct tasklet_struct { struct tasklet_struct *next; unsigned long state; atomic_t count; void (*func)(unsigned long); unsigned long data; } ;
56 struct iovec { void *iov_base; __kernel_size_t iov_len; } ;
21 struct kvec { void *iov_base; size_t iov_len; } ;
29 union __anonunion____missing_field_name_378 { const struct iovec *iov; const struct kvec *kvec; const struct bio_vec *bvec; struct pipe_inode_info *pipe; } ;
29 union __anonunion____missing_field_name_379 { unsigned long nr_segs; int idx; } ;
29 struct iov_iter { int type; size_t iov_offset; size_t count; union __anonunion____missing_field_name_378 __annonCompField85; union __anonunion____missing_field_name_379 __annonCompField86; } ;
273 struct vm_fault { unsigned int flags; gfp_t gfp_mask; unsigned long pgoff; void *virtual_address; struct page *cow_page; struct page *page; void *entry; } ;
308 struct fault_env { struct vm_area_struct *vma; unsigned long address; unsigned int flags; pmd_t *pmd; pte_t *pte; spinlock_t *ptl; pgtable_t prealloc_pte; } ;
335 struct vm_operations_struct { void (*open)(struct vm_area_struct *); void (*close)(struct vm_area_struct *); int (*mremap)(struct vm_area_struct *); int (*fault)(struct vm_area_struct *, struct vm_fault *); int (*pmd_fault)(struct vm_area_struct *, unsigned long, pmd_t *, unsigned int); void (*map_pages)(struct fault_env *, unsigned long, unsigned long); int (*page_mkwrite)(struct vm_area_struct *, struct vm_fault *); int (*pfn_mkwrite)(struct vm_area_struct *, struct vm_fault *); int (*access)(struct vm_area_struct *, unsigned long, void *, int, int); const char * (*name)(struct vm_area_struct *); int (*set_policy)(struct vm_area_struct *, struct mempolicy *); struct mempolicy * (*get_policy)(struct vm_area_struct *, unsigned long); struct page * (*find_special_page)(struct vm_area_struct *, unsigned long); } ;
2450 struct scatterlist { unsigned long sg_magic; unsigned long page_link; unsigned int offset; unsigned int length; dma_addr_t dma_address; unsigned int dma_length; } ;
21 struct sg_table { struct scatterlist *sgl; unsigned int nents; unsigned int orig_nents; } ;
1418 struct dql { unsigned int num_queued; unsigned int adj_limit; unsigned int last_obj_cnt; unsigned int limit; unsigned int num_completed; unsigned int prev_ovlimit; unsigned int prev_num_queued; unsigned int prev_last_obj_cnt; unsigned int lowest_slack; unsigned long slack_start_time; unsigned int max_limit; unsigned int min_limit; unsigned int slack_hold_time; } ;
11 typedef unsigned short __kernel_sa_family_t;
23 typedef __kernel_sa_family_t sa_family_t;
24 struct sockaddr { sa_family_t sa_family; char sa_data[14U]; } ;
43 struct __anonstruct_sync_serial_settings_391 { unsigned int clock_rate; unsigned int clock_type; unsigned short loopback; } ;
43 typedef struct __anonstruct_sync_serial_settings_391 sync_serial_settings;
50 struct __anonstruct_te1_settings_392 { unsigned int clock_rate; unsigned int clock_type; unsigned short loopback; unsigned int slot_map; } ;
50 typedef struct __anonstruct_te1_settings_392 te1_settings;
55 struct __anonstruct_raw_hdlc_proto_393 { unsigned short encoding; unsigned short parity; } ;
55 typedef struct __anonstruct_raw_hdlc_proto_393 raw_hdlc_proto;
65 struct __anonstruct_fr_proto_394 { unsigned int t391; unsigned int t392; unsigned int n391; unsigned int n392; unsigned int n393; unsigned short lmi; unsigned short dce; } ;
65 typedef struct __anonstruct_fr_proto_394 fr_proto;
69 struct __anonstruct_fr_proto_pvc_395 { unsigned int dlci; } ;
69 typedef struct __anonstruct_fr_proto_pvc_395 fr_proto_pvc;
74 struct __anonstruct_fr_proto_pvc_info_396 { unsigned int dlci; char master[16U]; } ;
74 typedef struct __anonstruct_fr_proto_pvc_info_396 fr_proto_pvc_info;
79 struct __anonstruct_cisco_proto_397 { unsigned int interval; unsigned int timeout; } ;
79 typedef struct __anonstruct_cisco_proto_397 cisco_proto;
117 struct ifmap { unsigned long mem_start; unsigned long mem_end; unsigned short base_addr; unsigned char irq; unsigned char dma; unsigned char port; } ;
197 union __anonunion_ifs_ifsu_398 { raw_hdlc_proto *raw_hdlc; cisco_proto *cisco; fr_proto *fr; fr_proto_pvc *fr_pvc; fr_proto_pvc_info *fr_pvc_info; sync_serial_settings *sync; te1_settings *te1; } ;
197 struct if_settings { unsigned int type; unsigned int size; union __anonunion_ifs_ifsu_398 ifs_ifsu; } ;
216 union __anonunion_ifr_ifrn_399 { char ifrn_name[16U]; } ;
216 union __anonunion_ifr_ifru_400 { struct sockaddr ifru_addr; struct sockaddr ifru_dstaddr; struct sockaddr ifru_broadaddr; struct sockaddr ifru_netmask; struct sockaddr ifru_hwaddr; short ifru_flags; int ifru_ivalue; int ifru_mtu; struct ifmap ifru_map; char ifru_slave[16U]; char ifru_newname[16U]; void *ifru_data; struct if_settings ifru_settings; } ;
216 struct ifreq { union __anonunion_ifr_ifrn_399 ifr_ifrn; union __anonunion_ifr_ifru_400 ifr_ifru; } ;
18 typedef s32 compat_time_t;
39 typedef s32 compat_long_t;
45 typedef u32 compat_uptr_t;
46 struct compat_timespec { compat_time_t tv_sec; s32 tv_nsec; } ;
278 struct compat_robust_list { compat_uptr_t next; } ;
282 struct compat_robust_list_head { struct compat_robust_list list; compat_long_t futex_offset; compat_uptr_t list_op_pending; } ;
126 struct sk_buff ;
161 struct in6_addr ;
96 enum dma_data_direction { DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2, DMA_NONE = 3 } ;
103 struct dma_map_ops { void * (*alloc)(struct device *, size_t , dma_addr_t *, gfp_t , unsigned long); void (*free)(struct device *, size_t , void *, dma_addr_t , unsigned long); int (*mmap)(struct device *, struct vm_area_struct *, void *, dma_addr_t , size_t , unsigned long); int (*get_sgtable)(struct device *, struct sg_table *, void *, dma_addr_t , size_t , unsigned long); dma_addr_t (*map_page)(struct device *, struct page *, unsigned long, size_t , enum dma_data_direction , unsigned long); void (*unmap_page)(struct device *, dma_addr_t , size_t , enum dma_data_direction , unsigned long); int (*map_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction , unsigned long); void (*unmap_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction , unsigned long); dma_addr_t (*map_resource)(struct device *, phys_addr_t , size_t , enum dma_data_direction , unsigned long); void (*unmap_resource)(struct device *, dma_addr_t , size_t , enum dma_data_direction , unsigned long); void (*sync_single_for_cpu)(struct device *, dma_addr_t , size_t , enum dma_data_direction ); void (*sync_single_for_device)(struct device *, dma_addr_t , size_t , enum dma_data_direction ); void (*sync_sg_for_cpu)(struct device *, struct scatterlist *, int, enum dma_data_direction ); void (*sync_sg_for_device)(struct device *, struct scatterlist *, int, enum dma_data_direction ); int (*mapping_error)(struct device *, dma_addr_t ); int (*dma_supported)(struct device *, u64 ); int (*set_dma_mask)(struct device *, u64 ); int is_phys; } ;
15 typedef u64 netdev_features_t;
70 union __anonunion_in6_u_426 { __u8 u6_addr8[16U]; __be16 u6_addr16[8U]; __be32 u6_addr32[4U]; } ;
70 struct in6_addr { union __anonunion_in6_u_426 in6_u; } ;
46 struct ethhdr { unsigned char h_dest[6U]; unsigned char h_source[6U]; __be16 h_proto; } ;
205 struct pipe_buf_operations ;
205 struct pipe_buffer { struct page *page; unsigned int offset; unsigned int len; const struct pipe_buf_operations *ops; unsigned int flags; unsigned long private; } ;
27 struct pipe_inode_info { struct mutex mutex; wait_queue_head_t wait; unsigned int nrbufs; unsigned int curbuf; unsigned int buffers; unsigned int readers; unsigned int writers; unsigned int files; unsigned int waiting_writers; unsigned int r_counter; unsigned int w_counter; struct page *tmp_page; struct fasync_struct *fasync_readers; struct fasync_struct *fasync_writers; struct pipe_buffer *bufs; struct user_struct *user; } ;
63 struct pipe_buf_operations { int can_merge; int (*confirm)(struct pipe_inode_info *, struct pipe_buffer *); void (*release)(struct pipe_inode_info *, struct pipe_buffer *); int (*steal)(struct pipe_inode_info *, struct pipe_buffer *); void (*get)(struct pipe_inode_info *, struct pipe_buffer *); } ;
264 struct napi_struct ;
265 struct nf_conntrack { atomic_t use; } ;
254 union __anonunion____missing_field_name_438 { __be32 ipv4_daddr; struct in6_addr ipv6_daddr; char neigh_header[8U]; } ;
254 struct nf_bridge_info { atomic_t use; unsigned char orig_proto; unsigned char pkt_otherhost; unsigned char in_prerouting; unsigned char bridged_dnat; __u16 frag_max_size; struct net_device *physindev; struct net_device *physoutdev; union __anonunion____missing_field_name_438 __annonCompField93; } ;
278 struct sk_buff_head { struct sk_buff *next; struct sk_buff *prev; __u32 qlen; spinlock_t lock; } ;
310 struct skb_frag_struct ;
310 typedef struct skb_frag_struct skb_frag_t;
311 struct __anonstruct_page_439 { struct page *p; } ;
311 struct skb_frag_struct { struct __anonstruct_page_439 page; __u32 page_offset; __u32 size; } ;
344 struct skb_shared_hwtstamps { ktime_t hwtstamp; } ;
410 struct skb_shared_info { unsigned char nr_frags; __u8 tx_flags; unsigned short gso_size; unsigned short gso_segs; unsigned short gso_type; struct sk_buff *frag_list; struct skb_shared_hwtstamps hwtstamps; u32 tskey; __be32 ip6_frag_id; atomic_t dataref; void *destructor_arg; skb_frag_t frags[17U]; } ;
500 typedef unsigned int sk_buff_data_t;
501 struct __anonstruct____missing_field_name_441 { u32 stamp_us; u32 stamp_jiffies; } ;
501 union __anonunion____missing_field_name_440 { u64 v64; struct __anonstruct____missing_field_name_441 __annonCompField94; } ;
501 struct skb_mstamp { union __anonunion____missing_field_name_440 __annonCompField95; } ;
564 union __anonunion____missing_field_name_444 { ktime_t tstamp; struct skb_mstamp skb_mstamp; } ;
564 struct __anonstruct____missing_field_name_443 { struct sk_buff *next; struct sk_buff *prev; union __anonunion____missing_field_name_444 __annonCompField96; } ;
564 union __anonunion____missing_field_name_442 { struct __anonstruct____missing_field_name_443 __annonCompField97; struct rb_node rbnode; } ;
564 struct sec_path ;
564 struct __anonstruct____missing_field_name_446 { __u16 csum_start; __u16 csum_offset; } ;
564 union __anonunion____missing_field_name_445 { __wsum csum; struct __anonstruct____missing_field_name_446 __annonCompField99; } ;
564 union __anonunion____missing_field_name_447 { unsigned int napi_id; unsigned int sender_cpu; } ;
564 union __anonunion____missing_field_name_448 { __u32 mark; __u32 reserved_tailroom; } ;
564 union __anonunion____missing_field_name_449 { __be16 inner_protocol; __u8 inner_ipproto; } ;
564 struct sk_buff { union __anonunion____missing_field_name_442 __annonCompField98; struct sock *sk; struct net_device *dev; char cb[48U]; unsigned long _skb_refdst; void (*destructor)(struct sk_buff *); struct sec_path *sp; struct nf_conntrack *nfct; struct nf_bridge_info *nf_bridge; unsigned int len; unsigned int data_len; __u16 mac_len; __u16 hdr_len; __u16 queue_mapping; __u8 __cloned_offset[0U]; unsigned char cloned; unsigned char nohdr; unsigned char fclone; unsigned char peeked; unsigned char head_frag; unsigned char xmit_more; unsigned char __unused; __u32 headers_start[0U]; __u8 __pkt_type_offset[0U]; unsigned char pkt_type; unsigned char pfmemalloc; unsigned char ignore_df; unsigned char nfctinfo; unsigned char nf_trace; unsigned char ip_summed; unsigned char ooo_okay; unsigned char l4_hash; unsigned char sw_hash; unsigned char wifi_acked_valid; unsigned char wifi_acked; unsigned char no_fcs; unsigned char encapsulation; unsigned char encap_hdr_csum; unsigned char csum_valid; unsigned char csum_complete_sw; unsigned char csum_level; unsigned char csum_bad; unsigned char ndisc_nodetype; unsigned char ipvs_property; unsigned char inner_protocol_type; unsigned char remcsum_offload; unsigned char offload_fwd_mark; __u16 tc_index; __u16 tc_verd; union __anonunion____missing_field_name_445 __annonCompField100; __u32 priority; int skb_iif; __u32 hash; __be16 vlan_proto; __u16 vlan_tci; union __anonunion____missing_field_name_447 __annonCompField101; __u32 secmark; union __anonunion____missing_field_name_448 __annonCompField102; union __anonunion____missing_field_name_449 __annonCompField103; __u16 inner_transport_header; __u16 inner_network_header; __u16 inner_mac_header; __be16 protocol; __u16 transport_header; __u16 network_header; __u16 mac_header; __u32 headers_end[0U]; sk_buff_data_t tail; sk_buff_data_t end; unsigned char *head; unsigned char *data; unsigned int truesize; atomic_t users; } ;
838 struct dst_entry ;
39 struct ethtool_cmd { __u32 cmd; __u32 supported; __u32 advertising; __u16 speed; __u8 duplex; __u8 port; __u8 phy_address; __u8 transceiver; __u8 autoneg; __u8 mdio_support; __u32 maxtxpkt; __u32 maxrxpkt; __u16 speed_hi; __u8 eth_tp_mdix; __u8 eth_tp_mdix_ctrl; __u32 lp_advertising; __u32 reserved[2U]; } ;
131 struct ethtool_drvinfo { __u32 cmd; char driver[32U]; char version[32U]; char fw_version[32U]; char bus_info[32U]; char erom_version[32U]; char reserved2[12U]; __u32 n_priv_flags; __u32 n_stats; __u32 testinfo_len; __u32 eedump_len; __u32 regdump_len; } ;
195 struct ethtool_wolinfo { __u32 cmd; __u32 supported; __u32 wolopts; __u8 sopass[6U]; } ;
239 struct ethtool_tunable { __u32 cmd; __u32 id; __u32 type_id; __u32 len; void *data[0U]; } ;
251 struct ethtool_regs { __u32 cmd; __u32 version; __u32 len; __u8 data[0U]; } ;
273 struct ethtool_eeprom { __u32 cmd; __u32 magic; __u32 offset; __u32 len; __u8 data[0U]; } ;
299 struct ethtool_eee { __u32 cmd; __u32 supported; __u32 advertised; __u32 lp_advertised; __u32 eee_active; __u32 eee_enabled; __u32 tx_lpi_enabled; __u32 tx_lpi_timer; __u32 reserved[2U]; } ;
328 struct ethtool_modinfo { __u32 cmd; __u32 type; __u32 eeprom_len; __u32 reserved[8U]; } ;
345 struct ethtool_coalesce { __u32 cmd; __u32 rx_coalesce_usecs; __u32 rx_max_coalesced_frames; __u32 rx_coalesce_usecs_irq; __u32 rx_max_coalesced_frames_irq; __u32 tx_coalesce_usecs; __u32 tx_max_coalesced_frames; __u32 tx_coalesce_usecs_irq; __u32 tx_max_coalesced_frames_irq; __u32 stats_block_coalesce_usecs; __u32 use_adaptive_rx_coalesce; __u32 use_adaptive_tx_coalesce; __u32 pkt_rate_low; __u32 rx_coalesce_usecs_low; __u32 rx_max_coalesced_frames_low; __u32 tx_coalesce_usecs_low; __u32 tx_max_coalesced_frames_low; __u32 pkt_rate_high; __u32 rx_coalesce_usecs_high; __u32 rx_max_coalesced_frames_high; __u32 tx_coalesce_usecs_high; __u32 tx_max_coalesced_frames_high; __u32 rate_sample_interval; } ;
444 struct ethtool_ringparam { __u32 cmd; __u32 rx_max_pending; __u32 rx_mini_max_pending; __u32 rx_jumbo_max_pending; __u32 tx_max_pending; __u32 rx_pending; __u32 rx_mini_pending; __u32 rx_jumbo_pending; __u32 tx_pending; } ;
481 struct ethtool_channels { __u32 cmd; __u32 max_rx; __u32 max_tx; __u32 max_other; __u32 max_combined; __u32 rx_count; __u32 tx_count; __u32 other_count; __u32 combined_count; } ;
509 struct ethtool_pauseparam { __u32 cmd; __u32 autoneg; __u32 rx_pause; __u32 tx_pause; } ;
613 struct ethtool_test { __u32 cmd; __u32 flags; __u32 reserved; __u32 len; __u64 data[0U]; } ;
645 struct ethtool_stats { __u32 cmd; __u32 n_stats; __u64 data[0U]; } ;
687 struct ethtool_tcpip4_spec { __be32 ip4src; __be32 ip4dst; __be16 psrc; __be16 pdst; __u8 tos; } ;
720 struct ethtool_ah_espip4_spec { __be32 ip4src; __be32 ip4dst; __be32 spi; __u8 tos; } ;
736 struct ethtool_usrip4_spec { __be32 ip4src; __be32 ip4dst; __be32 l4_4_bytes; __u8 tos; __u8 ip_ver; __u8 proto; } ;
756 struct ethtool_tcpip6_spec { __be32 ip6src[4U]; __be32 ip6dst[4U]; __be16 psrc; __be16 pdst; __u8 tclass; } ;
774 struct ethtool_ah_espip6_spec { __be32 ip6src[4U]; __be32 ip6dst[4U]; __be32 spi; __u8 tclass; } ;
790 struct ethtool_usrip6_spec { __be32 ip6src[4U]; __be32 ip6dst[4U]; __be32 l4_4_bytes; __u8 tclass; __u8 l4_proto; } ;
806 union ethtool_flow_union { struct ethtool_tcpip4_spec tcp_ip4_spec; struct ethtool_tcpip4_spec udp_ip4_spec; struct ethtool_tcpip4_spec sctp_ip4_spec; struct ethtool_ah_espip4_spec ah_ip4_spec; struct ethtool_ah_espip4_spec esp_ip4_spec; struct ethtool_usrip4_spec usr_ip4_spec; struct ethtool_tcpip6_spec tcp_ip6_spec; struct ethtool_tcpip6_spec udp_ip6_spec; struct ethtool_tcpip6_spec sctp_ip6_spec; struct ethtool_ah_espip6_spec ah_ip6_spec; struct ethtool_ah_espip6_spec esp_ip6_spec; struct ethtool_usrip6_spec usr_ip6_spec; struct ethhdr ether_spec; __u8 hdata[52U]; } ;
823 struct ethtool_flow_ext { __u8 padding[2U]; unsigned char h_dest[6U]; __be16 vlan_etype; __be16 vlan_tci; __be32 data[2U]; } ;
842 struct ethtool_rx_flow_spec { __u32 flow_type; union ethtool_flow_union h_u; struct ethtool_flow_ext h_ext; union ethtool_flow_union m_u; struct ethtool_flow_ext m_ext; __u64 ring_cookie; __u32 location; } ;
892 struct ethtool_rxnfc { __u32 cmd; __u32 flow_type; __u64 data; struct ethtool_rx_flow_spec fs; __u32 rule_cnt; __u32 rule_locs[0U]; } ;
1063 struct ethtool_flash { __u32 cmd; __u32 region; char data[128U]; } ;
1071 struct ethtool_dump { __u32 cmd; __u32 version; __u32 flag; __u32 len; __u8 data[0U]; } ;
1147 struct ethtool_ts_info { __u32 cmd; __u32 so_timestamping; __s32 phc_index; __u32 tx_types; __u32 tx_reserved[3U]; __u32 rx_filters; __u32 rx_reserved[3U]; } ;
1522 struct ethtool_link_settings { __u32 cmd; __u32 speed; __u8 duplex; __u8 port; __u8 phy_address; __u8 autoneg; __u8 mdio_support; __u8 eth_tp_mdix; __u8 eth_tp_mdix_ctrl; __s8 link_mode_masks_nwords; __u32 reserved[8U]; __u32 link_mode_masks[0U]; } ;
39 enum ethtool_phys_id_state { ETHTOOL_ID_INACTIVE = 0, ETHTOOL_ID_ACTIVE = 1, ETHTOOL_ID_ON = 2, ETHTOOL_ID_OFF = 3 } ;
97 struct __anonstruct_link_modes_453 { unsigned long supported[1U]; unsigned long advertising[1U]; unsigned long lp_advertising[1U]; } ;
97 struct ethtool_link_ksettings { struct ethtool_link_settings base; struct __anonstruct_link_modes_453 link_modes; } ;
158 struct ethtool_ops { int (*get_settings)(struct net_device *, struct ethtool_cmd *); int (*set_settings)(struct net_device *, struct ethtool_cmd *); void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *); int (*get_regs_len)(struct net_device *); void (*get_regs)(struct net_device *, struct ethtool_regs *, void *); void (*get_wol)(struct net_device *, struct ethtool_wolinfo *); int (*set_wol)(struct net_device *, struct ethtool_wolinfo *); u32 (*get_msglevel)(struct net_device *); void (*set_msglevel)(struct net_device *, u32 ); int (*nway_reset)(struct net_device *); u32 (*get_link)(struct net_device *); int (*get_eeprom_len)(struct net_device *); int (*get_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); int (*set_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); int (*get_coalesce)(struct net_device *, struct ethtool_coalesce *); int (*set_coalesce)(struct net_device *, struct ethtool_coalesce *); void (*get_ringparam)(struct net_device *, struct ethtool_ringparam *); int (*set_ringparam)(struct net_device *, struct ethtool_ringparam *); void (*get_pauseparam)(struct net_device *, struct ethtool_pauseparam *); int (*set_pauseparam)(struct net_device *, struct ethtool_pauseparam *); void (*self_test)(struct net_device *, struct ethtool_test *, u64 *); void (*get_strings)(struct net_device *, u32 , u8 *); int (*set_phys_id)(struct net_device *, enum ethtool_phys_id_state ); void (*get_ethtool_stats)(struct net_device *, struct ethtool_stats *, u64 *); int (*begin)(struct net_device *); void (*complete)(struct net_device *); u32 (*get_priv_flags)(struct net_device *); int (*set_priv_flags)(struct net_device *, u32 ); int (*get_sset_count)(struct net_device *, int); int (*get_rxnfc)(struct net_device *, struct ethtool_rxnfc *, u32 *); int (*set_rxnfc)(struct net_device *, struct ethtool_rxnfc *); int (*flash_device)(struct net_device *, struct ethtool_flash *); int (*reset)(struct net_device *, u32 *); u32 
(*get_rxfh_key_size)(struct net_device *); u32 (*get_rxfh_indir_size)(struct net_device *); int (*get_rxfh)(struct net_device *, u32 *, u8 *, u8 *); int (*set_rxfh)(struct net_device *, const u32 *, const u8 *, const u8 ); void (*get_channels)(struct net_device *, struct ethtool_channels *); int (*set_channels)(struct net_device *, struct ethtool_channels *); int (*get_dump_flag)(struct net_device *, struct ethtool_dump *); int (*get_dump_data)(struct net_device *, struct ethtool_dump *, void *); int (*set_dump)(struct net_device *, struct ethtool_dump *); int (*get_ts_info)(struct net_device *, struct ethtool_ts_info *); int (*get_module_info)(struct net_device *, struct ethtool_modinfo *); int (*get_module_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); int (*get_eee)(struct net_device *, struct ethtool_eee *); int (*set_eee)(struct net_device *, struct ethtool_eee *); int (*get_tunable)(struct net_device *, const struct ethtool_tunable *, void *); int (*set_tunable)(struct net_device *, const struct ethtool_tunable *, const void *); int (*get_per_queue_coalesce)(struct net_device *, u32 , struct ethtool_coalesce *); int (*set_per_queue_coalesce)(struct net_device *, u32 , struct ethtool_coalesce *); int (*get_link_ksettings)(struct net_device *, struct ethtool_link_ksettings *); int (*set_link_ksettings)(struct net_device *, const struct ethtool_link_ksettings *); } ;
375 struct prot_inuse ;
376 struct netns_core { struct ctl_table_header *sysctl_hdr; int sysctl_somaxconn; struct prot_inuse *inuse; } ;
38 struct u64_stats_sync { } ;
164 struct ipstats_mib { u64 mibs[36U]; struct u64_stats_sync syncp; } ;
61 struct icmp_mib { unsigned long mibs[28U]; } ;
67 struct icmpmsg_mib { atomic_long_t mibs[512U]; } ;
72 struct icmpv6_mib { unsigned long mibs[6U]; } ;
83 struct icmpv6msg_mib { atomic_long_t mibs[512U]; } ;
93 struct tcp_mib { unsigned long mibs[16U]; } ;
100 struct udp_mib { unsigned long mibs[9U]; } ;
106 struct linux_mib { unsigned long mibs[118U]; } ;
112 struct linux_xfrm_mib { unsigned long mibs[29U]; } ;
118 struct netns_mib { struct tcp_mib *tcp_statistics; struct ipstats_mib *ip_statistics; struct linux_mib *net_statistics; struct udp_mib *udp_statistics; struct udp_mib *udplite_statistics; struct icmp_mib *icmp_statistics; struct icmpmsg_mib *icmpmsg_statistics; struct proc_dir_entry *proc_net_devsnmp6; struct udp_mib *udp_stats_in6; struct udp_mib *udplite_stats_in6; struct ipstats_mib *ipv6_statistics; struct icmpv6_mib *icmpv6_statistics; struct icmpv6msg_mib *icmpv6msg_statistics; struct linux_xfrm_mib *xfrm_statistics; } ;
26 struct netns_unix { int sysctl_max_dgram_qlen; struct ctl_table_header *ctl; } ;
12 struct netns_packet { struct mutex sklist_lock; struct hlist_head sklist; } ;
14 struct netns_frags { struct percpu_counter mem; int timeout; int high_thresh; int low_thresh; int max_dist; } ;
187 struct ipv4_devconf ;
188 struct fib_rules_ops ;
189 struct fib_table ;
190 struct local_ports { seqlock_t lock; int range[2U]; bool warned; } ;
24 struct ping_group_range { seqlock_t lock; kgid_t range[2U]; } ;
29 struct inet_peer_base ;
29 struct xt_table ;
29 struct netns_ipv4 { struct ctl_table_header *forw_hdr; struct ctl_table_header *frags_hdr; struct ctl_table_header *ipv4_hdr; struct ctl_table_header *route_hdr; struct ctl_table_header *xfrm4_hdr; struct ipv4_devconf *devconf_all; struct ipv4_devconf *devconf_dflt; struct fib_rules_ops *rules_ops; bool fib_has_custom_rules; struct fib_table *fib_main; struct fib_table *fib_default; int fib_num_tclassid_users; struct hlist_head *fib_table_hash; bool fib_offload_disabled; struct sock *fibnl; struct sock **icmp_sk; struct sock *mc_autojoin_sk; struct inet_peer_base *peers; struct sock **tcp_sk; struct netns_frags frags; struct xt_table *iptable_filter; struct xt_table *iptable_mangle; struct xt_table *iptable_raw; struct xt_table *arptable_filter; struct xt_table *iptable_security; struct xt_table *nat_table; int sysctl_icmp_echo_ignore_all; int sysctl_icmp_echo_ignore_broadcasts; int sysctl_icmp_ignore_bogus_error_responses; int sysctl_icmp_ratelimit; int sysctl_icmp_ratemask; int sysctl_icmp_errors_use_inbound_ifaddr; struct local_ports ip_local_ports; int sysctl_tcp_ecn; int sysctl_tcp_ecn_fallback; int sysctl_ip_default_ttl; int sysctl_ip_no_pmtu_disc; int sysctl_ip_fwd_use_pmtu; int sysctl_ip_nonlocal_bind; int sysctl_ip_dynaddr; int sysctl_ip_early_demux; int sysctl_fwmark_reflect; int sysctl_tcp_fwmark_accept; int sysctl_tcp_l3mdev_accept; int sysctl_tcp_mtu_probing; int sysctl_tcp_base_mss; int sysctl_tcp_probe_threshold; u32 sysctl_tcp_probe_interval; int sysctl_tcp_keepalive_time; int sysctl_tcp_keepalive_probes; int sysctl_tcp_keepalive_intvl; int sysctl_tcp_syn_retries; int sysctl_tcp_synack_retries; int sysctl_tcp_syncookies; int sysctl_tcp_reordering; int sysctl_tcp_retries1; int sysctl_tcp_retries2; int sysctl_tcp_orphan_retries; int sysctl_tcp_fin_timeout; unsigned int sysctl_tcp_notsent_lowat; int sysctl_igmp_max_memberships; int sysctl_igmp_max_msf; int sysctl_igmp_llm_reports; int sysctl_igmp_qrv; struct ping_group_range ping_group_range; 
atomic_t dev_addr_genid; unsigned long *sysctl_local_reserved_ports; struct list_head mr_tables; struct fib_rules_ops *mr_rules_ops; int sysctl_fib_multipath_use_neigh; atomic_t rt_genid; } ;
141 struct neighbour ;
141 struct dst_ops { unsigned short family; unsigned int gc_thresh; int (*gc)(struct dst_ops *); struct dst_entry * (*check)(struct dst_entry *, __u32 ); unsigned int (*default_advmss)(const struct dst_entry *); unsigned int (*mtu)(const struct dst_entry *); u32 * (*cow_metrics)(struct dst_entry *, unsigned long); void (*destroy)(struct dst_entry *); void (*ifdown)(struct dst_entry *, struct net_device *, int); struct dst_entry * (*negative_advice)(struct dst_entry *); void (*link_failure)(struct sk_buff *); void (*update_pmtu)(struct dst_entry *, struct sock *, struct sk_buff *, u32 ); void (*redirect)(struct dst_entry *, struct sock *, struct sk_buff *); int (*local_out)(struct net *, struct sock *, struct sk_buff *); struct neighbour * (*neigh_lookup)(const struct dst_entry *, struct sk_buff *, const void *); struct kmem_cache *kmem_cachep; struct percpu_counter pcpuc_entries; } ;
73 struct netns_sysctl_ipv6 { struct ctl_table_header *hdr; struct ctl_table_header *route_hdr; struct ctl_table_header *icmp_hdr; struct ctl_table_header *frags_hdr; struct ctl_table_header *xfrm6_hdr; int bindv6only; int flush_delay; int ip6_rt_max_size; int ip6_rt_gc_min_interval; int ip6_rt_gc_timeout; int ip6_rt_gc_interval; int ip6_rt_gc_elasticity; int ip6_rt_mtu_expires; int ip6_rt_min_advmss; int flowlabel_consistency; int auto_flowlabels; int icmpv6_time; int anycast_src_echo_reply; int ip_nonlocal_bind; int fwmark_reflect; int idgen_retries; int idgen_delay; int flowlabel_state_ranges; } ;
40 struct ipv6_devconf ;
40 struct rt6_info ;
40 struct rt6_statistics ;
40 struct fib6_table ;
40 struct netns_ipv6 { struct netns_sysctl_ipv6 sysctl; struct ipv6_devconf *devconf_all; struct ipv6_devconf *devconf_dflt; struct inet_peer_base *peers; struct netns_frags frags; struct xt_table *ip6table_filter; struct xt_table *ip6table_mangle; struct xt_table *ip6table_raw; struct xt_table *ip6table_security; struct xt_table *ip6table_nat; struct rt6_info *ip6_null_entry; struct rt6_statistics *rt6_stats; struct timer_list ip6_fib_timer; struct hlist_head *fib_table_hash; struct fib6_table *fib6_main_tbl; struct list_head fib6_walkers; struct dst_ops ip6_dst_ops; rwlock_t fib6_walker_lock; spinlock_t fib6_gc_lock; unsigned int ip6_rt_gc_expire; unsigned long ip6_rt_last_gc; struct rt6_info *ip6_prohibit_entry; struct rt6_info *ip6_blk_hole_entry; struct fib6_table *fib6_local_tbl; struct fib_rules_ops *fib6_rules_ops; struct sock **icmp_sk; struct sock *ndisc_sk; struct sock *tcp_sk; struct sock *igmp_sk; struct sock *mc_autojoin_sk; struct list_head mr6_tables; struct fib_rules_ops *mr6_rules_ops; atomic_t dev_addr_genid; atomic_t fib6_sernum; } ;
89 struct netns_nf_frag { struct netns_sysctl_ipv6 sysctl; struct netns_frags frags; } ;
95 struct netns_sysctl_lowpan { struct ctl_table_header *frags_hdr; } ;
14 struct netns_ieee802154_lowpan { struct netns_sysctl_lowpan sysctl; struct netns_frags frags; } ;
20 struct sctp_mib ;
21 struct netns_sctp { struct sctp_mib *sctp_statistics; struct proc_dir_entry *proc_net_sctp; struct ctl_table_header *sysctl_header; struct sock *ctl_sock; struct list_head local_addr_list; struct list_head addr_waitq; struct timer_list addr_wq_timer; struct list_head auto_asconf_splist; spinlock_t addr_wq_lock; spinlock_t local_addr_lock; unsigned int rto_initial; unsigned int rto_min; unsigned int rto_max; int rto_alpha; int rto_beta; int max_burst; int cookie_preserve_enable; char *sctp_hmac_alg; unsigned int valid_cookie_life; unsigned int sack_timeout; unsigned int hb_interval; int max_retrans_association; int max_retrans_path; int max_retrans_init; int pf_retrans; int pf_enable; int sndbuf_policy; int rcvbuf_policy; int default_auto_asconf; int addip_enable; int addip_noauth; int prsctp_enable; int auth_enable; int scope_policy; int rwnd_upd_shift; unsigned long max_autoclose; } ;
141 struct netns_dccp { struct sock *v4_ctl_sk; struct sock *v6_ctl_sk; } ;
79 struct nf_logger ;
80 struct nf_queue_handler ;
81 struct nf_hook_entry ;
81 struct netns_nf { struct proc_dir_entry *proc_netfilter; const struct nf_queue_handler *queue_handler; const struct nf_logger *nf_loggers[13U]; struct ctl_table_header *nf_log_dir_header; struct nf_hook_entry *hooks[13U][8U]; } ;
21 struct ebt_table ;
22 struct netns_xt { struct list_head tables[13U]; bool notrack_deprecated_warning; bool clusterip_deprecated_warning; struct ebt_table *broute_table; struct ebt_table *frame_filter; struct ebt_table *frame_nat; } ;
19 struct hlist_nulls_node ;
19 struct hlist_nulls_head { struct hlist_nulls_node *first; } ;
23 struct hlist_nulls_node { struct hlist_nulls_node *next; struct hlist_nulls_node **pprev; } ;
32 struct nf_proto_net { struct ctl_table_header *ctl_table_header; struct ctl_table *ctl_table; unsigned int users; } ;
21 struct nf_generic_net { struct nf_proto_net pn; unsigned int timeout; } ;
26 struct nf_tcp_net { struct nf_proto_net pn; unsigned int timeouts[14U]; unsigned int tcp_loose; unsigned int tcp_be_liberal; unsigned int tcp_max_retrans; } ;
40 struct nf_udp_net { struct nf_proto_net pn; unsigned int timeouts[2U]; } ;
45 struct nf_icmp_net { struct nf_proto_net pn; unsigned int timeout; } ;
50 struct nf_ip_net { struct nf_generic_net generic; struct nf_tcp_net tcp; struct nf_udp_net udp; struct nf_icmp_net icmp; struct nf_icmp_net icmpv6; } ;
58 struct ct_pcpu { spinlock_t lock; struct hlist_nulls_head unconfirmed; struct hlist_nulls_head dying; } ;
64 struct ip_conntrack_stat ;
64 struct nf_ct_event_notifier ;
64 struct nf_exp_event_notifier ;
64 struct netns_ct { atomic_t count; unsigned int expect_count; struct delayed_work ecache_dwork; bool ecache_dwork_pending; struct ctl_table_header *sysctl_header; struct ctl_table_header *acct_sysctl_header; struct ctl_table_header *tstamp_sysctl_header; struct ctl_table_header *event_sysctl_header; struct ctl_table_header *helper_sysctl_header; unsigned int sysctl_log_invalid; int sysctl_events; int sysctl_acct; int sysctl_auto_assign_helper; bool auto_assign_helper_warned; int sysctl_tstamp; int sysctl_checksum; struct ct_pcpu *pcpu_lists; struct ip_conntrack_stat *stat; struct nf_ct_event_notifier *nf_conntrack_event_cb; struct nf_exp_event_notifier *nf_expect_event_cb; struct nf_ip_net nf_ct_proto; unsigned int labels_used; u8 label_words; } ;
96 struct nft_af_info ;
97 struct netns_nftables { struct list_head af_info; struct list_head commit_list; struct nft_af_info *ipv4; struct nft_af_info *ipv6; struct nft_af_info *inet; struct nft_af_info *arp; struct nft_af_info *bridge; struct nft_af_info *netdev; unsigned int base_seq; u8 gencursor; } ;
509 struct flow_cache_percpu { struct hlist_head *hash_table; int hash_count; u32 hash_rnd; int hash_rnd_recalc; struct tasklet_struct flush_tasklet; } ;
16 struct flow_cache { u32 hash_shift; struct flow_cache_percpu *percpu; struct notifier_block hotcpu_notifier; int low_watermark; int high_watermark; struct timer_list rnd_timer; } ;
25 struct xfrm_policy_hash { struct hlist_head *table; unsigned int hmask; u8 dbits4; u8 sbits4; u8 dbits6; u8 sbits6; } ;
21 struct xfrm_policy_hthresh { struct work_struct work; seqlock_t lock; u8 lbits4; u8 rbits4; u8 lbits6; u8 rbits6; } ;
30 struct netns_xfrm { struct list_head state_all; struct hlist_head *state_bydst; struct hlist_head *state_bysrc; struct hlist_head *state_byspi; unsigned int state_hmask; unsigned int state_num; struct work_struct state_hash_work; struct list_head policy_all; struct hlist_head *policy_byidx; unsigned int policy_idx_hmask; struct hlist_head policy_inexact[3U]; struct xfrm_policy_hash policy_bydst[3U]; unsigned int policy_count[6U]; struct work_struct policy_hash_work; struct xfrm_policy_hthresh policy_hthresh; struct sock *nlsk; struct sock *nlsk_stash; u32 sysctl_aevent_etime; u32 sysctl_aevent_rseqth; int sysctl_larval_drop; u32 sysctl_acq_expires; struct ctl_table_header *sysctl_hdr; struct dst_ops xfrm4_dst_ops; struct dst_ops xfrm6_dst_ops; spinlock_t xfrm_state_lock; spinlock_t xfrm_policy_lock; struct mutex xfrm_cfg_mutex; struct flow_cache flow_cache_global; atomic_t flow_cache_genid; struct list_head flow_cache_gc_list; atomic_t flow_cache_gc_count; spinlock_t flow_cache_gc_lock; struct work_struct flow_cache_gc_work; struct work_struct flow_cache_flush_work; struct mutex flow_flush_sem; } ;
87 struct mpls_route ;
88 struct netns_mpls { size_t platform_labels; struct mpls_route **platform_label; struct ctl_table_header *ctl; } ;
16 struct proc_ns_operations ;
17 struct ns_common { atomic_long_t stashed; const struct proc_ns_operations *ops; unsigned int inum; } ;
11 struct net_generic ;
12 struct netns_ipvs ;
13 struct ucounts ;
13 struct net { atomic_t passive; atomic_t count; spinlock_t rules_mod_lock; atomic64_t cookie_gen; struct list_head list; struct list_head cleanup_list; struct list_head exit_list; struct user_namespace *user_ns; struct ucounts *ucounts; spinlock_t nsid_lock; struct idr netns_ids; struct ns_common ns; struct proc_dir_entry *proc_net; struct proc_dir_entry *proc_net_stat; struct ctl_table_set sysctls; struct sock *rtnl; struct sock *genl_sock; struct list_head dev_base_head; struct hlist_head *dev_name_head; struct hlist_head *dev_index_head; unsigned int dev_base_seq; int ifindex; unsigned int dev_unreg_count; struct list_head rules_ops; struct net_device *loopback_dev; struct netns_core core; struct netns_mib mib; struct netns_packet packet; struct netns_unix unx; struct netns_ipv4 ipv4; struct netns_ipv6 ipv6; struct netns_ieee802154_lowpan ieee802154_lowpan; struct netns_sctp sctp; struct netns_dccp dccp; struct netns_nf nf; struct netns_xt xt; struct netns_ct ct; struct netns_nftables nft; struct netns_nf_frag nf_frag; struct sock *nfnl; struct sock *nfnl_stash; struct list_head nfnl_acct_list; struct list_head nfct_timeout_list; struct sk_buff_head wext_nlevents; struct net_generic *gen; struct netns_xfrm xfrm; struct netns_ipvs *ipvs; struct netns_mpls mpls; struct sock *diag_nlsk; atomic_t fnhe_genid; } ;
248 struct __anonstruct_possible_net_t_459 { struct net *net; } ;
248 typedef struct __anonstruct_possible_net_t_459 possible_net_t;
296 struct mii_bus ;
303 struct mdio_device { struct device dev; const struct dev_pm_ops *pm_ops; struct mii_bus *bus; int (*bus_match)(struct device *, struct device_driver *); void (*device_free)(struct mdio_device *); void (*device_remove)(struct mdio_device *); int addr; int flags; } ;
41 struct mdio_driver_common { struct device_driver driver; int flags; } ;
244 struct phy_device ;
245 enum ldv_31859 { PHY_INTERFACE_MODE_NA = 0, PHY_INTERFACE_MODE_MII = 1, PHY_INTERFACE_MODE_GMII = 2, PHY_INTERFACE_MODE_SGMII = 3, PHY_INTERFACE_MODE_TBI = 4, PHY_INTERFACE_MODE_REVMII = 5, PHY_INTERFACE_MODE_RMII = 6, PHY_INTERFACE_MODE_RGMII = 7, PHY_INTERFACE_MODE_RGMII_ID = 8, PHY_INTERFACE_MODE_RGMII_RXID = 9, PHY_INTERFACE_MODE_RGMII_TXID = 10, PHY_INTERFACE_MODE_RTBI = 11, PHY_INTERFACE_MODE_SMII = 12, PHY_INTERFACE_MODE_XGMII = 13, PHY_INTERFACE_MODE_MOCA = 14, PHY_INTERFACE_MODE_QSGMII = 15, PHY_INTERFACE_MODE_TRGMII = 16, PHY_INTERFACE_MODE_MAX = 17 } ;
85 typedef enum ldv_31859 phy_interface_t;
133 enum ldv_31911 { MDIOBUS_ALLOCATED = 1, MDIOBUS_REGISTERED = 2, MDIOBUS_UNREGISTERED = 3, MDIOBUS_RELEASED = 4 } ;
140 struct mii_bus { struct module *owner; const char *name; char id[17U]; void *priv; int (*read)(struct mii_bus *, int, int); int (*write)(struct mii_bus *, int, int, u16 ); int (*reset)(struct mii_bus *); struct mutex mdio_lock; struct device *parent; enum ldv_31911 state; struct device dev; struct mdio_device *mdio_map[32U]; u32 phy_mask; u32 phy_ignore_ta_mask; int irq[32U]; } ;
221 enum phy_state { PHY_DOWN = 0, PHY_STARTING = 1, PHY_READY = 2, PHY_PENDING = 3, PHY_UP = 4, PHY_AN = 5, PHY_RUNNING = 6, PHY_NOLINK = 7, PHY_FORCING = 8, PHY_CHANGELINK = 9, PHY_HALTED = 10, PHY_RESUMING = 11 } ;
236 struct phy_c45_device_ids { u32 devices_in_package; u32 device_ids[8U]; } ;
329 struct phy_driver ;
329 struct phy_device { struct mdio_device mdio; struct phy_driver *drv; u32 phy_id; struct phy_c45_device_ids c45_ids; bool is_c45; bool is_internal; bool is_pseudo_fixed_link; bool has_fixups; bool suspended; enum phy_state state; u32 dev_flags; phy_interface_t interface; int speed; int duplex; int pause; int asym_pause; int link; u32 interrupts; u32 supported; u32 advertising; u32 lp_advertising; int autoneg; int link_timeout; int irq; void *priv; struct work_struct phy_queue; struct delayed_work state_queue; atomic_t irq_disable; struct mutex lock; struct net_device *attached_dev; u8 mdix; void (*adjust_link)(struct net_device *); } ;
431 struct phy_driver { struct mdio_driver_common mdiodrv; u32 phy_id; char *name; unsigned int phy_id_mask; u32 features; u32 flags; const void *driver_data; int (*soft_reset)(struct phy_device *); int (*config_init)(struct phy_device *); int (*probe)(struct phy_device *); int (*suspend)(struct phy_device *); int (*resume)(struct phy_device *); int (*config_aneg)(struct phy_device *); int (*aneg_done)(struct phy_device *); int (*read_status)(struct phy_device *); int (*ack_interrupt)(struct phy_device *); int (*config_intr)(struct phy_device *); int (*did_interrupt)(struct phy_device *); void (*remove)(struct phy_device *); int (*match_phy_device)(struct phy_device *); int (*ts_info)(struct phy_device *, struct ethtool_ts_info *); int (*hwtstamp)(struct phy_device *, struct ifreq *); bool (*rxtstamp)(struct phy_device *, struct sk_buff *, int); void (*txtstamp)(struct phy_device *, struct sk_buff *, int); int (*set_wol)(struct phy_device *, struct ethtool_wolinfo *); void (*get_wol)(struct phy_device *, struct ethtool_wolinfo *); void (*link_change_notify)(struct phy_device *); int (*read_mmd_indirect)(struct phy_device *, int, int, int); void (*write_mmd_indirect)(struct phy_device *, int, int, int, u32 ); int (*module_info)(struct phy_device *, struct ethtool_modinfo *); int (*module_eeprom)(struct phy_device *, struct ethtool_eeprom *, u8 *); int (*get_sset_count)(struct phy_device *); void (*get_strings)(struct phy_device *, u8 *); void (*get_stats)(struct phy_device *, struct ethtool_stats *, u64 *); } ;
844 struct fixed_phy_status { int link; int speed; int duplex; int pause; int asym_pause; } ;
27 enum dsa_tag_protocol { DSA_TAG_PROTO_NONE = 0, DSA_TAG_PROTO_DSA = 1, DSA_TAG_PROTO_TRAILER = 2, DSA_TAG_PROTO_EDSA = 3, DSA_TAG_PROTO_BRCM = 4, DSA_TAG_PROTO_QCA = 5, DSA_TAG_LAST = 6 } ;
37 struct dsa_chip_data { struct device *host_dev; int sw_addr; int eeprom_len; struct device_node *of_node; char *port_names[12U]; struct device_node *port_dn[12U]; s8 rtable[4U]; } ;
71 struct dsa_platform_data { struct device *netdev; struct net_device *of_netdev; int nr_chips; struct dsa_chip_data *chip; } ;
87 struct packet_type ;
88 struct dsa_switch ;
88 struct dsa_device_ops ;
88 struct dsa_switch_tree { struct list_head list; u32 tree; struct kref refcount; bool applied; struct dsa_platform_data *pd; struct net_device *master_netdev; int (*rcv)(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *); struct ethtool_ops master_ethtool_ops; const struct ethtool_ops *master_orig_ethtool_ops; s8 cpu_switch; s8 cpu_port; struct dsa_switch *ds[4U]; const struct dsa_device_ops *tag_ops; } ;
141 struct dsa_port { struct net_device *netdev; struct device_node *dn; unsigned int ageing_time; u8 stp_state; } ;
148 struct dsa_switch_ops ;
148 struct dsa_switch { struct device *dev; struct dsa_switch_tree *dst; int index; void *priv; struct dsa_chip_data *cd; struct dsa_switch_ops *ops; s8 rtable[4U]; char hwmon_name[24U]; struct device *hwmon_dev; struct net_device *master_netdev; u32 dsa_port_mask; u32 cpu_port_mask; u32 enabled_port_mask; u32 phys_mii_mask; struct dsa_port ports[12U]; struct mii_bus *slave_mii_bus; } ;
235 struct switchdev_trans ;
236 struct switchdev_obj ;
237 struct switchdev_obj_port_fdb ;
238 struct switchdev_obj_port_mdb ;
239 struct switchdev_obj_port_vlan ;
240 struct dsa_switch_ops { struct list_head list; const char * (*probe)(struct device *, struct device *, int, void **); enum dsa_tag_protocol (*get_tag_protocol)(struct dsa_switch *); int (*setup)(struct dsa_switch *); int (*set_addr)(struct dsa_switch *, u8 *); u32 (*get_phy_flags)(struct dsa_switch *, int); int (*phy_read)(struct dsa_switch *, int, int); int (*phy_write)(struct dsa_switch *, int, int, u16 ); void (*adjust_link)(struct dsa_switch *, int, struct phy_device *); void (*fixed_link_update)(struct dsa_switch *, int, struct fixed_phy_status *); void (*get_strings)(struct dsa_switch *, int, uint8_t *); void (*get_ethtool_stats)(struct dsa_switch *, int, uint64_t *); int (*get_sset_count)(struct dsa_switch *); void (*get_wol)(struct dsa_switch *, int, struct ethtool_wolinfo *); int (*set_wol)(struct dsa_switch *, int, struct ethtool_wolinfo *); int (*suspend)(struct dsa_switch *); int (*resume)(struct dsa_switch *); int (*port_enable)(struct dsa_switch *, int, struct phy_device *); void (*port_disable)(struct dsa_switch *, int, struct phy_device *); int (*set_eee)(struct dsa_switch *, int, struct phy_device *, struct ethtool_eee *); int (*get_eee)(struct dsa_switch *, int, struct ethtool_eee *); int (*get_temp)(struct dsa_switch *, int *); int (*get_temp_limit)(struct dsa_switch *, int *); int (*set_temp_limit)(struct dsa_switch *, int); int (*get_temp_alarm)(struct dsa_switch *, bool *); int (*get_eeprom_len)(struct dsa_switch *); int (*get_eeprom)(struct dsa_switch *, struct ethtool_eeprom *, u8 *); int (*set_eeprom)(struct dsa_switch *, struct ethtool_eeprom *, u8 *); int (*get_regs_len)(struct dsa_switch *, int); void (*get_regs)(struct dsa_switch *, int, struct ethtool_regs *, void *); int (*set_ageing_time)(struct dsa_switch *, unsigned int); int (*port_bridge_join)(struct dsa_switch *, int, struct net_device *); void (*port_bridge_leave)(struct dsa_switch *, int); void (*port_stp_state_set)(struct dsa_switch *, int, u8 ); void 
(*port_fast_age)(struct dsa_switch *, int); int (*port_vlan_filtering)(struct dsa_switch *, int, bool ); int (*port_vlan_prepare)(struct dsa_switch *, int, const struct switchdev_obj_port_vlan *, struct switchdev_trans *); void (*port_vlan_add)(struct dsa_switch *, int, const struct switchdev_obj_port_vlan *, struct switchdev_trans *); int (*port_vlan_del)(struct dsa_switch *, int, const struct switchdev_obj_port_vlan *); int (*port_vlan_dump)(struct dsa_switch *, int, struct switchdev_obj_port_vlan *, int (*)(struct switchdev_obj *)); int (*port_fdb_prepare)(struct dsa_switch *, int, const struct switchdev_obj_port_fdb *, struct switchdev_trans *); void (*port_fdb_add)(struct dsa_switch *, int, const struct switchdev_obj_port_fdb *, struct switchdev_trans *); int (*port_fdb_del)(struct dsa_switch *, int, const struct switchdev_obj_port_fdb *); int (*port_fdb_dump)(struct dsa_switch *, int, struct switchdev_obj_port_fdb *, int (*)(struct switchdev_obj *)); int (*port_mdb_prepare)(struct dsa_switch *, int, const struct switchdev_obj_port_mdb *, struct switchdev_trans *); void (*port_mdb_add)(struct dsa_switch *, int, const struct switchdev_obj_port_mdb *, struct switchdev_trans *); int (*port_mdb_del)(struct dsa_switch *, int, const struct switchdev_obj_port_mdb *); int (*port_mdb_dump)(struct dsa_switch *, int, struct switchdev_obj_port_mdb *, int (*)(struct switchdev_obj *)); } ;
407 struct ieee_ets { __u8 willing; __u8 ets_cap; __u8 cbs; __u8 tc_tx_bw[8U]; __u8 tc_rx_bw[8U]; __u8 tc_tsa[8U]; __u8 prio_tc[8U]; __u8 tc_reco_bw[8U]; __u8 tc_reco_tsa[8U]; __u8 reco_prio_tc[8U]; } ;
69 struct ieee_maxrate { __u64 tc_maxrate[8U]; } ;
87 struct ieee_qcn { __u8 rpg_enable[8U]; __u32 rppp_max_rps[8U]; __u32 rpg_time_reset[8U]; __u32 rpg_byte_reset[8U]; __u32 rpg_threshold[8U]; __u32 rpg_max_rate[8U]; __u32 rpg_ai_rate[8U]; __u32 rpg_hai_rate[8U]; __u32 rpg_gd[8U]; __u32 rpg_min_dec_fac[8U]; __u32 rpg_min_rate[8U]; __u32 cndd_state_machine[8U]; } ;
132 struct ieee_qcn_stats { __u64 rppp_rp_centiseconds[8U]; __u32 rppp_created_rps[8U]; } ;
144 struct ieee_pfc { __u8 pfc_cap; __u8 pfc_en; __u8 mbc; __u16 delay; __u64 requests[8U]; __u64 indications[8U]; } ;
164 struct cee_pg { __u8 willing; __u8 error; __u8 pg_en; __u8 tcs_supported; __u8 pg_bw[8U]; __u8 prio_pg[8U]; } ;
187 struct cee_pfc { __u8 willing; __u8 error; __u8 pfc_en; __u8 tcs_supported; } ;
202 struct dcb_app { __u8 selector; __u8 priority; __u16 protocol; } ;
236 struct dcb_peer_app_info { __u8 willing; __u8 error; } ;
40 struct dcbnl_rtnl_ops { int (*ieee_getets)(struct net_device *, struct ieee_ets *); int (*ieee_setets)(struct net_device *, struct ieee_ets *); int (*ieee_getmaxrate)(struct net_device *, struct ieee_maxrate *); int (*ieee_setmaxrate)(struct net_device *, struct ieee_maxrate *); int (*ieee_getqcn)(struct net_device *, struct ieee_qcn *); int (*ieee_setqcn)(struct net_device *, struct ieee_qcn *); int (*ieee_getqcnstats)(struct net_device *, struct ieee_qcn_stats *); int (*ieee_getpfc)(struct net_device *, struct ieee_pfc *); int (*ieee_setpfc)(struct net_device *, struct ieee_pfc *); int (*ieee_getapp)(struct net_device *, struct dcb_app *); int (*ieee_setapp)(struct net_device *, struct dcb_app *); int (*ieee_delapp)(struct net_device *, struct dcb_app *); int (*ieee_peer_getets)(struct net_device *, struct ieee_ets *); int (*ieee_peer_getpfc)(struct net_device *, struct ieee_pfc *); u8 (*getstate)(struct net_device *); u8 (*setstate)(struct net_device *, u8 ); void (*getpermhwaddr)(struct net_device *, u8 *); void (*setpgtccfgtx)(struct net_device *, int, u8 , u8 , u8 , u8 ); void (*setpgbwgcfgtx)(struct net_device *, int, u8 ); void (*setpgtccfgrx)(struct net_device *, int, u8 , u8 , u8 , u8 ); void (*setpgbwgcfgrx)(struct net_device *, int, u8 ); void (*getpgtccfgtx)(struct net_device *, int, u8 *, u8 *, u8 *, u8 *); void (*getpgbwgcfgtx)(struct net_device *, int, u8 *); void (*getpgtccfgrx)(struct net_device *, int, u8 *, u8 *, u8 *, u8 *); void (*getpgbwgcfgrx)(struct net_device *, int, u8 *); void (*setpfccfg)(struct net_device *, int, u8 ); void (*getpfccfg)(struct net_device *, int, u8 *); u8 (*setall)(struct net_device *); u8 (*getcap)(struct net_device *, int, u8 *); int (*getnumtcs)(struct net_device *, int, u8 *); int (*setnumtcs)(struct net_device *, int, u8 ); u8 (*getpfcstate)(struct net_device *); void (*setpfcstate)(struct net_device *, u8 ); void (*getbcncfg)(struct net_device *, int, u32 *); void (*setbcncfg)(struct net_device *, int, u32 ); 
void (*getbcnrp)(struct net_device *, int, u8 *); void (*setbcnrp)(struct net_device *, int, u8 ); int (*setapp)(struct net_device *, u8 , u16 , u8 ); int (*getapp)(struct net_device *, u8 , u16 ); u8 (*getfeatcfg)(struct net_device *, int, u8 *); u8 (*setfeatcfg)(struct net_device *, int, u8 ); u8 (*getdcbx)(struct net_device *); u8 (*setdcbx)(struct net_device *, u8 ); int (*peer_getappinfo)(struct net_device *, struct dcb_peer_app_info *, u16 *); int (*peer_getapptable)(struct net_device *, struct dcb_app *); int (*cee_peer_getpg)(struct net_device *, struct cee_pg *); int (*cee_peer_getpfc)(struct net_device *, struct cee_pfc *); } ;
105 struct taskstats { __u16 version; __u32 ac_exitcode; __u8 ac_flag; __u8 ac_nice; __u64 cpu_count; __u64 cpu_delay_total; __u64 blkio_count; __u64 blkio_delay_total; __u64 swapin_count; __u64 swapin_delay_total; __u64 cpu_run_real_total; __u64 cpu_run_virtual_total; char ac_comm[32U]; __u8 ac_sched; __u8 ac_pad[3U]; __u32 ac_uid; __u32 ac_gid; __u32 ac_pid; __u32 ac_ppid; __u32 ac_btime; __u64 ac_etime; __u64 ac_utime; __u64 ac_stime; __u64 ac_minflt; __u64 ac_majflt; __u64 coremem; __u64 virtmem; __u64 hiwater_rss; __u64 hiwater_vm; __u64 read_char; __u64 write_char; __u64 read_syscalls; __u64 write_syscalls; __u64 read_bytes; __u64 write_bytes; __u64 cancelled_write_bytes; __u64 nvcsw; __u64 nivcsw; __u64 ac_utimescaled; __u64 ac_stimescaled; __u64 cpu_scaled_run_real_total; __u64 freepages_count; __u64 freepages_delay_total; } ;
58 struct mnt_namespace ;
59 struct ipc_namespace ;
60 struct cgroup_namespace ;
61 struct nsproxy { atomic_t count; struct uts_namespace *uts_ns; struct ipc_namespace *ipc_ns; struct mnt_namespace *mnt_ns; struct pid_namespace *pid_ns_for_children; struct net *net_ns; struct cgroup_namespace *cgroup_ns; } ;
86 struct uid_gid_extent { u32 first; u32 lower_first; u32 count; } ;
19 struct uid_gid_map { u32 nr_extents; struct uid_gid_extent extent[5U]; } ;
31 struct user_namespace { struct uid_gid_map uid_map; struct uid_gid_map gid_map; struct uid_gid_map projid_map; atomic_t count; struct user_namespace *parent; int level; kuid_t owner; kgid_t group; struct ns_common ns; unsigned long flags; struct key *persistent_keyring_register; struct rw_semaphore persistent_keyring_register_sem; struct work_struct work; struct ctl_table_set set; struct ctl_table_header *sysctls; struct ucounts *ucounts; int ucount_max[7U]; } ;
63 struct ucounts { struct hlist_node node; struct user_namespace *ns; kuid_t uid; atomic_t count; atomic_t ucount[7U]; } ;
631 struct cgroup_namespace { atomic_t count; struct ns_common ns; struct user_namespace *user_ns; struct ucounts *ucounts; struct css_set *root_cset; } ;
686 struct netprio_map { struct callback_head rcu; u32 priomap_len; u32 priomap[]; } ;
41 struct nlmsghdr { __u32 nlmsg_len; __u16 nlmsg_type; __u16 nlmsg_flags; __u32 nlmsg_seq; __u32 nlmsg_pid; } ;
143 struct nlattr { __u16 nla_len; __u16 nla_type; } ;
105 struct netlink_callback { struct sk_buff *skb; const struct nlmsghdr *nlh; int (*start)(struct netlink_callback *); int (*dump)(struct sk_buff *, struct netlink_callback *); int (*done)(struct netlink_callback *); void *data; struct module *module; u16 family; u16 min_dump_alloc; unsigned int prev_seq; unsigned int seq; long args[6U]; } ;
183 struct ndmsg { __u8 ndm_family; __u8 ndm_pad1; __u16 ndm_pad2; __s32 ndm_ifindex; __u16 ndm_state; __u8 ndm_flags; __u8 ndm_type; } ;
41 struct rtnl_link_stats64 { __u64 rx_packets; __u64 tx_packets; __u64 rx_bytes; __u64 tx_bytes; __u64 rx_errors; __u64 tx_errors; __u64 rx_dropped; __u64 tx_dropped; __u64 multicast; __u64 collisions; __u64 rx_length_errors; __u64 rx_over_errors; __u64 rx_crc_errors; __u64 rx_frame_errors; __u64 rx_fifo_errors; __u64 rx_missed_errors; __u64 tx_aborted_errors; __u64 tx_carrier_errors; __u64 tx_fifo_errors; __u64 tx_heartbeat_errors; __u64 tx_window_errors; __u64 rx_compressed; __u64 tx_compressed; __u64 rx_nohandler; } ;
866 struct ifla_vf_stats { __u64 rx_packets; __u64 tx_packets; __u64 rx_bytes; __u64 tx_bytes; __u64 broadcast; __u64 multicast; } ;
16 struct ifla_vf_info { __u32 vf; __u8 mac[32U]; __u32 vlan; __u32 qos; __u32 spoofchk; __u32 linkstate; __u32 min_tx_rate; __u32 max_tx_rate; __u32 rss_query_en; __u32 trusted; __be16 vlan_proto; } ;
117 struct netpoll_info ;
118 struct wireless_dev ;
119 struct wpan_dev ;
120 struct mpls_dev ;
121 struct udp_tunnel_info ;
122 struct bpf_prog ;
70 enum netdev_tx { __NETDEV_TX_MIN = -2147483648, NETDEV_TX_OK = 0, NETDEV_TX_BUSY = 16 } ;
113 typedef enum netdev_tx netdev_tx_t;
132 struct net_device_stats { unsigned long rx_packets; unsigned long tx_packets; unsigned long rx_bytes; unsigned long tx_bytes; unsigned long rx_errors; unsigned long tx_errors; unsigned long rx_dropped; unsigned long tx_dropped; unsigned long multicast; unsigned long collisions; unsigned long rx_length_errors; unsigned long rx_over_errors; unsigned long rx_crc_errors; unsigned long rx_frame_errors; unsigned long rx_fifo_errors; unsigned long rx_missed_errors; unsigned long tx_aborted_errors; unsigned long tx_carrier_errors; unsigned long tx_fifo_errors; unsigned long tx_heartbeat_errors; unsigned long tx_window_errors; unsigned long rx_compressed; unsigned long tx_compressed; } ;
195 struct neigh_parms ;
196 struct netdev_hw_addr { struct list_head list; unsigned char addr[32U]; unsigned char type; bool global_use; int sync_cnt; int refcount; int synced; struct callback_head callback_head; } ;
216 struct netdev_hw_addr_list { struct list_head list; int count; } ;
221 struct hh_cache { u16 hh_len; u16 __pad; seqlock_t hh_lock; unsigned long hh_data[16U]; } ;
250 struct header_ops { int (*create)(struct sk_buff *, struct net_device *, unsigned short, const void *, const void *, unsigned int); int (*parse)(const struct sk_buff *, unsigned char *); int (*cache)(const struct neighbour *, struct hh_cache *, __be16 ); void (*cache_update)(struct hh_cache *, const struct net_device *, const unsigned char *); bool (*validate)(const char *, unsigned int); } ;
301 struct napi_struct { struct list_head poll_list; unsigned long state; int weight; unsigned int gro_count; int (*poll)(struct napi_struct *, int); spinlock_t poll_lock; int poll_owner; struct net_device *dev; struct sk_buff *gro_list; struct sk_buff *skb; struct hrtimer timer; struct list_head dev_list; struct hlist_node napi_hash_node; unsigned int napi_id; } ;
347 enum rx_handler_result { RX_HANDLER_CONSUMED = 0, RX_HANDLER_ANOTHER = 1, RX_HANDLER_EXACT = 2, RX_HANDLER_PASS = 3 } ;
395 typedef enum rx_handler_result rx_handler_result_t;
396 typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **);
541 struct Qdisc ;
541 struct netdev_queue { struct net_device *dev; struct Qdisc *qdisc; struct Qdisc *qdisc_sleeping; struct kobject kobj; int numa_node; unsigned long tx_maxrate; unsigned long trans_timeout; spinlock_t _xmit_lock; int xmit_lock_owner; unsigned long trans_start; unsigned long state; struct dql dql; } ;
612 struct rps_map { unsigned int len; struct callback_head rcu; u16 cpus[0U]; } ;
624 struct rps_dev_flow { u16 cpu; u16 filter; unsigned int last_qtail; } ;
636 struct rps_dev_flow_table { unsigned int mask; struct callback_head rcu; struct rps_dev_flow flows[0U]; } ;
688 struct netdev_rx_queue { struct rps_map *rps_map; struct rps_dev_flow_table *rps_flow_table; struct kobject kobj; struct net_device *dev; } ;
711 struct xps_map { unsigned int len; unsigned int alloc_len; struct callback_head rcu; u16 queues[0U]; } ;
724 struct xps_dev_maps { struct callback_head rcu; struct xps_map *cpu_map[0U]; } ;
735 struct netdev_tc_txq { u16 count; u16 offset; } ;
746 struct netdev_fcoe_hbainfo { char manufacturer[64U]; char serial_number[64U]; char hardware_version[64U]; char driver_version[64U]; char optionrom_version[64U]; char firmware_version[64U]; char model[256U]; char model_description[256U]; } ;
762 struct netdev_phys_item_id { unsigned char id[32U]; unsigned char id_len; } ;
790 struct tc_cls_u32_offload ;
791 struct tc_cls_flower_offload ;
791 struct tc_cls_matchall_offload ;
791 struct tc_cls_bpf_offload ;
791 union __anonunion____missing_field_name_469 { u8 tc; struct tc_cls_u32_offload *cls_u32; struct tc_cls_flower_offload *cls_flower; struct tc_cls_matchall_offload *cls_mall; struct tc_cls_bpf_offload *cls_bpf; } ;
791 struct tc_to_netdev { unsigned int type; union __anonunion____missing_field_name_469 __annonCompField106; } ;
807 enum xdp_netdev_command { XDP_SETUP_PROG = 0, XDP_QUERY_PROG = 1 } ;
812 union __anonunion____missing_field_name_470 { struct bpf_prog *prog; bool prog_attached; } ;
812 struct netdev_xdp { enum xdp_netdev_command command; union __anonunion____missing_field_name_470 __annonCompField107; } ;
835 struct net_device_ops { int (*ndo_init)(struct net_device *); void (*ndo_uninit)(struct net_device *); int (*ndo_open)(struct net_device *); int (*ndo_stop)(struct net_device *); netdev_tx_t (*ndo_start_xmit)(struct sk_buff *, struct net_device *); netdev_features_t (*ndo_features_check)(struct sk_buff *, struct net_device *, netdev_features_t ); u16 (*ndo_select_queue)(struct net_device *, struct sk_buff *, void *, u16 (*)(struct net_device *, struct sk_buff *)); void (*ndo_change_rx_flags)(struct net_device *, int); void (*ndo_set_rx_mode)(struct net_device *); int (*ndo_set_mac_address)(struct net_device *, void *); int (*ndo_validate_addr)(struct net_device *); int (*ndo_do_ioctl)(struct net_device *, struct ifreq *, int); int (*ndo_set_config)(struct net_device *, struct ifmap *); int (*ndo_change_mtu)(struct net_device *, int); int (*ndo_neigh_setup)(struct net_device *, struct neigh_parms *); void (*ndo_tx_timeout)(struct net_device *); struct rtnl_link_stats64 * (*ndo_get_stats64)(struct net_device *, struct rtnl_link_stats64 *); bool (*ndo_has_offload_stats)(int); int (*ndo_get_offload_stats)(int, const struct net_device *, void *); struct net_device_stats * (*ndo_get_stats)(struct net_device *); int (*ndo_vlan_rx_add_vid)(struct net_device *, __be16 , u16 ); int (*ndo_vlan_rx_kill_vid)(struct net_device *, __be16 , u16 ); void (*ndo_poll_controller)(struct net_device *); int (*ndo_netpoll_setup)(struct net_device *, struct netpoll_info *); void (*ndo_netpoll_cleanup)(struct net_device *); int (*ndo_busy_poll)(struct napi_struct *); int (*ndo_set_vf_mac)(struct net_device *, int, u8 *); int (*ndo_set_vf_vlan)(struct net_device *, int, u16 , u8 , __be16 ); int (*ndo_set_vf_rate)(struct net_device *, int, int, int); int (*ndo_set_vf_spoofchk)(struct net_device *, int, bool ); int (*ndo_set_vf_trust)(struct net_device *, int, bool ); int (*ndo_get_vf_config)(struct net_device *, int, struct ifla_vf_info *); int (*ndo_set_vf_link_state)(struct net_device 
*, int, int); int (*ndo_get_vf_stats)(struct net_device *, int, struct ifla_vf_stats *); int (*ndo_set_vf_port)(struct net_device *, int, struct nlattr **); int (*ndo_get_vf_port)(struct net_device *, int, struct sk_buff *); int (*ndo_set_vf_guid)(struct net_device *, int, u64 , int); int (*ndo_set_vf_rss_query_en)(struct net_device *, int, bool ); int (*ndo_setup_tc)(struct net_device *, u32 , __be16 , struct tc_to_netdev *); int (*ndo_fcoe_enable)(struct net_device *); int (*ndo_fcoe_disable)(struct net_device *); int (*ndo_fcoe_ddp_setup)(struct net_device *, u16 , struct scatterlist *, unsigned int); int (*ndo_fcoe_ddp_done)(struct net_device *, u16 ); int (*ndo_fcoe_ddp_target)(struct net_device *, u16 , struct scatterlist *, unsigned int); int (*ndo_fcoe_get_hbainfo)(struct net_device *, struct netdev_fcoe_hbainfo *); int (*ndo_fcoe_get_wwn)(struct net_device *, u64 *, int); int (*ndo_rx_flow_steer)(struct net_device *, const struct sk_buff *, u16 , u32 ); int (*ndo_add_slave)(struct net_device *, struct net_device *); int (*ndo_del_slave)(struct net_device *, struct net_device *); netdev_features_t (*ndo_fix_features)(struct net_device *, netdev_features_t ); int (*ndo_set_features)(struct net_device *, netdev_features_t ); int (*ndo_neigh_construct)(struct net_device *, struct neighbour *); void (*ndo_neigh_destroy)(struct net_device *, struct neighbour *); int (*ndo_fdb_add)(struct ndmsg *, struct nlattr **, struct net_device *, const unsigned char *, u16 , u16 ); int (*ndo_fdb_del)(struct ndmsg *, struct nlattr **, struct net_device *, const unsigned char *, u16 ); int (*ndo_fdb_dump)(struct sk_buff *, struct netlink_callback *, struct net_device *, struct net_device *, int *); int (*ndo_bridge_setlink)(struct net_device *, struct nlmsghdr *, u16 ); int (*ndo_bridge_getlink)(struct sk_buff *, u32 , u32 , struct net_device *, u32 , int); int (*ndo_bridge_dellink)(struct net_device *, struct nlmsghdr *, u16 ); int (*ndo_change_carrier)(struct net_device *, 
bool ); int (*ndo_get_phys_port_id)(struct net_device *, struct netdev_phys_item_id *); int (*ndo_get_phys_port_name)(struct net_device *, char *, size_t ); void (*ndo_udp_tunnel_add)(struct net_device *, struct udp_tunnel_info *); void (*ndo_udp_tunnel_del)(struct net_device *, struct udp_tunnel_info *); void * (*ndo_dfwd_add_station)(struct net_device *, struct net_device *); void (*ndo_dfwd_del_station)(struct net_device *, void *); netdev_tx_t (*ndo_dfwd_start_xmit)(struct sk_buff *, struct net_device *, void *); int (*ndo_get_lock_subclass)(struct net_device *); int (*ndo_set_tx_maxrate)(struct net_device *, int, u32 ); int (*ndo_get_iflink)(const struct net_device *); int (*ndo_change_proto_down)(struct net_device *, bool ); int (*ndo_fill_metadata_dst)(struct net_device *, struct sk_buff *); void (*ndo_set_rx_headroom)(struct net_device *, int); int (*ndo_xdp)(struct net_device *, struct netdev_xdp *); } ;
1371 struct __anonstruct_adj_list_471 { struct list_head upper; struct list_head lower; } ;
1371 struct __anonstruct_all_adj_list_472 { struct list_head upper; struct list_head lower; } ;
1371 struct iw_handler_def ;
1371 struct iw_public_data ;
1371 struct switchdev_ops ;
1371 struct l3mdev_ops ;
1371 struct ndisc_ops ;
1371 struct vlan_info ;
1371 struct tipc_bearer ;
1371 struct in_device ;
1371 struct dn_dev ;
1371 struct inet6_dev ;
1371 struct tcf_proto ;
1371 struct cpu_rmap ;
1371 struct pcpu_lstats ;
1371 struct pcpu_sw_netstats ;
1371 struct pcpu_dstats ;
1371 struct pcpu_vstats ;
1371 union __anonunion____missing_field_name_473 { void *ml_priv; struct pcpu_lstats *lstats; struct pcpu_sw_netstats *tstats; struct pcpu_dstats *dstats; struct pcpu_vstats *vstats; } ;
1371 struct garp_port ;
1371 struct mrp_port ;
1371 struct rtnl_link_ops ;
1371 struct net_device { char name[16U]; struct hlist_node name_hlist; char *ifalias; unsigned long mem_end; unsigned long mem_start; unsigned long base_addr; int irq; atomic_t carrier_changes; unsigned long state; struct list_head dev_list; struct list_head napi_list; struct list_head unreg_list; struct list_head close_list; struct list_head ptype_all; struct list_head ptype_specific; struct __anonstruct_adj_list_471 adj_list; struct __anonstruct_all_adj_list_472 all_adj_list; netdev_features_t features; netdev_features_t hw_features; netdev_features_t wanted_features; netdev_features_t vlan_features; netdev_features_t hw_enc_features; netdev_features_t mpls_features; netdev_features_t gso_partial_features; int ifindex; int group; struct net_device_stats stats; atomic_long_t rx_dropped; atomic_long_t tx_dropped; atomic_long_t rx_nohandler; const struct iw_handler_def *wireless_handlers; struct iw_public_data *wireless_data; const struct net_device_ops *netdev_ops; const struct ethtool_ops *ethtool_ops; const struct switchdev_ops *switchdev_ops; const struct l3mdev_ops *l3mdev_ops; const struct ndisc_ops *ndisc_ops; const struct header_ops *header_ops; unsigned int flags; unsigned int priv_flags; unsigned short gflags; unsigned short padded; unsigned char operstate; unsigned char link_mode; unsigned char if_port; unsigned char dma; unsigned int mtu; unsigned short type; unsigned short hard_header_len; unsigned short needed_headroom; unsigned short needed_tailroom; unsigned char perm_addr[32U]; unsigned char addr_assign_type; unsigned char addr_len; unsigned short neigh_priv_len; unsigned short dev_id; unsigned short dev_port; spinlock_t addr_list_lock; unsigned char name_assign_type; bool uc_promisc; struct netdev_hw_addr_list uc; struct netdev_hw_addr_list mc; struct netdev_hw_addr_list dev_addrs; struct kset *queues_kset; unsigned int promiscuity; unsigned int allmulti; struct vlan_info *vlan_info; struct dsa_switch_tree *dsa_ptr; struct tipc_bearer *tipc_ptr; 
void *atalk_ptr; struct in_device *ip_ptr; struct dn_dev *dn_ptr; struct inet6_dev *ip6_ptr; void *ax25_ptr; struct wireless_dev *ieee80211_ptr; struct wpan_dev *ieee802154_ptr; struct mpls_dev *mpls_ptr; unsigned long last_rx; unsigned char *dev_addr; struct netdev_rx_queue *_rx; unsigned int num_rx_queues; unsigned int real_num_rx_queues; unsigned long gro_flush_timeout; rx_handler_func_t *rx_handler; void *rx_handler_data; struct tcf_proto *ingress_cl_list; struct netdev_queue *ingress_queue; struct nf_hook_entry *nf_hooks_ingress; unsigned char broadcast[32U]; struct cpu_rmap *rx_cpu_rmap; struct hlist_node index_hlist; struct netdev_queue *_tx; unsigned int num_tx_queues; unsigned int real_num_tx_queues; struct Qdisc *qdisc; struct hlist_head qdisc_hash[16U]; unsigned long tx_queue_len; spinlock_t tx_global_lock; int watchdog_timeo; struct xps_dev_maps *xps_maps; struct tcf_proto *egress_cl_list; struct timer_list watchdog_timer; int *pcpu_refcnt; struct list_head todo_list; struct list_head link_watch_list; unsigned char reg_state; bool dismantle; unsigned short rtnl_link_state; void (*destructor)(struct net_device *); struct netpoll_info *npinfo; possible_net_t nd_net; union __anonunion____missing_field_name_473 __annonCompField108; struct garp_port *garp_port; struct mrp_port *mrp_port; struct device dev; const struct attribute_group *sysfs_groups[4U]; const struct attribute_group *sysfs_rx_queue_group; const struct rtnl_link_ops *rtnl_link_ops; unsigned int gso_max_size; u16 gso_max_segs; const struct dcbnl_rtnl_ops *dcbnl_ops; u8 num_tc; struct netdev_tc_txq tc_to_txq[16U]; u8 prio_tc_map[16U]; unsigned int fcoe_ddp_xid; struct netprio_map *priomap; struct phy_device *phydev; struct lock_class_key *qdisc_tx_busylock; struct lock_class_key *qdisc_running_key; bool proto_down; } ;
2180 struct packet_type { __be16 type; struct net_device *dev; int (*func)(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *); bool (*id_match)(struct packet_type *, struct sock *); void *af_packet_priv; struct list_head list; } ;
2210 struct pcpu_sw_netstats { u64 rx_packets; u64 rx_bytes; u64 tx_packets; u64 tx_bytes; struct u64_stats_sync syncp; } ;
3221 enum skb_free_reason { SKB_REASON_CONSUMED = 0, SKB_REASON_DROPPED = 1 } ;
475 struct macb_platform_data { u32 phy_mask; int phy_irq_pin; u8 is_rmii; u8 rev_eth_addr; } ;
25 struct mfd_cell ;
26 struct platform_device { const char *name; int id; bool id_auto; struct device dev; u32 num_resources; struct resource *resource; const struct platform_device_id *id_entry; char *driver_override; struct mfd_cell *mfd_cell; struct pdev_archdata archdata; } ;
59 enum of_gpio_flags { OF_GPIO_ACTIVE_LOW = 1, OF_GPIO_SINGLE_ENDED = 2 } ;
17 struct macb_dma_desc { u32 addr; u32 ctrl; u32 addrh; u32 resvd; } ;
486 struct macb_tx_skb { struct sk_buff *skb; dma_addr_t mapping; size_t size; bool mapped_as_page; } ;
585 struct macb_stats { u32 rx_pause_frames; u32 tx_ok; u32 tx_single_cols; u32 tx_multiple_cols; u32 rx_ok; u32 rx_fcs_errors; u32 rx_align_errors; u32 tx_deferred; u32 tx_late_cols; u32 tx_excessive_cols; u32 tx_underruns; u32 tx_carrier_errors; u32 rx_resource_errors; u32 rx_overruns; u32 rx_symbol_errors; u32 rx_oversize_pkts; u32 rx_jabbers; u32 rx_undersize_pkts; u32 sqe_test_errors; u32 rx_length_mismatch; u32 tx_pause_frames; } ;
612 struct gem_stats { u32 tx_octets_31_0; u32 tx_octets_47_32; u32 tx_frames; u32 tx_broadcast_frames; u32 tx_multicast_frames; u32 tx_pause_frames; u32 tx_64_byte_frames; u32 tx_65_127_byte_frames; u32 tx_128_255_byte_frames; u32 tx_256_511_byte_frames; u32 tx_512_1023_byte_frames; u32 tx_1024_1518_byte_frames; u32 tx_greater_than_1518_byte_frames; u32 tx_underrun; u32 tx_single_collision_frames; u32 tx_multiple_collision_frames; u32 tx_excessive_collisions; u32 tx_late_collisions; u32 tx_deferred_frames; u32 tx_carrier_sense_errors; u32 rx_octets_31_0; u32 rx_octets_47_32; u32 rx_frames; u32 rx_broadcast_frames; u32 rx_multicast_frames; u32 rx_pause_frames; u32 rx_64_byte_frames; u32 rx_65_127_byte_frames; u32 rx_128_255_byte_frames; u32 rx_256_511_byte_frames; u32 rx_512_1023_byte_frames; u32 rx_1024_1518_byte_frames; u32 rx_greater_than_1518_byte_frames; u32 rx_undersized_frames; u32 rx_oversize_frames; u32 rx_jabbers; u32 rx_frame_check_sequence_errors; u32 rx_length_field_frame_errors; u32 rx_symbol_errors; u32 rx_alignment_errors; u32 rx_resource_errors; u32 rx_overruns; u32 rx_ip_header_checksum_errors; u32 rx_tcp_checksum_errors; u32 rx_udp_checksum_errors; } ;
660 struct gem_statistic { char stat_string[32U]; int offset; u32 stat_bits; } ;
695 struct macb ;
696 struct macb_or_gem_ops { int (*mog_alloc_rx_buffers)(struct macb *); void (*mog_free_rx_buffers)(struct macb *); void (*mog_init_rings)(struct macb *); int (*mog_rx)(struct macb *, int); } ;
770 struct macb_config { u32 caps; unsigned int dma_burst_length; int (*clk_init)(struct platform_device *, struct clk **, struct clk **, struct clk **, struct clk **); int (*init)(struct platform_device *); int jumbo_max_len; } ;
780 struct macb_queue { struct macb *bp; int irq; unsigned int ISR; unsigned int IER; unsigned int IDR; unsigned int IMR; unsigned int TBQP; unsigned int TBQPH; unsigned int tx_head; unsigned int tx_tail; struct macb_dma_desc *tx_ring; struct macb_tx_skb *tx_skb; dma_addr_t tx_ring_dma; struct work_struct tx_error_task; } ;
798 union __anonunion_hw_stats_482 { struct macb_stats macb; struct gem_stats gem; } ;
798 struct macb { void *regs; bool native_io; u32 (*macb_reg_readl)(struct macb *, int); void (*macb_reg_writel)(struct macb *, int, u32 ); unsigned int rx_tail; unsigned int rx_prepared_head; struct macb_dma_desc *rx_ring; struct sk_buff **rx_skbuff; void *rx_buffers; size_t rx_buffer_size; unsigned int num_queues; unsigned int queue_mask; struct macb_queue queues[8U]; spinlock_t lock; struct platform_device *pdev; struct clk *pclk; struct clk *hclk; struct clk *tx_clk; struct clk *rx_clk; struct net_device *dev; struct napi_struct napi; struct net_device_stats stats; union __anonunion_hw_stats_482 hw_stats; dma_addr_t rx_ring_dma; dma_addr_t rx_buffers_dma; struct macb_or_gem_ops macbgem_ops; struct mii_bus *mii_bus; int link; int speed; int duplex; u32 caps; unsigned int dma_burst_length; phy_interface_t phy_interface; struct gpio_desc *reset_gpio; struct sk_buff *skb; dma_addr_t skb_physaddr; int skb_length; unsigned int max_tx_length; u64 ethtool_stats[43U]; unsigned int rx_frm_len_mask; unsigned int jumbo_max_len; u32 wol; } ;
1 void * __builtin_memcpy(void *, const void *, unsigned long);
1 long int __builtin_expect(long, long);
241 void __read_once_size(const volatile void *p, void *res, int size);
266 void __write_once_size(volatile void *p, void *res, int size);
34 extern struct module __this_module;
72 void set_bit(long nr, volatile unsigned long *addr);
110 void clear_bit(long nr, volatile unsigned long *addr);
204 bool test_and_set_bit(long nr, volatile unsigned long *addr);
308 bool constant_test_bit(long nr, const volatile unsigned long *addr);
63 void __dynamic_dev_dbg(struct _ddebug *, const struct device *, const char *, ...);
69 void __dynamic_netdev_dbg(struct _ddebug *, const struct net_device *, const char *, ...);
411 int snprintf(char *, size_t , const char *, ...);
3 bool ldv_is_err(const void *ptr);
6 long int ldv_ptr_err(const void *ptr);
8 void ldv_dma_map_page();
9 void ldv_dma_mapping_error();
32 long int PTR_ERR(const void *ptr);
41 bool IS_ERR(const void *ptr);
25 void INIT_LIST_HEAD(struct list_head *list);
24 int atomic_read(const atomic_t *v);
71 void warn_slowpath_null(const char *, const int);
281 void lockdep_init_map(struct lockdep_map *, const char *, struct lock_class_key *, int);
7 extern unsigned long page_offset_base;
9 extern unsigned long vmemmap_base;
331 extern struct pv_irq_ops pv_irq_ops;
23 unsigned long int __phys_addr(unsigned long);
32 void * __memcpy(void *, const void *, size_t );
760 unsigned long int arch_local_save_flags();
765 void arch_local_irq_restore(unsigned long f);
770 void arch_local_irq_disable();
780 unsigned long int arch_local_irq_save();
155 int arch_irqs_disabled_flags(unsigned long flags);
20 void trace_hardirqs_on();
21 void trace_hardirqs_off();
581 void rep_nop();
586 void cpu_relax();
93 void __raw_spin_lock_init(raw_spinlock_t *, const char *, struct lock_class_key *);
22 void _raw_spin_lock(raw_spinlock_t *);
34 unsigned long int _raw_spin_lock_irqsave(raw_spinlock_t *);
41 void _raw_spin_unlock(raw_spinlock_t *);
45 void _raw_spin_unlock_irqrestore(raw_spinlock_t *, unsigned long);
289 raw_spinlock_t * spinlock_check(spinlock_t *lock);
300 void spin_lock(spinlock_t *lock);
345 void spin_unlock(spinlock_t *lock);
360 void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags);
78 extern volatile unsigned long jiffies;
369 unsigned long int __usecs_to_jiffies(const unsigned int);
405 unsigned long int usecs_to_jiffies(const unsigned int u);
181 void __init_work(struct work_struct *, int);
353 extern struct workqueue_struct *system_wq;
430 bool queue_work_on(int, struct workqueue_struct *, struct work_struct *);
471 bool queue_work(struct workqueue_struct *wq, struct work_struct *work);
530 bool schedule_work(struct work_struct *work);
186 int clk_prepare(struct clk *);
205 void clk_unprepare(struct clk *);
249 struct clk * devm_clk_get(struct device *, const char *);
261 int clk_enable(struct clk *);
277 void clk_disable(struct clk *);
284 unsigned long int clk_get_rate(struct clk *);
337 long int clk_round_rate(struct clk *, unsigned long);
346 int clk_set_rate(struct clk *, unsigned long);
483 int clk_prepare_enable(struct clk *clk);
498 void clk_disable_unprepare(struct clk *clk);
62 unsigned int __readl(const volatile void *addr);
70 void __writel(unsigned int val, volatile void *addr);
154 void kfree(const void *);
330 void * __kmalloc(size_t , gfp_t );
478 void * kmalloc(size_t size, gfp_t flags);
634 void * kzalloc(size_t size, gfp_t flags);
123 void of_node_put(struct device_node *);
275 struct device_node * of_get_next_available_child(const struct device_node *, struct device_node *);
328 const void * of_get_property(const struct device_node *, const char *, int *);
337 const struct of_device_id * of_match_node(const struct of_device_id *, const struct device_node *);
683 void * devm_ioremap_resource(struct device *, struct resource *);
103 int device_init_wakeup(struct device *, bool );
104 int device_set_wakeup_enable(struct device *, bool );
915 void * dev_get_drvdata(const struct device *dev);
920 void dev_set_drvdata(struct device *dev, void *data);
1049 void * dev_get_platdata(const struct device *dev);
1138 void dev_err(const struct device *, const char *, ...);
1144 void _dev_info(const struct device *, const char *, ...);
97 int gpiod_direction_output(struct gpio_desc *, int);
102 void gpiod_set_value(struct gpio_desc *, int);
128 int gpiod_to_irq(const struct gpio_desc *);
131 struct gpio_desc * gpio_to_desc(unsigned int);
45 bool gpio_is_valid(int number);
111 int __gpio_to_irq(unsigned int gpio);
68 int gpio_to_irq(unsigned int gpio);
84 int devm_gpio_request(struct device *, unsigned int, const char *);
164 int devm_request_threaded_irq(struct device *, unsigned int, irqreturn_t (*)(int, void *), irqreturn_t (*)(int, void *), unsigned long, const char *, void *);
170 int devm_request_irq(struct device *dev, unsigned int irq, irqreturn_t (*handler)(int, void *), unsigned long irqflags, const char *devname, void *dev_id);
48 void usleep_range(unsigned long, unsigned long);
1003 void * lowmem_page_address(const struct page *page);
131 void kmemcheck_mark_initialized(void *address, unsigned int n);
36 void get_random_bytes(void *, int);
37 void debug_dma_map_page(struct device *, struct page *, size_t , size_t , int, dma_addr_t , bool );
42 void debug_dma_mapping_error(struct device *, dma_addr_t );
44 void debug_dma_unmap_page(struct device *, dma_addr_t , size_t , int, bool );
53 void debug_dma_alloc_coherent(struct device *, size_t , dma_addr_t , void *);
136 int valid_dma_direction(int dma_direction);
28 extern struct dma_map_ops *dma_ops;
30 struct dma_map_ops * get_dma_ops(struct device *dev);
42 bool arch_dma_alloc_attrs(struct device **, gfp_t *);
46 int dma_supported(struct device *, u64 );
180 dma_addr_t ldv_dma_map_single_attrs_5(struct device *dev, void *ptr, size_t size, enum dma_data_direction dir, unsigned long attrs);
180 dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr, size_t size, enum dma_data_direction dir, unsigned long attrs);
203 void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir, unsigned long attrs);
250 dma_addr_t ldv_dma_map_page_6(struct device *dev, struct page *page, size_t offset, size_t size, enum dma_data_direction dir);
250 dma_addr_t dma_map_page(struct device *dev, struct page *page, size_t offset, size_t size, enum dma_data_direction dir);
269 void dma_unmap_page(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir);
450 void * dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs);
491 void * dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag);
497 void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_handle);
517 int ldv_dma_mapping_error_7(struct device *dev, dma_addr_t dma_addr);
517 int dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
549 int dma_set_mask(struct device *dev, u64 mask);
325 unsigned int skb_frag_size(const skb_frag_t *frag);
904 void consume_skb(struct sk_buff *);
981 int pskb_expand_head(struct sk_buff *, int, int, gfp_t );
1190 unsigned char * skb_end_pointer(const struct sk_buff *skb);
1341 int skb_header_cloned(const struct sk_buff *skb);
1796 unsigned int skb_headlen(const struct sk_buff *skb);
1912 unsigned char * skb_put(struct sk_buff *, unsigned int);
1931 unsigned char * __skb_pull(struct sk_buff *skb, unsigned int len);
1974 unsigned int skb_headroom(const struct sk_buff *skb);
2013 void skb_reserve(struct sk_buff *skb, int len);
2220 unsigned char * skb_checksum_start(const struct sk_buff *skb);
2419 struct sk_buff * __netdev_alloc_skb(struct net_device *, unsigned int, gfp_t );
2435 struct sk_buff * netdev_alloc_skb(struct net_device *dev, unsigned int length);
2555 struct page * skb_frag_page(const skb_frag_t *frag);
2673 dma_addr_t skb_frag_dma_map(struct device *dev, const skb_frag_t *frag, size_t offset, size_t size, enum dma_data_direction dir);
2717 int __skb_cow(struct sk_buff *skb, unsigned int headroom, int cloned);
2758 int skb_cow_head(struct sk_buff *skb, unsigned int headroom);
3168 void skb_copy_to_linear_data_offset(struct sk_buff *skb, const int offset, const void *from, const unsigned int len);
3223 void skb_clone_tx_timestamp(struct sk_buff *);
3269 void skb_tstamp_tx(struct sk_buff *, struct skb_shared_hwtstamps *);
3272 void sw_tx_timestamp(struct sk_buff *skb);
3291 void skb_tx_timestamp(struct sk_buff *skb);
3653 u16 skb_get_queue_mapping(const struct sk_buff *skb);
3805 void skb_checksum_none_assert(const struct sk_buff *skb);
83 u32 ethtool_op_get_link(struct net_device *);
84 int ethtool_op_get_ts_info(struct net_device *, struct ethtool_ts_info *);
203 struct mii_bus * mdiobus_alloc_size(size_t );
204 struct mii_bus * mdiobus_alloc();
209 int __mdiobus_register(struct mii_bus *, struct module *);
211 void mdiobus_unregister(struct mii_bus *);
212 void mdiobus_free(struct mii_bus *);
220 struct phy_device * mdiobus_scan(struct mii_bus *, int);
753 struct phy_device * phy_find_first(struct mii_bus *);
756 int phy_connect_direct(struct net_device *, struct phy_device *, void (*)(struct net_device *), phy_interface_t );
762 void phy_disconnect(struct phy_device *);
764 void phy_start(struct phy_device *);
765 void phy_stop(struct phy_device *);
788 void phy_attached_info(struct phy_device *);
815 int phy_mii_ioctl(struct phy_device *, struct ifreq *, int);
835 int phy_ethtool_get_link_ksettings(struct net_device *, struct ethtool_link_ksettings *);
837 int phy_ethtool_set_link_ksettings(struct net_device *, const struct ethtool_link_ksettings *);
398 void __napi_schedule(struct napi_struct *);
401 bool napi_disable_pending(struct napi_struct *n);
415 bool napi_schedule_prep(struct napi_struct *n);
447 bool napi_reschedule(struct napi_struct *napi);
465 void napi_complete(struct napi_struct *n);
502 void napi_disable(struct napi_struct *);
511 void napi_enable(struct napi_struct *n);
1961 struct netdev_queue * netdev_get_tx_queue(const struct net_device *dev, unsigned int index);
2056 void * netdev_priv(const struct net_device *dev);
2087 void netif_napi_add(struct net_device *, struct napi_struct *, int (*)(struct napi_struct *, int), int);
2422 void free_netdev(struct net_device *);
2843 void netif_tx_start_queue(struct netdev_queue *dev_queue);
2854 void netif_start_queue(struct net_device *dev);
2859 void netif_tx_start_all_queues(struct net_device *dev);
2869 void netif_tx_wake_queue(struct netdev_queue *);
2878 void netif_wake_queue(struct net_device *dev);
2893 void netif_tx_stop_queue(struct netdev_queue *dev_queue);
2905 void netif_stop_queue(struct net_device *dev);
2910 void netif_tx_stop_all_queues(struct net_device *);
2912 bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue);
3097 bool netif_running(const struct net_device *dev);
3130 void netif_stop_subqueue(struct net_device *dev, u16 queue_index);
3143 bool __netif_subqueue_stopped(const struct net_device *dev, u16 queue_index);
3157 void netif_wake_subqueue(struct net_device *, u16 );
3227 void __dev_kfree_skb_irq(struct sk_buff *, enum skb_free_reason );
3228 void __dev_kfree_skb_any(struct sk_buff *, enum skb_free_reason );
3249 void dev_kfree_skb_irq(struct sk_buff *skb);
3259 void dev_kfree_skb_any(struct sk_buff *skb);
3269 int netif_rx(struct sk_buff *);
3271 int netif_receive_skb(struct sk_buff *);
3377 void netif_carrier_on(struct net_device *);
3379 void netif_carrier_off(struct net_device *);
3690 int register_netdev(struct net_device *);
3691 void unregister_netdev(struct net_device *);
4274 void netdev_err(const struct net_device *, const char *, ...);
4276 void netdev_warn(const struct net_device *, const char *, ...);
4280 void netdev_info(const struct net_device *, const char *, ...);
36 __be16 eth_type_trans(struct sk_buff *, struct net_device *);
48 int eth_mac_addr(struct net_device *, void *);
49 int eth_change_mtu(struct net_device *, int);
50 int eth_validate_addr(struct net_device *);
52 struct net_device * alloc_etherdev_mqs(int, unsigned int, unsigned int);
96 bool is_zero_ether_addr(const u8 *addr);
114 bool is_multicast_ether_addr(const u8 *addr);
189 bool is_valid_ether_addr(const u8 *addr);
221 void eth_random_addr(u8 *addr);
261 void eth_hw_addr_random(struct net_device *dev);
274 void ether_addr_copy(u8 *dst, const u8 *src);
52 struct resource * platform_get_resource(struct platform_device *, unsigned int, unsigned int);
54 int platform_get_irq(struct platform_device *, unsigned int);
211 void * platform_get_drvdata(const struct platform_device *pdev);
216 void platform_set_drvdata(struct platform_device *pdev, void *data);
51 int of_get_named_gpio_flags(struct device_node *, const char *, int, enum of_gpio_flags *);
140 int of_get_named_gpio(struct device_node *np, const char *propname, int index);
16 int of_mdiobus_register(struct mii_bus *, struct device_node *);
14 int of_get_phy_mode(struct device_node *);
15 const void * of_get_mac_address(struct device_node *);
694 const struct gem_statistic gem_statistics[43U] = { { { 't', 'x', '_', 'o', 'c', 't', 'e', 't', 's', '\x0' }, 256, 0U }, { { 't', 'x', '_', 'f', 'r', 'a', 'm', 'e', 's', '\x0' }, 264, 0U }, { { 't', 'x', '_', 'b', 'r', 'o', 'a', 'd', 'c', 'a', 's', 't', '_', 'f', 'r', 'a', 'm', 'e', 's', '\x0' }, 268, 0U }, { { 't', 'x', '_', 'm', 'u', 'l', 't', 'i', 'c', 'a', 's', 't', '_', 'f', 'r', 'a', 'm', 'e', 's', '\x0' }, 272, 0U }, { { 't', 'x', '_', 'p', 'a', 'u', 's', 'e', '_', 'f', 'r', 'a', 'm', 'e', 's', '\x0' }, 276, 0U }, { { 't', 'x', '_', '6', '4', '_', 'b', 'y', 't', 'e', '_', 'f', 'r', 'a', 'm', 'e', 's', '\x0' }, 280, 0U }, { { 't', 'x', '_', '6', '5', '_', '1', '2', '7', '_', 'b', 'y', 't', 'e', '_', 'f', 'r', 'a', 'm', 'e', 's', '\x0' }, 284, 0U }, { { 't', 'x', '_', '1', '2', '8', '_', '2', '5', '5', '_', 'b', 'y', 't', 'e', '_', 'f', 'r', 'a', 'm', 'e', 's', '\x0' }, 288, 0U }, { { 't', 'x', '_', '2', '5', '6', '_', '5', '1', '1', '_', 'b', 'y', 't', 'e', '_', 'f', 'r', 'a', 'm', 'e', 's', '\x0' }, 292, 0U }, { { 't', 'x', '_', '5', '1', '2', '_', '1', '0', '2', '3', '_', 'b', 'y', 't', 'e', '_', 'f', 'r', 'a', 'm', 'e', 's', '\x0' }, 296, 0U }, { { 't', 'x', '_', '1', '0', '2', '4', '_', '1', '5', '1', '8', '_', 'b', 'y', 't', 'e', '_', 'f', 'r', 'a', 'm', 'e', 's', '\x0' }, 300, 0U }, { { 't', 'x', '_', 'g', 'r', 'e', 'a', 't', 'e', 'r', '_', 't', 'h', 'a', 'n', '_', '1', '5', '1', '8', '_', 'b', 'y', 't', 'e', '_', 'f', 'r', 'a', 'm', 'e', 's' }, 304, 0U }, { { 't', 'x', '_', 'u', 'n', 'd', 'e', 'r', 'r', 'u', 'n', '\x0' }, 308, 576U }, { { 't', 'x', '_', 's', 'i', 'n', 'g', 'l', 'e', '_', 'c', 'o', 'l', 'l', 'i', 's', 'i', 'o', 'n', '_', 'f', 'r', 'a', 'm', 'e', 's', '\x0' }, 312, 1088U }, { { 't', 'x', '_', 'm', 'u', 'l', 't', 'i', 'p', 'l', 'e', '_', 'c', 'o', 'l', 'l', 'i', 's', 'i', 'o', 'n', '_', 'f', 'r', 'a', 'm', 'e', 's', '\x0' }, 316, 1088U }, { { 't', 'x', '_', 'e', 'x', 'c', 'e', 's', 's', 'i', 'v', 'e', '_', 'c', 'o', 'l', 'l', 'i', 's', 
'i', 'o', 'n', 's', '\x0' }, 320, 1216U }, { { 't', 'x', '_', 'l', 'a', 't', 'e', '_', 'c', 'o', 'l', 'l', 'i', 's', 'i', 'o', 'n', 's', '\x0' }, 324, 1088U }, { { 't', 'x', '_', 'd', 'e', 'f', 'e', 'r', 'r', 'e', 'd', '_', 'f', 'r', 'a', 'm', 'e', 's', '\x0' }, 328, 0U }, { { 't', 'x', '_', 'c', 'a', 'r', 'r', 'i', 'e', 'r', '_', 's', 'e', 'n', 's', 'e', '_', 'e', 'r', 'r', 'o', 'r', 's', '\x0' }, 332, 1088U }, { { 'r', 'x', '_', 'o', 'c', 't', 'e', 't', 's', '\x0' }, 336, 0U }, { { 'r', 'x', '_', 'f', 'r', 'a', 'm', 'e', 's', '\x0' }, 344, 0U }, { { 'r', 'x', '_', 'b', 'r', 'o', 'a', 'd', 'c', 'a', 's', 't', '_', 'f', 'r', 'a', 'm', 'e', 's', '\x0' }, 348, 0U }, { { 'r', 'x', '_', 'm', 'u', 'l', 't', 'i', 'c', 'a', 's', 't', '_', 'f', 'r', 'a', 'm', 'e', 's', '\x0' }, 352, 0U }, { { 'r', 'x', '_', 'p', 'a', 'u', 's', 'e', '_', 'f', 'r', 'a', 'm', 'e', 's', '\x0' }, 356, 0U }, { { 'r', 'x', '_', '6', '4', '_', 'b', 'y', 't', 'e', '_', 'f', 'r', 'a', 'm', 'e', 's', '\x0' }, 360, 0U }, { { 'r', 'x', '_', '6', '5', '_', '1', '2', '7', '_', 'b', 'y', 't', 'e', '_', 'f', 'r', 'a', 'm', 'e', 's', '\x0' }, 364, 0U }, { { 'r', 'x', '_', '1', '2', '8', '_', '2', '5', '5', '_', 'b', 'y', 't', 'e', '_', 'f', 'r', 'a', 'm', 'e', 's', '\x0' }, 368, 0U }, { { 'r', 'x', '_', '2', '5', '6', '_', '5', '1', '1', '_', 'b', 'y', 't', 'e', '_', 'f', 'r', 'a', 'm', 'e', 's', '\x0' }, 372, 0U }, { { 'r', 'x', '_', '5', '1', '2', '_', '1', '0', '2', '3', '_', 'b', 'y', 't', 'e', '_', 'f', 'r', 'a', 'm', 'e', 's', '\x0' }, 376, 0U }, { { 'r', 'x', '_', '1', '0', '2', '4', '_', '1', '5', '1', '8', '_', 'b', 'y', 't', 'e', '_', 'f', 'r', 'a', 'm', 'e', 's', '\x0' }, 380, 0U }, { { 'r', 'x', '_', 'g', 'r', 'e', 'a', 't', 'e', 'r', '_', 't', 'h', 'a', 'n', '_', '1', '5', '1', '8', '_', 'b', 'y', 't', 'e', '_', 'f', 'r', 'a', 'm', 'e', 's' }, 384, 0U }, { { 'r', 'x', '_', 'u', 'n', 'd', 'e', 'r', 's', 'i', 'z', 'e', 'd', '_', 'f', 'r', 'a', 'm', 'e', 's', '\x0' }, 388, 3U }, { { 'r', 'x', '_', 
'o', 'v', 'e', 'r', 's', 'i', 'z', 'e', '_', 'f', 'r', 'a', 'm', 'e', 's', '\x0' }, 392, 3U }, { { 'r', 'x', '_', 'j', 'a', 'b', 'b', 'e', 'r', 's', '\x0' }, 396, 3U }, { { 'r', 'x', '_', 'f', 'r', 'a', 'm', 'e', '_', 'c', 'h', 'e', 'c', 'k', '_', 's', 'e', 'q', 'u', 'e', 'n', 'c', 'e', '_', 'e', 'r', 'r', 'o', 'r', 's', '\x0' }, 400, 9U }, { { 'r', 'x', '_', 'l', 'e', 'n', 'g', 't', 'h', '_', 'f', 'i', 'e', 'l', 'd', '_', 'f', 'r', 'a', 'm', 'e', '_', 'e', 'r', 'r', 'o', 'r', 's', '\x0' }, 404, 1U }, { { 'r', 'x', '_', 's', 'y', 'm', 'b', 'o', 'l', '_', 'e', 'r', 'r', 'o', 'r', 's', '\x0' }, 408, 17U }, { { 'r', 'x', '_', 'a', 'l', 'i', 'g', 'n', 'm', 'e', 'n', 't', '_', 'e', 'r', 'r', 'o', 'r', 's', '\x0' }, 412, 5U }, { { 'r', 'x', '_', 'r', 'e', 's', 'o', 'u', 'r', 'c', 'e', '_', 'e', 'r', 'r', 'o', 'r', 's', '\x0' }, 416, 5U }, { { 'r', 'x', '_', 'o', 'v', 'e', 'r', 'r', 'u', 'n', 's', '\x0' }, 420, 33U }, { { 'r', 'x', '_', 'i', 'p', '_', 'h', 'e', 'a', 'd', 'e', 'r', '_', 'c', 'h', 'e', 'c', 'k', 's', 'u', 'm', '_', 'e', 'r', 'r', 'o', 'r', 's', '\x0' }, 424, 1U }, { { 'r', 'x', '_', 't', 'c', 'p', '_', 'c', 'h', 'e', 'c', 'k', 's', 'u', 'm', '_', 'e', 'r', 'r', 'o', 'r', 's', '\x0' }, 428, 1U }, { { 'r', 'x', '_', 'u', 'd', 'p', '_', 'c', 'h', 'e', 'c', 'k', 's', 'u', 'm', '_', 'e', 'r', 'r', 'o', 'r', 's', '\x0' }, 432, 1U } };
862 bool macb_is_gem(struct macb *bp);
71 unsigned int macb_tx_ring_wrap(unsigned int index);
76 struct macb_dma_desc * macb_tx_desc(struct macb_queue *queue, unsigned int index);
82 struct macb_tx_skb * macb_tx_skb(struct macb_queue *queue, unsigned int index);
88 dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index);
97 unsigned int macb_rx_ring_wrap(unsigned int index);
102 struct macb_dma_desc * macb_rx_desc(struct macb *bp, unsigned int index);
107 void * macb_rx_buffer(struct macb *bp, unsigned int index);
113 u32 hw_readl_native(struct macb *bp, int offset);
118 void hw_writel_native(struct macb *bp, int offset, u32 value);
123 u32 hw_readl(struct macb *bp, int offset);
128 void hw_writel(struct macb *bp, int offset, u32 value);
137 bool hw_is_native_io(void *addr);
150 bool hw_is_gem(void *addr, bool native_io);
162 void macb_set_hwaddr(struct macb *bp);
181 void macb_get_hwaddr(struct macb *bp);
222 int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum);
242 int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value);
267 void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev);
305 void macb_handle_link_change(struct net_device *dev);
372 int macb_mii_probe(struct net_device *dev);
422 int macb_mii_init(struct macb *bp);
495 void macb_update_stats(struct macb *bp);
507 int macb_halt_tx(struct macb *bp);
527 void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb);
545 void macb_set_addr(struct macb_dma_desc *desc, dma_addr_t addr);
553 void macb_tx_error_task(struct work_struct *work);
659 void macb_tx_interrupt(struct macb_queue *queue);
728 void gem_rx_refill(struct macb *bp);
784 void discard_partial_frame(struct macb *bp, unsigned int begin, unsigned int end);
804 int gem_rx(struct macb *bp, int budget);
886 int macb_rx_frame(struct macb *bp, unsigned int first_frag, unsigned int last_frag);
967 void macb_init_rx_ring(struct macb *bp);
981 int macb_rx(struct macb *bp, int budget);
1056 int macb_poll(struct napi_struct *napi, int budget);
1090 irqreturn_t macb_interrupt(int irq, void *dev_id);
1200 void macb_poll_controller(struct net_device *dev);
1214 unsigned int macb_tx_map(struct macb *bp, struct macb_queue *queue, struct sk_buff *skb);
1343 int macb_clear_csum(struct sk_buff *skb);
1361 int macb_start_xmit(struct sk_buff *skb, struct net_device *dev);
1427 void macb_init_rx_buffer_size(struct macb *bp, size_t size);
1447 void gem_free_rx_buffers(struct macb *bp);
1478 void macb_free_rx_buffers(struct macb *bp);
1488 void macb_free_consistent(struct macb *bp);
1511 int gem_alloc_rx_buffers(struct macb *bp);
1526 int macb_alloc_rx_buffers(struct macb *bp);
1542 int macb_alloc_consistent(struct macb *bp);
1585 void gem_init_rings(struct macb *bp);
1607 void macb_init_rings(struct macb *bp);
1624 void macb_reset_hw(struct macb *bp);
1650 u32 gem_mdc_clk_div(struct macb *bp);
1671 u32 macb_mdc_clk_div(struct macb *bp);
1696 u32 macb_dbw(struct macb *bp);
1719 void macb_configure_dma(struct macb *bp);
1750 void macb_init_hw(struct macb *bp);
1843 int hash_bit_value(int bitnr, __u8 *addr);
1851 int hash_get_index(__u8 *addr);
1867 void macb_sethashtable(struct net_device *dev);
1887 void macb_set_rx_mode(struct net_device *dev);
1929 int macb_open(struct net_device *dev);
1967 int macb_close(struct net_device *dev);
1988 int macb_change_mtu(struct net_device *dev, int new_mtu);
2008 void gem_update_stats(struct macb *bp);
2029 struct net_device_stats * gem_get_stats(struct macb *bp);
2067 void gem_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data);
2077 int gem_get_sset_count(struct net_device *dev, int sset);
2087 void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p);
2100 struct net_device_stats * macb_get_stats(struct net_device *dev);
2147 int macb_get_regs_len(struct net_device *netdev);
2152 void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p);
2185 void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol);
2200 int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol);
2218 const struct ethtool_ops macb_ethtool_ops = { 0, 0, 0, &macb_get_regs_len, &macb_get_regs, &macb_get_wol, &macb_set_wol, 0, 0, 0, ðtool_op_get_link, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ðtool_op_get_ts_info, 0, 0, 0, 0, 0, 0, 0, 0, &phy_ethtool_get_link_ksettings, &phy_ethtool_set_link_ksettings };
2229 const struct ethtool_ops gem_ethtool_ops = { 0, 0, 0, &macb_get_regs_len, &macb_get_regs, 0, 0, 0, 0, 0, ðtool_op_get_link, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, &gem_get_ethtool_strings, 0, &gem_get_ethtool_stats, 0, 0, 0, 0, &gem_get_sset_count, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ðtool_op_get_ts_info, 0, 0, 0, 0, 0, 0, 0, 0, &phy_ethtool_get_link_ksettings, &phy_ethtool_set_link_ksettings };
2241 int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
2254 int macb_set_features(struct net_device *netdev, netdev_features_t features);
2288 const struct net_device_ops macb_netdev_ops = { 0, 0, &macb_open, &macb_close, (netdev_tx_t (*)(struct sk_buff *, struct net_device *))(&macb_start_xmit), 0, 0, 0, &macb_set_rx_mode, ð_mac_addr, ð_validate_addr, &macb_ioctl, 0, &macb_change_mtu, 0, 0, 0, 0, 0, &macb_get_stats, 0, 0, &macb_poll_controller, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, &macb_set_features, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2307 void macb_configure_caps(struct macb *bp, const struct macb_config *dt_conf);
2329 void macb_probe_queues(void *mem, bool native_io, unsigned int *queue_mask, unsigned int *num_queues);
2358 int macb_clk_init(struct platform_device *pdev, struct clk **pclk, struct clk **hclk, struct clk **tx_clk, struct clk **rx_clk);
2424 int macb_init(struct platform_device *pdev);
2545 int at91ether_start(struct net_device *dev);
2596 int at91ether_open(struct net_device *dev);
2630 int at91ether_close(struct net_device *dev);
2665 int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev);
2694 void at91ether_rx(struct net_device *dev);
2732 irqreturn_t at91ether_interrupt(int irq, void *dev_id);
2778 void at91ether_poll_controller(struct net_device *dev);
2788 const struct net_device_ops at91ether_netdev_ops = { 0, 0, &at91ether_open, &at91ether_close, (netdev_tx_t (*)(struct sk_buff *, struct net_device *))(&at91ether_start_xmit), 0, 0, 0, &macb_set_rx_mode, ð_mac_addr, ð_validate_addr, &macb_ioctl, 0, ð_change_mtu, 0, 0, 0, 0, 0, &macb_get_stats, 0, 0, &at91ether_poll_controller, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2803 int at91ether_clk_init(struct platform_device *pdev, struct clk **pclk, struct clk **hclk, struct clk **tx_clk, struct clk **rx_clk);
2826 int at91ether_init(struct platform_device *pdev);
2852 const struct macb_config at91sam9260_config = { 6U, 0U, &macb_clk_init, &macb_init, 0 };
2858 const struct macb_config pc302gem_config = { 1610612736U, 16U, &macb_clk_init, &macb_init, 0 };
2865 const struct macb_config sama5d2_config = { 4U, 16U, &macb_clk_init, &macb_init, 0 };
2872 const struct macb_config sama5d3_config = { 1610612740U, 16U, &macb_clk_init, &macb_init, 0 };
2880 const struct macb_config sama5d4_config = { 4U, 4U, &macb_clk_init, &macb_init, 0 };
2887 const struct macb_config emac_config = { 0U, 0U, &at91ether_clk_init, &at91ether_init, 0 };
2892 const struct macb_config np4_config = { 16U, 0U, &macb_clk_init, &macb_init, 0 };
2898 const struct macb_config zynqmp_config = { 536870944U, 16U, &macb_clk_init, &macb_init, 10240 };
2906 const struct macb_config zynq_config = { 536870920U, 16U, &macb_clk_init, &macb_init, 0 };
2913 const struct of_device_id macb_dt_ids[14U] = { { { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 'c', 'd', 'n', 's', ',', 'a', 't', '3', '2', 'a', 'p', '7', '0', '0', '0', '-', 'm', 'a', 'c', 'b', '\x0' }, 0 }, { { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 'c', 'd', 'n', 's', ',', 'a', 't', '9', '1', 's', 'a', 'm', '9', '2', '6', '0', '-', 'm', 'a', 'c', 'b', '\x0' }, (const void *)(&at91sam9260_config) }, { { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 'c', 'd', 'n', 's', ',', 'm', 'a', 'c', 'b', '\x0' }, 0 }, { { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 'c', 'd', 'n', 's', ',', 'n', 'p', '4', '-', 'm', 'a', 'c', 'b', '\x0' }, (const void *)(&np4_config) }, { { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 'c', 'd', 'n', 's', ',', 'p', 'c', '3', '0', '2', '-', 'g', 'e', 'm', '\x0' }, (const void *)(&pc302gem_config) }, { { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 'c', 'd', 'n', 's', ',', 'g', 'e', 'm', '\x0' }, (const void *)(&pc302gem_config) }, { { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 'a', 't', 'm', 'e', 'l', ',', 's', 'a', 'm', 'a', '5', 'd', '2', '-', 'g', 'e', 'm', '\x0' }, (const void *)(&sama5d2_config) }, { { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 'a', 't', 'm', 'e', 'l', ',', 's', 'a', 'm', 'a', '5', 'd', '3', '-', 'g', 'e', 'm', '\x0' }, (const void *)(&sama5d3_config) }, { { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 'a', 't', 'm', 'e', 'l', ',', 's', 'a', 'm', 'a', '5', 'd', '4', '-', 'g', 'e', 'm', '\x0' }, (const void *)(&sama5d4_config) }, { { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 'c', 'd', 'n', 's', ',', 'a', 't', '9', '1', 'r', 'm', '9', '2', '0', '0', '-', 'e', 'm', 'a', 'c', '\x0' }, (const void *)(&emac_config) }, { { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 'c', 'd', 'n', 's', ',', 'e', 'm', 'a', 'c', '\x0' }, (const void *)(&emac_config) }, { { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 'c', 'd', 'n', 's', ',', 'z', 'y', 'n', 'q', 'm', 'p', '-', 'g', 'e', 'm', '\x0' }, (const void *)(&zynqmp_config) }, { { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 'c', 'd', 'n', 's', ',', 'z', 'y', 'n', 'q', '-', 'g', 'e', 'm', '\x0' }, (const void *)(&zynq_config) } };
2929 const struct of_device_id __mod_of__macb_dt_ids_device_table[14U] = { };
2932 int macb_probe(struct platform_device *pdev);
3109 int macb_remove(struct platform_device *pdev);
3219 void ldv_check_final_state();
3222 void ldv_check_return_value(int);
3225 void ldv_check_return_value_probe(int);
3228 void ldv_initialize();
3231 void ldv_handler_precall();
3234 int nondet_int();
3237 int LDV_IN_INTERRUPT = 0;
3240 void ldv_main0_sequence_infinite_withcheck_stateful();
10 void ldv_error();
14 void * ldv_err_ptr(long error);
28 bool ldv_is_err_or_null(const void *ptr);
5 int LDV_DMA_MAP_CALLS = 0;
return ;
}
-entry_point
{
3242 struct net_device *var_group1;
3243 struct ethtool_regs *var_group2;
3244 void *var_macb_get_regs_68_p2;
3245 struct ethtool_wolinfo *var_group3;
3246 struct ethtool_stats *var_group4;
3247 u64 *var_gem_get_ethtool_stats_63_p2;
3248 unsigned int var_gem_get_ethtool_strings_65_p1;
3249 u8 *var_gem_get_ethtool_strings_65_p2;
3250 int var_gem_get_sset_count_64_p1;
3251 int res_macb_open_58;
3252 int res_macb_close_59;
3253 struct sk_buff *var_group5;
3254 struct ifreq *var_group6;
3255 int var_macb_ioctl_71_p2;
3256 int var_macb_change_mtu_60_p1;
3257 unsigned long long var_macb_set_features_72_p1;
3258 int res_at91ether_open_78;
3259 int res_at91ether_close_79;
3260 struct platform_device *var_group7;
3261 struct clk **var_group8;
3262 struct clk **var_macb_clk_init_75_p2;
3263 struct clk **var_macb_clk_init_75_p3;
3264 struct clk **var_macb_clk_init_75_p4;
3265 struct clk **var_at91ether_clk_init_84_p2;
3266 struct clk **var_at91ether_clk_init_84_p3;
3267 struct clk **var_at91ether_clk_init_84_p4;
3268 int res_macb_probe_86;
3269 int var_at91ether_interrupt_82_p0;
3270 void *var_at91ether_interrupt_82_p1;
3271 int var_macb_interrupt_34_p0;
3272 void *var_macb_interrupt_34_p1;
3273 int ldv_s_macb_netdev_ops_net_device_ops;
3274 int ldv_s_at91ether_netdev_ops_net_device_ops;
3275 int ldv_s_macb_driver_platform_driver;
3276 int tmp;
3277 int tmp___0;
6185 ldv_s_macb_netdev_ops_net_device_ops = 0;
6188 ldv_s_at91ether_netdev_ops_net_device_ops = 0;
6209 ldv_s_macb_driver_platform_driver = 0;
6171 LDV_IN_INTERRUPT = 1;
6180 ldv_initialize() { /* Function call is skipped due to function is undefined */}
6214 goto ldv_51130;
6214 tmp___0 = nondet_int() { /* Function call is skipped due to function is undefined */}
6214 assume(tmp___0 != 0);
6219 goto ldv_51129;
6215 ldv_51129:;
6220 tmp = nondet_int() { /* Function call is skipped due to function is undefined */}
6220 switch (tmp);
6221 assume(!(tmp == 0));
6298 assume(!(tmp == 1));
6374 assume(!(tmp == 2));
6450 assume(!(tmp == 3));
6526 assume(!(tmp == 4));
6602 assume(!(tmp == 5));
6678 assume(!(tmp == 6));
6754 assume(!(tmp == 7));
6830 assume(!(tmp == 8));
6906 assume(!(tmp == 9));
6985 assume(!(tmp == 10));
7064 assume(!(tmp == 11));
7138 assume(!(tmp == 12));
7214 assume(!(tmp == 13));
7290 assume(!(tmp == 14));
7366 assume(!(tmp == 15));
7442 assume(!(tmp == 16));
7518 assume(!(tmp == 17));
7594 assume(!(tmp == 18));
7673 assume(!(tmp == 19));
7752 assume(tmp == 20);
7810 ldv_handler_precall() { /* Function call is skipped due to function is undefined */}
7811 -at91ether_start_xmit(var_group5, var_group1)
{
2667 struct macb *lp;
2668 void *tmp;
2669 unsigned int tmp___0;
2667 -netdev_priv((const struct net_device *)dev)
{
2058 return ((void *)dev) + 3200U;;
}
2667 lp = (struct macb *)tmp;
2669 assume(!((lp->macb_reg_readl) == (&macb_rx)));
2669 assume(!((lp->macb_reg_readl) == (&macb_poll)));
2669 assume(!((lp->macb_reg_readl) == (&hw_readl)));
2669 assume(!((lp->macb_reg_readl) == (&hw_readl_native)));
2669 assume(!((lp->macb_reg_readl) == (&macb_change_mtu)));
2669 assume(!((lp->macb_reg_readl) == (&gem_rx)));
2669 assume(!((lp->macb_reg_readl) == (&gem_get_sset_count)));
2669 tmp___0 = (*(lp->macb_reg_readl))(lp, 20);
2669 assume((tmp___0 & 16U) != 0U);
2670 -netif_stop_queue(dev)
{
2907 struct netdev_queue *tmp;
2907 -netdev_get_tx_queue((const struct net_device *)dev, 0U)
{
1964 struct netdev_queue *__CPAchecker_TMP_0 = (struct netdev_queue *)(dev->_tx);
1964 return __CPAchecker_TMP_0 + ((unsigned long)index);;
}
2907 -netif_tx_stop_queue(tmp)
{
2895 -set_bit(0L, (volatile unsigned long *)(&(dev_queue->state)))
{
80 Ignored inline assembler code
82 return ;;
}
2896 return ;;
}
2908 return ;;
}
2673 lp->skb = skb;
2674 int __CPAchecker_TMP_0 = (int)(skb->len);
2674 lp->skb_length = __CPAchecker_TMP_0;
2675 void *__CPAchecker_TMP_1 = (void *)(skb->data);
2675 size_t __CPAchecker_TMP_2 = (size_t )(skb->len);
2675 -dma_map_single_attrs((struct device *)0, __CPAchecker_TMP_1, __CPAchecker_TMP_2, 1, 0UL)
{
38 unsigned long long tmp;
38 -ldv_dma_map_page()
{
10 assume(!(LDV_DMA_MAP_CALLS != 0));
12 LDV_DMA_MAP_CALLS = LDV_DMA_MAP_CALLS + 1;
13 return ;;
}
40 -ldv_dma_map_single_attrs_5(dev, ptr, size, dir, attrs)
{
184 struct dma_map_ops *ops;
185 struct dma_map_ops *tmp;
186 unsigned long long addr;
187 int tmp___0;
188 long tmp___1;
189 unsigned long tmp___2;
190 unsigned long tmp___3;
185 -get_dma_ops(dev)
{
32 long tmp;
35 tmp = __builtin_expect(((unsigned long)dev) == ((unsigned long)((struct device *)0)), 0L) { /* Function call is skipped due to function is undefined */}
35 assume(!(tmp != 0L));
35 assume(!(((unsigned long)(dev->archdata.dma_ops)) == ((unsigned long)((struct dma_map_ops *)0))));
38 return dev->archdata.dma_ops;;
}
185 ops = tmp;
188 -kmemcheck_mark_initialized(ptr, (unsigned int)size)
{
133 return ;;
}
189 -valid_dma_direction((int)dir)
{
138 int __CPAchecker_TMP_0;
138 assume(!(dma_direction == 0));
138 assume(dma_direction == 1);
__CPAchecker_TMP_0 = 1;
138 return __CPAchecker_TMP_0;;
}
189 tmp___1 = __builtin_expect(tmp___0 == 0, 0L) { /* Function call is skipped due to function is undefined */}
189 assume(!(tmp___1 != 0L));
190 tmp___2 = __phys_addr((unsigned long)ptr) { /* Function call is skipped due to function is undefined */}
190 addr = (*(ops->map_page))(dev, (struct page *)((tmp___2 >> 12) + vmemmap_base), ((unsigned long)ptr) & 4095UL, size, dir, attrs);
193 tmp___3 = __phys_addr((unsigned long)ptr) { /* Function call is skipped due to function is undefined */}
193 debug_dma_map_page(dev, (struct page *)((tmp___3 >> 12) + vmemmap_base), ((unsigned long)ptr) & 4095UL, size, (int)dir, addr, 1) { /* Function call is skipped due to function is undefined */}
196 return addr;;
}
40 return tmp;;
}
2679 u32 __CPAchecker_TMP_3 = (u32 )(lp->skb_physaddr);
2679 assume(!((lp->macb_reg_writel) == (&hw_writel)));
2679 assume(!((lp->macb_reg_writel) == (&hw_writel_native)));
2679 (*(lp->macb_reg_writel))(lp, 12, __CPAchecker_TMP_3);
2681 assume(!((lp->macb_reg_writel) == (&hw_writel)));
2681 assume(!((lp->macb_reg_writel) == (&hw_writel_native)));
2681 (*(lp->macb_reg_writel))(lp, 16, skb->len);
2688 return 0;;
}
7827 goto ldv_51080;
9771 ldv_51080:;
9772 ldv_51130:;
6214 tmp___0 = nondet_int() { /* Function call is skipped due to function is undefined */}
6214 assume(tmp___0 != 0);
6219 goto ldv_51129;
6215 ldv_51129:;
6220 tmp = nondet_int() { /* Function call is skipped due to function is undefined */}
6220 switch (tmp);
6221 assume(!(tmp == 0));
6298 assume(!(tmp == 1));
6374 assume(!(tmp == 2));
6450 assume(!(tmp == 3));
6526 assume(!(tmp == 4));
6602 assume(!(tmp == 5));
6678 assume(!(tmp == 6));
6754 assume(!(tmp == 7));
6830 assume(!(tmp == 8));
6906 assume(!(tmp == 9));
6985 assume(!(tmp == 10));
7064 assume(!(tmp == 11));
7138 assume(!(tmp == 12));
7214 assume(!(tmp == 13));
7290 assume(!(tmp == 14));
7366 assume(!(tmp == 15));
7442 assume(!(tmp == 16));
7518 assume(!(tmp == 17));
7594 assume(!(tmp == 18));
7673 assume(!(tmp == 19));
7752 assume(tmp == 20);
7810 ldv_handler_precall() { /* Function call is skipped due to function is undefined */}
7811 -at91ether_start_xmit(var_group5, var_group1)
{
2667 struct macb *lp;
2668 void *tmp;
2669 unsigned int tmp___0;
2667 -netdev_priv((const struct net_device *)dev)
{
2058 return ((void *)dev) + 3200U;;
}
2667 lp = (struct macb *)tmp;
2669 assume(!((lp->macb_reg_readl) == (&macb_rx)));
2669 assume(!((lp->macb_reg_readl) == (&macb_poll)));
2669 assume(!((lp->macb_reg_readl) == (&hw_readl)));
2669 assume(!((lp->macb_reg_readl) == (&hw_readl_native)));
2669 assume(!((lp->macb_reg_readl) == (&macb_change_mtu)));
2669 assume(!((lp->macb_reg_readl) == (&gem_rx)));
2669 assume(!((lp->macb_reg_readl) == (&gem_get_sset_count)));
2669 tmp___0 = (*(lp->macb_reg_readl))(lp, 20);
2669 assume((tmp___0 & 16U) != 0U);
2670 -netif_stop_queue(dev)
{
2907 struct netdev_queue *tmp;
2907 -netdev_get_tx_queue((const struct net_device *)dev, 0U)
{
1964 struct netdev_queue *__CPAchecker_TMP_0 = (struct netdev_queue *)(dev->_tx);
1964 return __CPAchecker_TMP_0 + ((unsigned long)index);;
}
2907 -netif_tx_stop_queue(tmp)
{
2895 -set_bit(0L, (volatile unsigned long *)(&(dev_queue->state)))
{
80 Ignored inline assembler code
82 return ;;
}
2896 return ;;
}
2908 return ;;
}
2673 lp->skb = skb;
2674 int __CPAchecker_TMP_0 = (int)(skb->len);
2674 lp->skb_length = __CPAchecker_TMP_0;
2675 void *__CPAchecker_TMP_1 = (void *)(skb->data);
2675 size_t __CPAchecker_TMP_2 = (size_t )(skb->len);
2675 -dma_map_single_attrs((struct device *)0, __CPAchecker_TMP_1, __CPAchecker_TMP_2, 1, 0UL)
{
38 unsigned long long tmp;
38 -ldv_dma_map_page()
{
10 assume(LDV_DMA_MAP_CALLS != 0);
10 -ldv_error()
{
15 LDV_ERROR:;
}
}
}
}
}
Source code
1 #ifndef _ASM_X86_BITOPS_H 2 #define _ASM_X86_BITOPS_H 3 4 /* 5 * Copyright 1992, Linus Torvalds. 6 * 7 * Note: inlines with more than a single statement should be marked 8 * __always_inline to avoid problems with older gcc's inlining heuristics. 9 */ 10 11 #ifndef _LINUX_BITOPS_H 12 #error only <linux/bitops.h> can be included directly 13 #endif 14 15 #include <linux/compiler.h> 16 #include <asm/alternative.h> 17 #include <asm/rmwcc.h> 18 #include <asm/barrier.h> 19 20 #if BITS_PER_LONG == 32 21 # define _BITOPS_LONG_SHIFT 5 22 #elif BITS_PER_LONG == 64 23 # define _BITOPS_LONG_SHIFT 6 24 #else 25 # error "Unexpected BITS_PER_LONG" 26 #endif 27 28 #define BIT_64(n) (U64_C(1) << (n)) 29 30 /* 31 * These have to be done with inline assembly: that way the bit-setting 32 * is guaranteed to be atomic. All bit operations return 0 if the bit 33 * was cleared before the operation and != 0 if it was not. 34 * 35 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). 36 */ 37 38 #if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1) 39 /* Technically wrong, but this avoids compilation errors on some gcc 40 versions. */ 41 #define BITOP_ADDR(x) "=m" (*(volatile long *) (x)) 42 #else 43 #define BITOP_ADDR(x) "+m" (*(volatile long *) (x)) 44 #endif 45 46 #define ADDR BITOP_ADDR(addr) 47 48 /* 49 * We do the locked ops that don't return the old value as 50 * a mask operation on a byte. 51 */ 52 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr)) 53 #define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3)) 54 #define CONST_MASK(nr) (1 << ((nr) & 7)) 55 56 /** 57 * set_bit - Atomically set a bit in memory 58 * @nr: the bit to set 59 * @addr: the address to start counting from 60 * 61 * This function is atomic and may not be reordered. See __set_bit() 62 * if you do not require the atomic guarantees. 
63 * 64 * Note: there are no guarantees that this function will not be reordered 65 * on non x86 architectures, so if you are writing portable code, 66 * make sure not to rely on its reordering guarantees. 67 * 68 * Note that @nr may be almost arbitrarily large; this function is not 69 * restricted to acting on a single-word quantity. 70 */ 71 static __always_inline void 72 set_bit(long nr, volatile unsigned long *addr) 73 { 74 if (IS_IMMEDIATE(nr)) { 75 asm volatile(LOCK_PREFIX "orb %1,%0" 76 : CONST_MASK_ADDR(nr, addr) 77 : "iq" ((u8)CONST_MASK(nr)) 78 : "memory"); 79 } else { 80 asm volatile(LOCK_PREFIX "bts %1,%0" 81 : BITOP_ADDR(addr) : "Ir" (nr) : "memory"); 82 } 83 } 84 85 /** 86 * __set_bit - Set a bit in memory 87 * @nr: the bit to set 88 * @addr: the address to start counting from 89 * 90 * Unlike set_bit(), this function is non-atomic and may be reordered. 91 * If it's called on the same region of memory simultaneously, the effect 92 * may be that only one operation succeeds. 93 */ 94 static __always_inline void __set_bit(long nr, volatile unsigned long *addr) 95 { 96 asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory"); 97 } 98 99 /** 100 * clear_bit - Clears a bit in memory 101 * @nr: Bit to clear 102 * @addr: Address to start counting from 103 * 104 * clear_bit() is atomic and may not be reordered. However, it does 105 * not contain a memory barrier, so if it is used for locking purposes, 106 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic() 107 * in order to ensure changes are visible on other processors. 
108 */ 109 static __always_inline void 110 clear_bit(long nr, volatile unsigned long *addr) 111 { 112 if (IS_IMMEDIATE(nr)) { 113 asm volatile(LOCK_PREFIX "andb %1,%0" 114 : CONST_MASK_ADDR(nr, addr) 115 : "iq" ((u8)~CONST_MASK(nr))); 116 } else { 117 asm volatile(LOCK_PREFIX "btr %1,%0" 118 : BITOP_ADDR(addr) 119 : "Ir" (nr)); 120 } 121 } 122 123 /* 124 * clear_bit_unlock - Clears a bit in memory 125 * @nr: Bit to clear 126 * @addr: Address to start counting from 127 * 128 * clear_bit() is atomic and implies release semantics before the memory 129 * operation. It can be used for an unlock. 130 */ 131 static __always_inline void clear_bit_unlock(long nr, volatile unsigned long *addr) 132 { 133 barrier(); 134 clear_bit(nr, addr); 135 } 136 137 static __always_inline void __clear_bit(long nr, volatile unsigned long *addr) 138 { 139 asm volatile("btr %1,%0" : ADDR : "Ir" (nr)); 140 } 141 142 /* 143 * __clear_bit_unlock - Clears a bit in memory 144 * @nr: Bit to clear 145 * @addr: Address to start counting from 146 * 147 * __clear_bit() is non-atomic and implies release semantics before the memory 148 * operation. It can be used for an unlock if no other CPUs can concurrently 149 * modify other bits in the word. 150 * 151 * No memory barrier is required here, because x86 cannot reorder stores past 152 * older loads. Same principle as spin_unlock. 153 */ 154 static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long *addr) 155 { 156 barrier(); 157 __clear_bit(nr, addr); 158 } 159 160 /** 161 * __change_bit - Toggle a bit in memory 162 * @nr: the bit to change 163 * @addr: the address to start counting from 164 * 165 * Unlike change_bit(), this function is non-atomic and may be reordered. 166 * If it's called on the same region of memory simultaneously, the effect 167 * may be that only one operation succeeds. 
168 */ 169 static __always_inline void __change_bit(long nr, volatile unsigned long *addr) 170 { 171 asm volatile("btc %1,%0" : ADDR : "Ir" (nr)); 172 } 173 174 /** 175 * change_bit - Toggle a bit in memory 176 * @nr: Bit to change 177 * @addr: Address to start counting from 178 * 179 * change_bit() is atomic and may not be reordered. 180 * Note that @nr may be almost arbitrarily large; this function is not 181 * restricted to acting on a single-word quantity. 182 */ 183 static __always_inline void change_bit(long nr, volatile unsigned long *addr) 184 { 185 if (IS_IMMEDIATE(nr)) { 186 asm volatile(LOCK_PREFIX "xorb %1,%0" 187 : CONST_MASK_ADDR(nr, addr) 188 : "iq" ((u8)CONST_MASK(nr))); 189 } else { 190 asm volatile(LOCK_PREFIX "btc %1,%0" 191 : BITOP_ADDR(addr) 192 : "Ir" (nr)); 193 } 194 } 195 196 /** 197 * test_and_set_bit - Set a bit and return its old value 198 * @nr: Bit to set 199 * @addr: Address to count from 200 * 201 * This operation is atomic and cannot be reordered. 202 * It also implies a memory barrier. 203 */ 204 static __always_inline bool test_and_set_bit(long nr, volatile unsigned long *addr) 205 { 206 GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", c); 207 } 208 209 /** 210 * test_and_set_bit_lock - Set a bit and return its old value for lock 211 * @nr: Bit to set 212 * @addr: Address to count from 213 * 214 * This is the same as test_and_set_bit on x86. 215 */ 216 static __always_inline bool 217 test_and_set_bit_lock(long nr, volatile unsigned long *addr) 218 { 219 return test_and_set_bit(nr, addr); 220 } 221 222 /** 223 * __test_and_set_bit - Set a bit and return its old value 224 * @nr: Bit to set 225 * @addr: Address to count from 226 * 227 * This operation is non-atomic and can be reordered. 228 * If two examples of this operation race, one can appear to succeed 229 * but actually fail. You must protect multiple accesses with a lock. 
230 */ 231 static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long *addr) 232 { 233 bool oldbit; 234 235 asm("bts %2,%1\n\t" 236 CC_SET(c) 237 : CC_OUT(c) (oldbit), ADDR 238 : "Ir" (nr)); 239 return oldbit; 240 } 241 242 /** 243 * test_and_clear_bit - Clear a bit and return its old value 244 * @nr: Bit to clear 245 * @addr: Address to count from 246 * 247 * This operation is atomic and cannot be reordered. 248 * It also implies a memory barrier. 249 */ 250 static __always_inline bool test_and_clear_bit(long nr, volatile unsigned long *addr) 251 { 252 GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", c); 253 } 254 255 /** 256 * __test_and_clear_bit - Clear a bit and return its old value 257 * @nr: Bit to clear 258 * @addr: Address to count from 259 * 260 * This operation is non-atomic and can be reordered. 261 * If two examples of this operation race, one can appear to succeed 262 * but actually fail. You must protect multiple accesses with a lock. 263 * 264 * Note: the operation is performed atomically with respect to 265 * the local CPU, but not other CPUs. Portable code should not 266 * rely on this behaviour. 267 * KVM relies on this behaviour on x86 for modifying memory that is also 268 * accessed from a hypervisor on the same CPU if running in a VM: don't change 269 * this without also updating arch/x86/kernel/kvm.c 270 */ 271 static __always_inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr) 272 { 273 bool oldbit; 274 275 asm volatile("btr %2,%1\n\t" 276 CC_SET(c) 277 : CC_OUT(c) (oldbit), ADDR 278 : "Ir" (nr)); 279 return oldbit; 280 } 281 282 /* WARNING: non atomic and it can be reordered! 
*/ 283 static __always_inline bool __test_and_change_bit(long nr, volatile unsigned long *addr) 284 { 285 bool oldbit; 286 287 asm volatile("btc %2,%1\n\t" 288 CC_SET(c) 289 : CC_OUT(c) (oldbit), ADDR 290 : "Ir" (nr) : "memory"); 291 292 return oldbit; 293 } 294 295 /** 296 * test_and_change_bit - Change a bit and return its old value 297 * @nr: Bit to change 298 * @addr: Address to count from 299 * 300 * This operation is atomic and cannot be reordered. 301 * It also implies a memory barrier. 302 */ 303 static __always_inline bool test_and_change_bit(long nr, volatile unsigned long *addr) 304 { 305 GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", c); 306 } 307 308 static __always_inline bool constant_test_bit(long nr, const volatile unsigned long *addr) 309 { 310 return ((1UL << (nr & (BITS_PER_LONG-1))) & 311 (addr[nr >> _BITOPS_LONG_SHIFT])) != 0; 312 } 313 314 static __always_inline bool variable_test_bit(long nr, volatile const unsigned long *addr) 315 { 316 bool oldbit; 317 318 asm volatile("bt %2,%1\n\t" 319 CC_SET(c) 320 : CC_OUT(c) (oldbit) 321 : "m" (*(unsigned long *)addr), "Ir" (nr)); 322 323 return oldbit; 324 } 325 326 #if 0 /* Fool kernel-doc since it doesn't do macros yet */ 327 /** 328 * test_bit - Determine whether a bit is set 329 * @nr: bit number to test 330 * @addr: Address to start counting from 331 */ 332 static bool test_bit(int nr, const volatile unsigned long *addr); 333 #endif 334 335 #define test_bit(nr, addr) \ 336 (__builtin_constant_p((nr)) \ 337 ? constant_test_bit((nr), (addr)) \ 338 : variable_test_bit((nr), (addr))) 339 340 /** 341 * __ffs - find first set bit in word 342 * @word: The word to search 343 * 344 * Undefined if no bit exists, so code should check against 0 first. 
345 */ 346 static __always_inline unsigned long __ffs(unsigned long word) 347 { 348 asm("rep; bsf %1,%0" 349 : "=r" (word) 350 : "rm" (word)); 351 return word; 352 } 353 354 /** 355 * ffz - find first zero bit in word 356 * @word: The word to search 357 * 358 * Undefined if no zero exists, so code should check against ~0UL first. 359 */ 360 static __always_inline unsigned long ffz(unsigned long word) 361 { 362 asm("rep; bsf %1,%0" 363 : "=r" (word) 364 : "r" (~word)); 365 return word; 366 } 367 368 /* 369 * __fls: find last set bit in word 370 * @word: The word to search 371 * 372 * Undefined if no set bit exists, so code should check against 0 first. 373 */ 374 static __always_inline unsigned long __fls(unsigned long word) 375 { 376 asm("bsr %1,%0" 377 : "=r" (word) 378 : "rm" (word)); 379 return word; 380 } 381 382 #undef ADDR 383 384 #ifdef __KERNEL__ 385 /** 386 * ffs - find first set bit in word 387 * @x: the word to search 388 * 389 * This is defined the same way as the libc and compiler builtin ffs 390 * routines, therefore differs in spirit from the other bitops. 391 * 392 * ffs(value) returns 0 if value is 0 or the position of the first 393 * set bit if value is nonzero. The first (least significant) bit 394 * is at position 1. 395 */ 396 static __always_inline int ffs(int x) 397 { 398 int r; 399 400 #ifdef CONFIG_X86_64 401 /* 402 * AMD64 says BSFL won't clobber the dest reg if x==0; Intel64 says the 403 * dest reg is undefined if x==0, but their CPU architect says its 404 * value is written to set it to the same as before, except that the 405 * top 32 bits will be cleared. 406 * 407 * We cannot do this on 32 bits because at the very least some 408 * 486 CPUs did not behave this way. 
409 */ 410 asm("bsfl %1,%0" 411 : "=r" (r) 412 : "rm" (x), "0" (-1)); 413 #elif defined(CONFIG_X86_CMOV) 414 asm("bsfl %1,%0\n\t" 415 "cmovzl %2,%0" 416 : "=&r" (r) : "rm" (x), "r" (-1)); 417 #else 418 asm("bsfl %1,%0\n\t" 419 "jnz 1f\n\t" 420 "movl $-1,%0\n" 421 "1:" : "=r" (r) : "rm" (x)); 422 #endif 423 return r + 1; 424 } 425 426 /** 427 * fls - find last set bit in word 428 * @x: the word to search 429 * 430 * This is defined in a similar way as the libc and compiler builtin 431 * ffs, but returns the position of the most significant set bit. 432 * 433 * fls(value) returns 0 if value is 0 or the position of the last 434 * set bit if value is nonzero. The last (most significant) bit is 435 * at position 32. 436 */ 437 static __always_inline int fls(int x) 438 { 439 int r; 440 441 #ifdef CONFIG_X86_64 442 /* 443 * AMD64 says BSRL won't clobber the dest reg if x==0; Intel64 says the 444 * dest reg is undefined if x==0, but their CPU architect says its 445 * value is written to set it to the same as before, except that the 446 * top 32 bits will be cleared. 447 * 448 * We cannot do this on 32 bits because at the very least some 449 * 486 CPUs did not behave this way. 450 */ 451 asm("bsrl %1,%0" 452 : "=r" (r) 453 : "rm" (x), "0" (-1)); 454 #elif defined(CONFIG_X86_CMOV) 455 asm("bsrl %1,%0\n\t" 456 "cmovzl %2,%0" 457 : "=&r" (r) : "rm" (x), "rm" (-1)); 458 #else 459 asm("bsrl %1,%0\n\t" 460 "jnz 1f\n\t" 461 "movl $-1,%0\n" 462 "1:" : "=r" (r) : "rm" (x)); 463 #endif 464 return r + 1; 465 } 466 467 /** 468 * fls64 - find last set bit in a 64-bit word 469 * @x: the word to search 470 * 471 * This is defined in a similar way as the libc and compiler builtin 472 * ffsll, but returns the position of the most significant set bit. 473 * 474 * fls64(value) returns 0 if value is 0 or the position of the last 475 * set bit if value is nonzero. The last (most significant) bit is 476 * at position 64. 
477 */ 478 #ifdef CONFIG_X86_64 479 static __always_inline int fls64(__u64 x) 480 { 481 int bitpos = -1; 482 /* 483 * AMD64 says BSRQ won't clobber the dest reg if x==0; Intel64 says the 484 * dest reg is undefined if x==0, but their CPU architect says its 485 * value is written to set it to the same as before. 486 */ 487 asm("bsrq %1,%q0" 488 : "+r" (bitpos) 489 : "rm" (x)); 490 return bitpos + 1; 491 } 492 #else 493 #include <asm-generic/bitops/fls64.h> 494 #endif 495 496 #include <asm-generic/bitops/find.h> 497 498 #include <asm-generic/bitops/sched.h> 499 500 #include <asm/arch_hweight.h> 501 502 #include <asm-generic/bitops/const_hweight.h> 503 504 #include <asm-generic/bitops/le.h> 505 506 #include <asm-generic/bitops/ext2-atomic-setbit.h> 507 508 #endif /* __KERNEL__ */ 509 #endif /* _ASM_X86_BITOPS_H */
1 #ifndef _ASM_X86_DMA_MAPPING_H 2 #define _ASM_X86_DMA_MAPPING_H 3 4 /* 5 * IOMMU interface. See Documentation/DMA-API-HOWTO.txt and 6 * Documentation/DMA-API.txt for documentation. 7 */ 8 9 #include <linux/kmemcheck.h> 10 #include <linux/scatterlist.h> 11 #include <linux/dma-debug.h> 12 #include <asm/io.h> 13 #include <asm/swiotlb.h> 14 #include <linux/dma-contiguous.h> 15 16 #ifdef CONFIG_ISA 17 # define ISA_DMA_BIT_MASK DMA_BIT_MASK(24) 18 #else 19 # define ISA_DMA_BIT_MASK DMA_BIT_MASK(32) 20 #endif 21 22 #define DMA_ERROR_CODE 0 23 24 extern int iommu_merge; 25 extern struct device x86_dma_fallback_dev; 26 extern int panic_on_overflow; 27 28 extern struct dma_map_ops *dma_ops; 29 30 static inline struct dma_map_ops *get_dma_ops(struct device *dev) 31 { 32 #ifndef CONFIG_X86_DEV_DMA_OPS 33 return dma_ops; 34 #else 35 if (unlikely(!dev) || !dev->archdata.dma_ops) 36 return dma_ops; 37 else 38 return dev->archdata.dma_ops; 39 #endif 40 } 41 42 bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp); 43 #define arch_dma_alloc_attrs arch_dma_alloc_attrs 44 45 #define HAVE_ARCH_DMA_SUPPORTED 1 46 extern int dma_supported(struct device *hwdev, u64 mask); 47 48 extern void *dma_generic_alloc_coherent(struct device *dev, size_t size, 49 dma_addr_t *dma_addr, gfp_t flag, 50 unsigned long attrs); 51 52 extern void dma_generic_free_coherent(struct device *dev, size_t size, 53 void *vaddr, dma_addr_t dma_addr, 54 unsigned long attrs); 55 56 #ifdef CONFIG_X86_DMA_REMAP /* Platform code defines bridge-specific code */ 57 extern bool dma_capable(struct device *dev, dma_addr_t addr, size_t size); 58 extern dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr); 59 extern phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr); 60 #else 61 62 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) 63 { 64 if (!dev->dma_mask) 65 return 0; 66 67 return addr + size - 1 <= *dev->dma_mask; 68 } 69 70 static inline dma_addr_t 
phys_to_dma(struct device *dev, phys_addr_t paddr) 71 { 72 return paddr; 73 } 74 75 static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) 76 { 77 return daddr; 78 } 79 #endif /* CONFIG_X86_DMA_REMAP */ 80 81 static inline void 82 dma_cache_sync(struct device *dev, void *vaddr, size_t size, 83 enum dma_data_direction dir) 84 { 85 flush_write_buffers(); 86 } 87 88 static inline unsigned long dma_alloc_coherent_mask(struct device *dev, 89 gfp_t gfp) 90 { 91 unsigned long dma_mask = 0; 92 93 dma_mask = dev->coherent_dma_mask; 94 if (!dma_mask) 95 dma_mask = (gfp & GFP_DMA) ? DMA_BIT_MASK(24) : DMA_BIT_MASK(32); 96 97 return dma_mask; 98 } 99 100 static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp) 101 { 102 unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp); 103 104 if (dma_mask <= DMA_BIT_MASK(24)) 105 gfp |= GFP_DMA; 106 #ifdef CONFIG_X86_64 107 if (dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA)) 108 gfp |= GFP_DMA32; 109 #endif 110 return gfp; 111 } 112 113 #endif
/*
 * Cadence MACB/GEM Ethernet Controller driver
 *
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/circ_buf.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
#include <linux/platform_data/macb.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>

#include "macb.h"

/* RX/TX descriptor ring geometry.  Ring sizes must be powers of two so
 * that index wrapping can be done with a cheap bitwise AND.
 */
#define MACB_RX_BUFFER_SIZE	128
#define RX_BUFFER_MULTIPLE	64	/* bytes */
#define RX_RING_SIZE		512	/* must be power of 2 */
#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)

#define TX_RING_SIZE		128	/* must be power of 2 */
#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)

/* level of occupied TX descriptors under which we wake up TX process */
#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)

/* Interrupt status bits grouped by the handler that services them. */
#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \
				 | MACB_BIT(ISR_ROVR))
#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)	\
				 | MACB_BIT(ISR_RLE)	\
				 | MACB_BIT(TXERR))
#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))

/* Largest frame length encodable in a single TX descriptor's length field. */
#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))

#define GEM_MTU_MIN_SIZE	68
62 #define MACB_WOL_HAS_MAGIC_PACKET (0x1 << 0) 63 #define MACB_WOL_ENABLED (0x1 << 1) 64 65 /* Graceful stop timeouts in us. We should allow up to 66 * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions) 67 */ 68 #define MACB_HALT_TIMEOUT 1230 69 70 /* Ring buffer accessors */ 71 static unsigned int macb_tx_ring_wrap(unsigned int index) 72 { 73 return index & (TX_RING_SIZE - 1); 74 } 75 76 static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue, 77 unsigned int index) 78 { 79 return &queue->tx_ring[macb_tx_ring_wrap(index)]; 80 } 81 82 static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue, 83 unsigned int index) 84 { 85 return &queue->tx_skb[macb_tx_ring_wrap(index)]; 86 } 87 88 static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index) 89 { 90 dma_addr_t offset; 91 92 offset = macb_tx_ring_wrap(index) * sizeof(struct macb_dma_desc); 93 94 return queue->tx_ring_dma + offset; 95 } 96 97 static unsigned int macb_rx_ring_wrap(unsigned int index) 98 { 99 return index & (RX_RING_SIZE - 1); 100 } 101 102 static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index) 103 { 104 return &bp->rx_ring[macb_rx_ring_wrap(index)]; 105 } 106 107 static void *macb_rx_buffer(struct macb *bp, unsigned int index) 108 { 109 return bp->rx_buffers + bp->rx_buffer_size * macb_rx_ring_wrap(index); 110 } 111 112 /* I/O accessors */ 113 static u32 hw_readl_native(struct macb *bp, int offset) 114 { 115 return __raw_readl(bp->regs + offset); 116 } 117 118 static void hw_writel_native(struct macb *bp, int offset, u32 value) 119 { 120 __raw_writel(value, bp->regs + offset); 121 } 122 123 static u32 hw_readl(struct macb *bp, int offset) 124 { 125 return readl_relaxed(bp->regs + offset); 126 } 127 128 static void hw_writel(struct macb *bp, int offset, u32 value) 129 { 130 writel_relaxed(value, bp->regs + offset); 131 } 132 133 /* Find the CPU endianness by using the loopback bit of NCR register. 
When the 134 * CPU is in big endian we need to program swapped mode for management 135 * descriptor access. 136 */ 137 static bool hw_is_native_io(void __iomem *addr) 138 { 139 u32 value = MACB_BIT(LLB); 140 141 __raw_writel(value, addr + MACB_NCR); 142 value = __raw_readl(addr + MACB_NCR); 143 144 /* Write 0 back to disable everything */ 145 __raw_writel(0, addr + MACB_NCR); 146 147 return value == MACB_BIT(LLB); 148 } 149 150 static bool hw_is_gem(void __iomem *addr, bool native_io) 151 { 152 u32 id; 153 154 if (native_io) 155 id = __raw_readl(addr + MACB_MID); 156 else 157 id = readl_relaxed(addr + MACB_MID); 158 159 return MACB_BFEXT(IDNUM, id) >= 0x2; 160 } 161 162 static void macb_set_hwaddr(struct macb *bp) 163 { 164 u32 bottom; 165 u16 top; 166 167 bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr)); 168 macb_or_gem_writel(bp, SA1B, bottom); 169 top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4))); 170 macb_or_gem_writel(bp, SA1T, top); 171 172 /* Clear unused address register sets */ 173 macb_or_gem_writel(bp, SA2B, 0); 174 macb_or_gem_writel(bp, SA2T, 0); 175 macb_or_gem_writel(bp, SA3B, 0); 176 macb_or_gem_writel(bp, SA3T, 0); 177 macb_or_gem_writel(bp, SA4B, 0); 178 macb_or_gem_writel(bp, SA4T, 0); 179 } 180 181 static void macb_get_hwaddr(struct macb *bp) 182 { 183 struct macb_platform_data *pdata; 184 u32 bottom; 185 u16 top; 186 u8 addr[6]; 187 int i; 188 189 pdata = dev_get_platdata(&bp->pdev->dev); 190 191 /* Check all 4 address register for valid address */ 192 for (i = 0; i < 4; i++) { 193 bottom = macb_or_gem_readl(bp, SA1B + i * 8); 194 top = macb_or_gem_readl(bp, SA1T + i * 8); 195 196 if (pdata && pdata->rev_eth_addr) { 197 addr[5] = bottom & 0xff; 198 addr[4] = (bottom >> 8) & 0xff; 199 addr[3] = (bottom >> 16) & 0xff; 200 addr[2] = (bottom >> 24) & 0xff; 201 addr[1] = top & 0xff; 202 addr[0] = (top & 0xff00) >> 8; 203 } else { 204 addr[0] = bottom & 0xff; 205 addr[1] = (bottom >> 8) & 0xff; 206 addr[2] = (bottom >> 16) & 0xff; 207 addr[3] = 
(bottom >> 24) & 0xff; 208 addr[4] = top & 0xff; 209 addr[5] = (top >> 8) & 0xff; 210 } 211 212 if (is_valid_ether_addr(addr)) { 213 memcpy(bp->dev->dev_addr, addr, sizeof(addr)); 214 return; 215 } 216 } 217 218 dev_info(&bp->pdev->dev, "invalid hw address, using random\n"); 219 eth_hw_addr_random(bp->dev); 220 } 221 222 static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum) 223 { 224 struct macb *bp = bus->priv; 225 int value; 226 227 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF) 228 | MACB_BF(RW, MACB_MAN_READ) 229 | MACB_BF(PHYA, mii_id) 230 | MACB_BF(REGA, regnum) 231 | MACB_BF(CODE, MACB_MAN_CODE))); 232 233 /* wait for end of transfer */ 234 while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR))) 235 cpu_relax(); 236 237 value = MACB_BFEXT(DATA, macb_readl(bp, MAN)); 238 239 return value; 240 } 241 242 static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum, 243 u16 value) 244 { 245 struct macb *bp = bus->priv; 246 247 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF) 248 | MACB_BF(RW, MACB_MAN_WRITE) 249 | MACB_BF(PHYA, mii_id) 250 | MACB_BF(REGA, regnum) 251 | MACB_BF(CODE, MACB_MAN_CODE) 252 | MACB_BF(DATA, value))); 253 254 /* wait for end of transfer */ 255 while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR))) 256 cpu_relax(); 257 258 return 0; 259 } 260 261 /** 262 * macb_set_tx_clk() - Set a clock to a new frequency 263 * @clk Pointer to the clock to change 264 * @rate New frequency in Hz 265 * @dev Pointer to the struct net_device 266 */ 267 static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev) 268 { 269 long ferr, rate, rate_rounded; 270 271 if (!clk) 272 return; 273 274 switch (speed) { 275 case SPEED_10: 276 rate = 2500000; 277 break; 278 case SPEED_100: 279 rate = 25000000; 280 break; 281 case SPEED_1000: 282 rate = 125000000; 283 break; 284 default: 285 return; 286 } 287 288 rate_rounded = clk_round_rate(clk, rate); 289 if (rate_rounded < 0) 290 return; 291 292 /* RGMII allows 50 ppm frequency error. 
Test and warn if this limit 293 * is not satisfied. 294 */ 295 ferr = abs(rate_rounded - rate); 296 ferr = DIV_ROUND_UP(ferr, rate / 100000); 297 if (ferr > 5) 298 netdev_warn(dev, "unable to generate target frequency: %ld Hz\n", 299 rate); 300 301 if (clk_set_rate(clk, rate_rounded)) 302 netdev_err(dev, "adjusting tx_clk failed.\n"); 303 } 304 305 static void macb_handle_link_change(struct net_device *dev) 306 { 307 struct macb *bp = netdev_priv(dev); 308 struct phy_device *phydev = dev->phydev; 309 unsigned long flags; 310 int status_change = 0; 311 312 spin_lock_irqsave(&bp->lock, flags); 313 314 if (phydev->link) { 315 if ((bp->speed != phydev->speed) || 316 (bp->duplex != phydev->duplex)) { 317 u32 reg; 318 319 reg = macb_readl(bp, NCFGR); 320 reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD)); 321 if (macb_is_gem(bp)) 322 reg &= ~GEM_BIT(GBE); 323 324 if (phydev->duplex) 325 reg |= MACB_BIT(FD); 326 if (phydev->speed == SPEED_100) 327 reg |= MACB_BIT(SPD); 328 if (phydev->speed == SPEED_1000 && 329 bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE) 330 reg |= GEM_BIT(GBE); 331 332 macb_or_gem_writel(bp, NCFGR, reg); 333 334 bp->speed = phydev->speed; 335 bp->duplex = phydev->duplex; 336 status_change = 1; 337 } 338 } 339 340 if (phydev->link != bp->link) { 341 if (!phydev->link) { 342 bp->speed = 0; 343 bp->duplex = -1; 344 } 345 bp->link = phydev->link; 346 347 status_change = 1; 348 } 349 350 spin_unlock_irqrestore(&bp->lock, flags); 351 352 if (status_change) { 353 if (phydev->link) { 354 /* Update the TX clock rate if and only if the link is 355 * up and there has been a link change. 356 */ 357 macb_set_tx_clk(bp->tx_clk, phydev->speed, dev); 358 359 netif_carrier_on(dev); 360 netdev_info(dev, "link up (%d/%s)\n", 361 phydev->speed, 362 phydev->duplex == DUPLEX_FULL ? 363 "Full" : "Half"); 364 } else { 365 netif_carrier_off(dev); 366 netdev_info(dev, "link down\n"); 367 } 368 } 369 } 370 371 /* based on au1000_eth. 
c*/ 372 static int macb_mii_probe(struct net_device *dev) 373 { 374 struct macb *bp = netdev_priv(dev); 375 struct macb_platform_data *pdata; 376 struct phy_device *phydev; 377 int phy_irq; 378 int ret; 379 380 phydev = phy_find_first(bp->mii_bus); 381 if (!phydev) { 382 netdev_err(dev, "no PHY found\n"); 383 return -ENXIO; 384 } 385 386 pdata = dev_get_platdata(&bp->pdev->dev); 387 if (pdata && gpio_is_valid(pdata->phy_irq_pin)) { 388 ret = devm_gpio_request(&bp->pdev->dev, pdata->phy_irq_pin, 389 "phy int"); 390 if (!ret) { 391 phy_irq = gpio_to_irq(pdata->phy_irq_pin); 392 phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq; 393 } 394 } 395 396 /* attach the mac to the phy */ 397 ret = phy_connect_direct(dev, phydev, &macb_handle_link_change, 398 bp->phy_interface); 399 if (ret) { 400 netdev_err(dev, "Could not attach to PHY\n"); 401 return ret; 402 } 403 404 /* mask with MAC supported features */ 405 if (macb_is_gem(bp) && bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE) 406 phydev->supported &= PHY_GBIT_FEATURES; 407 else 408 phydev->supported &= PHY_BASIC_FEATURES; 409 410 if (bp->caps & MACB_CAPS_NO_GIGABIT_HALF) 411 phydev->supported &= ~SUPPORTED_1000baseT_Half; 412 413 phydev->advertising = phydev->supported; 414 415 bp->link = 0; 416 bp->speed = 0; 417 bp->duplex = -1; 418 419 return 0; 420 } 421 422 static int macb_mii_init(struct macb *bp) 423 { 424 struct macb_platform_data *pdata; 425 struct device_node *np; 426 int err = -ENXIO, i; 427 428 /* Enable management port */ 429 macb_writel(bp, NCR, MACB_BIT(MPE)); 430 431 bp->mii_bus = mdiobus_alloc(); 432 if (!bp->mii_bus) { 433 err = -ENOMEM; 434 goto err_out; 435 } 436 437 bp->mii_bus->name = "MACB_mii_bus"; 438 bp->mii_bus->read = &macb_mdio_read; 439 bp->mii_bus->write = &macb_mdio_write; 440 snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", 441 bp->pdev->name, bp->pdev->id); 442 bp->mii_bus->priv = bp; 443 bp->mii_bus->parent = &bp->pdev->dev; 444 pdata = dev_get_platdata(&bp->pdev->dev); 445 446 
dev_set_drvdata(&bp->dev->dev, bp->mii_bus); 447 448 np = bp->pdev->dev.of_node; 449 if (np) { 450 /* try dt phy registration */ 451 err = of_mdiobus_register(bp->mii_bus, np); 452 453 /* fallback to standard phy registration if no phy were 454 * found during dt phy registration 455 */ 456 if (!err && !phy_find_first(bp->mii_bus)) { 457 for (i = 0; i < PHY_MAX_ADDR; i++) { 458 struct phy_device *phydev; 459 460 phydev = mdiobus_scan(bp->mii_bus, i); 461 if (IS_ERR(phydev) && 462 PTR_ERR(phydev) != -ENODEV) { 463 err = PTR_ERR(phydev); 464 break; 465 } 466 } 467 468 if (err) 469 goto err_out_unregister_bus; 470 } 471 } else { 472 if (pdata) 473 bp->mii_bus->phy_mask = pdata->phy_mask; 474 475 err = mdiobus_register(bp->mii_bus); 476 } 477 478 if (err) 479 goto err_out_free_mdiobus; 480 481 err = macb_mii_probe(bp->dev); 482 if (err) 483 goto err_out_unregister_bus; 484 485 return 0; 486 487 err_out_unregister_bus: 488 mdiobus_unregister(bp->mii_bus); 489 err_out_free_mdiobus: 490 mdiobus_free(bp->mii_bus); 491 err_out: 492 return err; 493 } 494 495 static void macb_update_stats(struct macb *bp) 496 { 497 u32 *p = &bp->hw_stats.macb.rx_pause_frames; 498 u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1; 499 int offset = MACB_PFR; 500 501 WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4); 502 503 for (; p < end; p++, offset += 4) 504 *p += bp->macb_reg_readl(bp, offset); 505 } 506 507 static int macb_halt_tx(struct macb *bp) 508 { 509 unsigned long halt_time, timeout; 510 u32 status; 511 512 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT)); 513 514 timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT); 515 do { 516 halt_time = jiffies; 517 status = macb_readl(bp, TSR); 518 if (!(status & MACB_BIT(TGO))) 519 return 0; 520 521 usleep_range(10, 250); 522 } while (time_before(halt_time, timeout)); 523 524 return -ETIMEDOUT; 525 } 526 527 static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb) 528 { 529 if (tx_skb->mapping) { 
530 if (tx_skb->mapped_as_page) 531 dma_unmap_page(&bp->pdev->dev, tx_skb->mapping, 532 tx_skb->size, DMA_TO_DEVICE); 533 else 534 dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, 535 tx_skb->size, DMA_TO_DEVICE); 536 tx_skb->mapping = 0; 537 } 538 539 if (tx_skb->skb) { 540 dev_kfree_skb_any(tx_skb->skb); 541 tx_skb->skb = NULL; 542 } 543 } 544 545 static inline void macb_set_addr(struct macb_dma_desc *desc, dma_addr_t addr) 546 { 547 desc->addr = (u32)addr; 548 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 549 desc->addrh = (u32)(addr >> 32); 550 #endif 551 } 552 553 static void macb_tx_error_task(struct work_struct *work) 554 { 555 struct macb_queue *queue = container_of(work, struct macb_queue, 556 tx_error_task); 557 struct macb *bp = queue->bp; 558 struct macb_tx_skb *tx_skb; 559 struct macb_dma_desc *desc; 560 struct sk_buff *skb; 561 unsigned int tail; 562 unsigned long flags; 563 564 netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n", 565 (unsigned int)(queue - bp->queues), 566 queue->tx_tail, queue->tx_head); 567 568 /* Prevent the queue IRQ handlers from running: each of them may call 569 * macb_tx_interrupt(), which in turn may call netif_wake_subqueue(). 570 * As explained below, we have to halt the transmission before updating 571 * TBQP registers so we call netif_tx_stop_all_queues() to notify the 572 * network engine about the macb/gem being halted. 573 */ 574 spin_lock_irqsave(&bp->lock, flags); 575 576 /* Make sure nobody is trying to queue up new packets */ 577 netif_tx_stop_all_queues(bp->dev); 578 579 /* Stop transmission now 580 * (in case we have just queued new packets) 581 * macb/gem must be halted to write TBQP register 582 */ 583 if (macb_halt_tx(bp)) 584 /* Just complain for now, reinitializing TX path can be good */ 585 netdev_err(bp->dev, "BUG: halt tx timed out\n"); 586 587 /* Treat frames in TX queue including the ones that caused the error. 588 * Free transmit buffers in upper layer. 
589 */ 590 for (tail = queue->tx_tail; tail != queue->tx_head; tail++) { 591 u32 ctrl; 592 593 desc = macb_tx_desc(queue, tail); 594 ctrl = desc->ctrl; 595 tx_skb = macb_tx_skb(queue, tail); 596 skb = tx_skb->skb; 597 598 if (ctrl & MACB_BIT(TX_USED)) { 599 /* skb is set for the last buffer of the frame */ 600 while (!skb) { 601 macb_tx_unmap(bp, tx_skb); 602 tail++; 603 tx_skb = macb_tx_skb(queue, tail); 604 skb = tx_skb->skb; 605 } 606 607 /* ctrl still refers to the first buffer descriptor 608 * since it's the only one written back by the hardware 609 */ 610 if (!(ctrl & MACB_BIT(TX_BUF_EXHAUSTED))) { 611 netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n", 612 macb_tx_ring_wrap(tail), skb->data); 613 bp->stats.tx_packets++; 614 bp->stats.tx_bytes += skb->len; 615 } 616 } else { 617 /* "Buffers exhausted mid-frame" errors may only happen 618 * if the driver is buggy, so complain loudly about 619 * those. Statistics are updated by hardware. 620 */ 621 if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED)) 622 netdev_err(bp->dev, 623 "BUG: TX buffers exhausted mid-frame\n"); 624 625 desc->ctrl = ctrl | MACB_BIT(TX_USED); 626 } 627 628 macb_tx_unmap(bp, tx_skb); 629 } 630 631 /* Set end of TX queue */ 632 desc = macb_tx_desc(queue, 0); 633 macb_set_addr(desc, 0); 634 desc->ctrl = MACB_BIT(TX_USED); 635 636 /* Make descriptor updates visible to hardware */ 637 wmb(); 638 639 /* Reinitialize the TX desc queue */ 640 queue_writel(queue, TBQP, (u32)(queue->tx_ring_dma)); 641 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 642 queue_writel(queue, TBQPH, (u32)(queue->tx_ring_dma >> 32)); 643 #endif 644 /* Make TX ring reflect state of hardware */ 645 queue->tx_head = 0; 646 queue->tx_tail = 0; 647 648 /* Housework before enabling TX IRQ */ 649 macb_writel(bp, TSR, macb_readl(bp, TSR)); 650 queue_writel(queue, IER, MACB_TX_INT_FLAGS); 651 652 /* Now we are ready to start transmission again */ 653 netif_tx_start_all_queues(bp->dev); 654 macb_writel(bp, NCR, macb_readl(bp, NCR) | 
MACB_BIT(TSTART)); 655 656 spin_unlock_irqrestore(&bp->lock, flags); 657 } 658 659 static void macb_tx_interrupt(struct macb_queue *queue) 660 { 661 unsigned int tail; 662 unsigned int head; 663 u32 status; 664 struct macb *bp = queue->bp; 665 u16 queue_index = queue - bp->queues; 666 667 status = macb_readl(bp, TSR); 668 macb_writel(bp, TSR, status); 669 670 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 671 queue_writel(queue, ISR, MACB_BIT(TCOMP)); 672 673 netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n", 674 (unsigned long)status); 675 676 head = queue->tx_head; 677 for (tail = queue->tx_tail; tail != head; tail++) { 678 struct macb_tx_skb *tx_skb; 679 struct sk_buff *skb; 680 struct macb_dma_desc *desc; 681 u32 ctrl; 682 683 desc = macb_tx_desc(queue, tail); 684 685 /* Make hw descriptor updates visible to CPU */ 686 rmb(); 687 688 ctrl = desc->ctrl; 689 690 /* TX_USED bit is only set by hardware on the very first buffer 691 * descriptor of the transmitted frame. 692 */ 693 if (!(ctrl & MACB_BIT(TX_USED))) 694 break; 695 696 /* Process all buffers of the current transmitted frame */ 697 for (;; tail++) { 698 tx_skb = macb_tx_skb(queue, tail); 699 skb = tx_skb->skb; 700 701 /* First, update TX stats if needed */ 702 if (skb) { 703 netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n", 704 macb_tx_ring_wrap(tail), skb->data); 705 bp->stats.tx_packets++; 706 bp->stats.tx_bytes += skb->len; 707 } 708 709 /* Now we can safely release resources */ 710 macb_tx_unmap(bp, tx_skb); 711 712 /* skb is set only for the last buffer of the frame. 713 * WARNING: at this point skb has been freed by 714 * macb_tx_unmap(). 
715 */ 716 if (skb) 717 break; 718 } 719 } 720 721 queue->tx_tail = tail; 722 if (__netif_subqueue_stopped(bp->dev, queue_index) && 723 CIRC_CNT(queue->tx_head, queue->tx_tail, 724 TX_RING_SIZE) <= MACB_TX_WAKEUP_THRESH) 725 netif_wake_subqueue(bp->dev, queue_index); 726 } 727 728 static void gem_rx_refill(struct macb *bp) 729 { 730 unsigned int entry; 731 struct sk_buff *skb; 732 dma_addr_t paddr; 733 734 while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail, 735 RX_RING_SIZE) > 0) { 736 entry = macb_rx_ring_wrap(bp->rx_prepared_head); 737 738 /* Make hw descriptor updates visible to CPU */ 739 rmb(); 740 741 bp->rx_prepared_head++; 742 743 if (!bp->rx_skbuff[entry]) { 744 /* allocate sk_buff for this free entry in ring */ 745 skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size); 746 if (unlikely(!skb)) { 747 netdev_err(bp->dev, 748 "Unable to allocate sk_buff\n"); 749 break; 750 } 751 752 /* now fill corresponding descriptor entry */ 753 paddr = dma_map_single(&bp->pdev->dev, skb->data, 754 bp->rx_buffer_size, 755 DMA_FROM_DEVICE); 756 if (dma_mapping_error(&bp->pdev->dev, paddr)) { 757 dev_kfree_skb(skb); 758 break; 759 } 760 761 bp->rx_skbuff[entry] = skb; 762 763 if (entry == RX_RING_SIZE - 1) 764 paddr |= MACB_BIT(RX_WRAP); 765 macb_set_addr(&(bp->rx_ring[entry]), paddr); 766 bp->rx_ring[entry].ctrl = 0; 767 768 /* properly align Ethernet header */ 769 skb_reserve(skb, NET_IP_ALIGN); 770 } else { 771 bp->rx_ring[entry].addr &= ~MACB_BIT(RX_USED); 772 bp->rx_ring[entry].ctrl = 0; 773 } 774 } 775 776 /* Make descriptor updates visible to hardware */ 777 wmb(); 778 779 netdev_vdbg(bp->dev, "rx ring: prepared head %d, tail %d\n", 780 bp->rx_prepared_head, bp->rx_tail); 781 } 782 783 /* Mark DMA descriptors from begin up to and not including end as unused */ 784 static void discard_partial_frame(struct macb *bp, unsigned int begin, 785 unsigned int end) 786 { 787 unsigned int frag; 788 789 for (frag = begin; frag != end; frag++) { 790 struct macb_dma_desc *desc = 
macb_rx_desc(bp, frag); 791 792 desc->addr &= ~MACB_BIT(RX_USED); 793 } 794 795 /* Make descriptor updates visible to hardware */ 796 wmb(); 797 798 /* When this happens, the hardware stats registers for 799 * whatever caused this is updated, so we don't have to record 800 * anything. 801 */ 802 } 803 804 static int gem_rx(struct macb *bp, int budget) 805 { 806 unsigned int len; 807 unsigned int entry; 808 struct sk_buff *skb; 809 struct macb_dma_desc *desc; 810 int count = 0; 811 812 while (count < budget) { 813 u32 ctrl; 814 dma_addr_t addr; 815 bool rxused; 816 817 entry = macb_rx_ring_wrap(bp->rx_tail); 818 desc = &bp->rx_ring[entry]; 819 820 /* Make hw descriptor updates visible to CPU */ 821 rmb(); 822 823 rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false; 824 addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr)); 825 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 826 addr |= ((u64)(desc->addrh) << 32); 827 #endif 828 ctrl = desc->ctrl; 829 830 if (!rxused) 831 break; 832 833 bp->rx_tail++; 834 count++; 835 836 if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) { 837 netdev_err(bp->dev, 838 "not whole frame pointed by descriptor\n"); 839 bp->stats.rx_dropped++; 840 break; 841 } 842 skb = bp->rx_skbuff[entry]; 843 if (unlikely(!skb)) { 844 netdev_err(bp->dev, 845 "inconsistent Rx descriptor chain\n"); 846 bp->stats.rx_dropped++; 847 break; 848 } 849 /* now everything is ready for receiving packet */ 850 bp->rx_skbuff[entry] = NULL; 851 len = ctrl & bp->rx_frm_len_mask; 852 853 netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len); 854 855 skb_put(skb, len); 856 dma_unmap_single(&bp->pdev->dev, addr, 857 bp->rx_buffer_size, DMA_FROM_DEVICE); 858 859 skb->protocol = eth_type_trans(skb, bp->dev); 860 skb_checksum_none_assert(skb); 861 if (bp->dev->features & NETIF_F_RXCSUM && 862 !(bp->dev->flags & IFF_PROMISC) && 863 GEM_BFEXT(RX_CSUM, ctrl) & GEM_RX_CSUM_CHECKED_MASK) 864 skb->ip_summed = CHECKSUM_UNNECESSARY; 865 866 bp->stats.rx_packets++; 867 
bp->stats.rx_bytes += skb->len; 868 869 #if defined(DEBUG) && defined(VERBOSE_DEBUG) 870 netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n", 871 skb->len, skb->csum); 872 print_hex_dump(KERN_DEBUG, " mac: ", DUMP_PREFIX_ADDRESS, 16, 1, 873 skb_mac_header(skb), 16, true); 874 print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_ADDRESS, 16, 1, 875 skb->data, 32, true); 876 #endif 877 878 netif_receive_skb(skb); 879 } 880 881 gem_rx_refill(bp); 882 883 return count; 884 } 885 886 static int macb_rx_frame(struct macb *bp, unsigned int first_frag, 887 unsigned int last_frag) 888 { 889 unsigned int len; 890 unsigned int frag; 891 unsigned int offset; 892 struct sk_buff *skb; 893 struct macb_dma_desc *desc; 894 895 desc = macb_rx_desc(bp, last_frag); 896 len = desc->ctrl & bp->rx_frm_len_mask; 897 898 netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n", 899 macb_rx_ring_wrap(first_frag), 900 macb_rx_ring_wrap(last_frag), len); 901 902 /* The ethernet header starts NET_IP_ALIGN bytes into the 903 * first buffer. Since the header is 14 bytes, this makes the 904 * payload word-aligned. 905 * 906 * Instead of calling skb_reserve(NET_IP_ALIGN), we just copy 907 * the two padding bytes into the skb so that we avoid hitting 908 * the slowpath in memcpy(), and pull them off afterwards. 
909 */ 910 skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN); 911 if (!skb) { 912 bp->stats.rx_dropped++; 913 for (frag = first_frag; ; frag++) { 914 desc = macb_rx_desc(bp, frag); 915 desc->addr &= ~MACB_BIT(RX_USED); 916 if (frag == last_frag) 917 break; 918 } 919 920 /* Make descriptor updates visible to hardware */ 921 wmb(); 922 923 return 1; 924 } 925 926 offset = 0; 927 len += NET_IP_ALIGN; 928 skb_checksum_none_assert(skb); 929 skb_put(skb, len); 930 931 for (frag = first_frag; ; frag++) { 932 unsigned int frag_len = bp->rx_buffer_size; 933 934 if (offset + frag_len > len) { 935 if (unlikely(frag != last_frag)) { 936 dev_kfree_skb_any(skb); 937 return -1; 938 } 939 frag_len = len - offset; 940 } 941 skb_copy_to_linear_data_offset(skb, offset, 942 macb_rx_buffer(bp, frag), 943 frag_len); 944 offset += bp->rx_buffer_size; 945 desc = macb_rx_desc(bp, frag); 946 desc->addr &= ~MACB_BIT(RX_USED); 947 948 if (frag == last_frag) 949 break; 950 } 951 952 /* Make descriptor updates visible to hardware */ 953 wmb(); 954 955 __skb_pull(skb, NET_IP_ALIGN); 956 skb->protocol = eth_type_trans(skb, bp->dev); 957 958 bp->stats.rx_packets++; 959 bp->stats.rx_bytes += skb->len; 960 netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n", 961 skb->len, skb->csum); 962 netif_receive_skb(skb); 963 964 return 0; 965 } 966 967 static inline void macb_init_rx_ring(struct macb *bp) 968 { 969 dma_addr_t addr; 970 int i; 971 972 addr = bp->rx_buffers_dma; 973 for (i = 0; i < RX_RING_SIZE; i++) { 974 bp->rx_ring[i].addr = addr; 975 bp->rx_ring[i].ctrl = 0; 976 addr += bp->rx_buffer_size; 977 } 978 bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP); 979 } 980 981 static int macb_rx(struct macb *bp, int budget) 982 { 983 bool reset_rx_queue = false; 984 int received = 0; 985 unsigned int tail; 986 int first_frag = -1; 987 988 for (tail = bp->rx_tail; budget > 0; tail++) { 989 struct macb_dma_desc *desc = macb_rx_desc(bp, tail); 990 u32 addr, ctrl; 991 992 /* Make hw 
descriptor updates visible to CPU */ 993 rmb(); 994 995 addr = desc->addr; 996 ctrl = desc->ctrl; 997 998 if (!(addr & MACB_BIT(RX_USED))) 999 break; 1000 1001 if (ctrl & MACB_BIT(RX_SOF)) { 1002 if (first_frag != -1) 1003 discard_partial_frame(bp, first_frag, tail); 1004 first_frag = tail; 1005 } 1006 1007 if (ctrl & MACB_BIT(RX_EOF)) { 1008 int dropped; 1009 1010 if (unlikely(first_frag == -1)) { 1011 reset_rx_queue = true; 1012 continue; 1013 } 1014 1015 dropped = macb_rx_frame(bp, first_frag, tail); 1016 first_frag = -1; 1017 if (unlikely(dropped < 0)) { 1018 reset_rx_queue = true; 1019 continue; 1020 } 1021 if (!dropped) { 1022 received++; 1023 budget--; 1024 } 1025 } 1026 } 1027 1028 if (unlikely(reset_rx_queue)) { 1029 unsigned long flags; 1030 u32 ctrl; 1031 1032 netdev_err(bp->dev, "RX queue corruption: reset it\n"); 1033 1034 spin_lock_irqsave(&bp->lock, flags); 1035 1036 ctrl = macb_readl(bp, NCR); 1037 macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE)); 1038 1039 macb_init_rx_ring(bp); 1040 macb_writel(bp, RBQP, bp->rx_ring_dma); 1041 1042 macb_writel(bp, NCR, ctrl | MACB_BIT(RE)); 1043 1044 spin_unlock_irqrestore(&bp->lock, flags); 1045 return received; 1046 } 1047 1048 if (first_frag != -1) 1049 bp->rx_tail = first_frag; 1050 else 1051 bp->rx_tail = tail; 1052 1053 return received; 1054 } 1055 1056 static int macb_poll(struct napi_struct *napi, int budget) 1057 { 1058 struct macb *bp = container_of(napi, struct macb, napi); 1059 int work_done; 1060 u32 status; 1061 1062 status = macb_readl(bp, RSR); 1063 macb_writel(bp, RSR, status); 1064 1065 work_done = 0; 1066 1067 netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n", 1068 (unsigned long)status, budget); 1069 1070 work_done = bp->macbgem_ops.mog_rx(bp, budget); 1071 if (work_done < budget) { 1072 napi_complete(napi); 1073 1074 /* Packets received while interrupts were disabled */ 1075 status = macb_readl(bp, RSR); 1076 if (status) { 1077 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1078 
macb_writel(bp, ISR, MACB_BIT(RCOMP)); 1079 napi_reschedule(napi); 1080 } else { 1081 macb_writel(bp, IER, MACB_RX_INT_FLAGS); 1082 } 1083 } 1084 1085 /* TODO: Handle errors */ 1086 1087 return work_done; 1088 } 1089 1090 static irqreturn_t macb_interrupt(int irq, void *dev_id) 1091 { 1092 struct macb_queue *queue = dev_id; 1093 struct macb *bp = queue->bp; 1094 struct net_device *dev = bp->dev; 1095 u32 status, ctrl; 1096 1097 status = queue_readl(queue, ISR); 1098 1099 if (unlikely(!status)) 1100 return IRQ_NONE; 1101 1102 spin_lock(&bp->lock); 1103 1104 while (status) { 1105 /* close possible race with dev_close */ 1106 if (unlikely(!netif_running(dev))) { 1107 queue_writel(queue, IDR, -1); 1108 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1109 queue_writel(queue, ISR, -1); 1110 break; 1111 } 1112 1113 netdev_vdbg(bp->dev, "queue = %u, isr = 0x%08lx\n", 1114 (unsigned int)(queue - bp->queues), 1115 (unsigned long)status); 1116 1117 if (status & MACB_RX_INT_FLAGS) { 1118 /* There's no point taking any more interrupts 1119 * until we have processed the buffers. The 1120 * scheduling call may fail if the poll routine 1121 * is already scheduled, so disable interrupts 1122 * now. 1123 */ 1124 queue_writel(queue, IDR, MACB_RX_INT_FLAGS); 1125 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1126 queue_writel(queue, ISR, MACB_BIT(RCOMP)); 1127 1128 if (napi_schedule_prep(&bp->napi)) { 1129 netdev_vdbg(bp->dev, "scheduling RX softirq\n"); 1130 __napi_schedule(&bp->napi); 1131 } 1132 } 1133 1134 if (unlikely(status & (MACB_TX_ERR_FLAGS))) { 1135 queue_writel(queue, IDR, MACB_TX_INT_FLAGS); 1136 schedule_work(&queue->tx_error_task); 1137 1138 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1139 queue_writel(queue, ISR, MACB_TX_ERR_FLAGS); 1140 1141 break; 1142 } 1143 1144 if (status & MACB_BIT(TCOMP)) 1145 macb_tx_interrupt(queue); 1146 1147 /* Link change detection isn't possible with RMII, so we'll 1148 * add that if/when we get our hands on a full-blown MII PHY. 
1149 */ 1150 1151 /* There is a hardware issue under heavy load where DMA can 1152 * stop, this causes endless "used buffer descriptor read" 1153 * interrupts but it can be cleared by re-enabling RX. See 1154 * the at91 manual, section 41.3.1 or the Zynq manual 1155 * section 16.7.4 for details. 1156 */ 1157 if (status & MACB_BIT(RXUBR)) { 1158 ctrl = macb_readl(bp, NCR); 1159 macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE)); 1160 macb_writel(bp, NCR, ctrl | MACB_BIT(RE)); 1161 1162 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1163 queue_writel(queue, ISR, MACB_BIT(RXUBR)); 1164 } 1165 1166 if (status & MACB_BIT(ISR_ROVR)) { 1167 /* We missed at least one packet */ 1168 if (macb_is_gem(bp)) 1169 bp->hw_stats.gem.rx_overruns++; 1170 else 1171 bp->hw_stats.macb.rx_overruns++; 1172 1173 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1174 queue_writel(queue, ISR, MACB_BIT(ISR_ROVR)); 1175 } 1176 1177 if (status & MACB_BIT(HRESP)) { 1178 /* TODO: Reset the hardware, and maybe move the 1179 * netdev_err to a lower-priority context as well 1180 * (work queue?) 1181 */ 1182 netdev_err(dev, "DMA bus error: HRESP not OK\n"); 1183 1184 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1185 queue_writel(queue, ISR, MACB_BIT(HRESP)); 1186 } 1187 1188 status = queue_readl(queue, ISR); 1189 } 1190 1191 spin_unlock(&bp->lock); 1192 1193 return IRQ_HANDLED; 1194 } 1195 1196 #ifdef CONFIG_NET_POLL_CONTROLLER 1197 /* Polling receive - used by netconsole and other diagnostic tools 1198 * to allow network i/o with interrupts disabled. 
 */
static void macb_poll_controller(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct macb_queue *queue;
	unsigned long flags;
	unsigned int q;

	/* Run the normal interrupt handler for every queue with local
	 * interrupts disabled, as netpoll requires.
	 */
	local_irq_save(flags);
	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
		macb_interrupt(dev->irq, queue);
	local_irq_restore(flags);
}
#endif

/* Map an skb for DMA and populate the TX descriptor ring.
 *
 * @bp:    driver private data
 * @queue: TX queue whose ring/skb bookkeeping is updated
 * @skb:   socket buffer to transmit
 *
 * The linear head and each page fragment are split into chunks of at most
 * bp->max_tx_length, one descriptor per chunk.  Descriptors are handed to
 * hardware in reverse order (last first) so the controller never sees a
 * partially built frame.  Returns the number of descriptors consumed, or
 * 0 on DMA-mapping failure (all mappings made so far are unwound).
 */
static unsigned int macb_tx_map(struct macb *bp,
				struct macb_queue *queue,
				struct sk_buff *skb)
{
	dma_addr_t mapping;
	unsigned int len, entry, i, tx_head = queue->tx_head;
	struct macb_tx_skb *tx_skb = NULL;
	struct macb_dma_desc *desc;
	unsigned int offset, size, count = 0;
	unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int eof = 1;
	u32 ctrl;

	/* First, map non-paged data */
	len = skb_headlen(skb);
	offset = 0;
	while (len) {
		size = min(len, bp->max_tx_length);
		entry = macb_tx_ring_wrap(tx_head);
		tx_skb = &queue->tx_skb[entry];

		mapping = dma_map_single(&bp->pdev->dev,
					 skb->data + offset,
					 size, DMA_TO_DEVICE);
		if (dma_mapping_error(&bp->pdev->dev, mapping))
			goto dma_error;

		/* Save info to properly release resources */
		tx_skb->skb = NULL;
		tx_skb->mapping = mapping;
		tx_skb->size = size;
		tx_skb->mapped_as_page = false;

		len -= size;
		offset += size;
		count++;
		tx_head++;
	}

	/* Then, map paged data from fragments */
	for (f = 0; f < nr_frags; f++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];

		len = skb_frag_size(frag);
		offset = 0;
		while (len) {
			size = min(len, bp->max_tx_length);
			entry = macb_tx_ring_wrap(tx_head);
			tx_skb = &queue->tx_skb[entry];

			mapping = skb_frag_dma_map(&bp->pdev->dev, frag,
						   offset, size, DMA_TO_DEVICE);
			if (dma_mapping_error(&bp->pdev->dev, mapping))
				goto dma_error;

			/* Save info to properly release resources */
			tx_skb->skb = NULL;
			tx_skb->mapping = mapping;
			tx_skb->size = size;
			tx_skb->mapped_as_page = true;

			len -= size;
			offset += size;
			count++;
			tx_head++;
		}
	}

	/* Should never happen */
	if (unlikely(!tx_skb)) {
		netdev_err(bp->dev, "BUG! empty skb!\n");
		return 0;
	}

	/* This is the last buffer of the frame: save socket buffer */
	tx_skb->skb = skb;

	/* Update TX ring: update buffer descriptors in reverse order
	 * to avoid race condition
	 */

	/* Set 'TX_USED' bit in buffer descriptor at tx_head position
	 * to set the end of TX queue
	 */
	i = tx_head;
	entry = macb_tx_ring_wrap(i);
	ctrl = MACB_BIT(TX_USED);
	desc = &queue->tx_ring[entry];
	desc->ctrl = ctrl;

	/* Walk backwards from tx_head to the old head, arming each
	 * descriptor; the first one visited is the end of the frame.
	 */
	do {
		i--;
		entry = macb_tx_ring_wrap(i);
		tx_skb = &queue->tx_skb[entry];
		desc = &queue->tx_ring[entry];

		ctrl = (u32)tx_skb->size;
		if (eof) {
			ctrl |= MACB_BIT(TX_LAST);
			eof = 0;
		}
		if (unlikely(entry == (TX_RING_SIZE - 1)))
			ctrl |= MACB_BIT(TX_WRAP);

		/* Set TX buffer descriptor */
		macb_set_addr(desc, tx_skb->mapping);
		/* desc->addr must be visible to hardware before clearing
		 * 'TX_USED' bit in desc->ctrl.
		 */
		wmb();
		desc->ctrl = ctrl;
	} while (i != queue->tx_head);

	queue->tx_head = tx_head;

	return count;

dma_error:
	netdev_err(bp->dev, "TX DMA map failed\n");

	/* Unwind every mapping created for this frame so far */
	for (i = queue->tx_head; i != tx_head; i++) {
		tx_skb = macb_tx_skb(queue, i);

		macb_tx_unmap(bp, tx_skb);
	}

	return 0;
}

/* Zero the L4 checksum field of a CHECKSUM_PARTIAL skb before handing it
 * to the hardware checksum offload.  Returns 0 on success (or when no
 * offload is requested), -1 if the header could not be made writable.
 */
static inline int macb_clear_csum(struct sk_buff *skb)
{
	/* no change for packets without checksum offloading */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	/* make sure we can modify the header */
	if (unlikely(skb_cow_head(skb, 0)))
		return -1;

	/* initialize checksum field
	 * This is required - at least for Zynq, which otherwise calculates
	 * wrong UDP header checksums for UDP packets with UDP data len <=2
	 */
	*(__sum16 *)(skb_checksum_start(skb) + skb->csum_offset) = 0;
	return 0;
}

/* .ndo_start_xmit handler: queue one skb for transmission on the TX
 * queue selected by skb_get_queue_mapping().
 */
static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	u16 queue_index = skb_get_queue_mapping(skb);
	struct macb *bp = netdev_priv(dev);
	struct macb_queue *queue = &bp->queues[queue_index];
	unsigned long flags;
	unsigned int count, nr_frags, frag_size, f;

#if defined(DEBUG) && defined(VERBOSE_DEBUG)
	netdev_vdbg(bp->dev,
		    "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n",
		    queue_index, skb->len, skb->head, skb->data,
		    skb_tail_pointer(skb), skb_end_pointer(skb));
	print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1,
		       skb->data, 16, true);
#endif

	/* Count how many TX buffer descriptors are needed to send this
	 * socket buffer: skb fragments of jumbo frames may need to be
	 * split into many buffer descriptors.
	 */
	count = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length);
	nr_frags = skb_shinfo(skb)->nr_frags;
	for (f = 0; f < nr_frags; f++) {
		frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]);
		count += DIV_ROUND_UP(frag_size, bp->max_tx_length);
	}

	spin_lock_irqsave(&bp->lock, flags);

	/* This is a hard error, log it. */
	if (CIRC_SPACE(queue->tx_head, queue->tx_tail, TX_RING_SIZE) < count) {
		/* Not enough free descriptors: stop the subqueue and ask
		 * the stack to retry later.
		 */
		netif_stop_subqueue(dev, queue_index);
		spin_unlock_irqrestore(&bp->lock, flags);
		netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
			   queue->tx_head, queue->tx_tail);
		return NETDEV_TX_BUSY;
	}

	/* Note: on csum/mapping failure the skb is dropped, yet we still
	 * return NETDEV_TX_OK — the packet is consumed either way.
	 */
	if (macb_clear_csum(skb)) {
		dev_kfree_skb_any(skb);
		goto unlock;
	}

	/* Map socket buffer for DMA transfer */
	if (!macb_tx_map(bp, queue, skb)) {
		dev_kfree_skb_any(skb);
		goto unlock;
	}

	/* Make newly initialized descriptor visible to hardware */
	wmb();

	skb_tx_timestamp(skb);

	/* Kick the transmitter */
	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));

	/* Ring is full: stop the subqueue until TX completion frees space */
	if (CIRC_SPACE(queue->tx_head, queue->tx_tail, TX_RING_SIZE) < 1)
		netif_stop_subqueue(dev, queue_index);

unlock:
	spin_unlock_irqrestore(&bp->lock, flags);

	return NETDEV_TX_OK;
}

/* Choose bp->rx_buffer_size: the fixed MACB_RX_BUFFER_SIZE on plain MACB
 * hardware, or @size rounded up to a multiple of RX_BUFFER_MULTIPLE on GEM.
 */
static void macb_init_rx_buffer_size(struct macb *bp, size_t size)
{
	if (!macb_is_gem(bp)) {
		bp->rx_buffer_size = MACB_RX_BUFFER_SIZE;
	} else {
		bp->rx_buffer_size = size;

		if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) {
			netdev_dbg(bp->dev,
				   "RX buffer must be multiple of %d bytes, expanding\n",
				   RX_BUFFER_MULTIPLE);
			bp->rx_buffer_size =
				roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE);
		}
	}

	netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%Zu]\n",
		   bp->dev->mtu, bp->rx_buffer_size);
}

/* Unmap and free every RX skb tracked in bp->rx_skbuff, then release the
 * tracking array itself (GEM variant of mog_free_rx_buffers).
 */
static void gem_free_rx_buffers(struct macb *bp)
{
	struct sk_buff *skb;
struct macb_dma_desc *desc; 1451 dma_addr_t addr; 1452 int i; 1453 1454 if (!bp->rx_skbuff) 1455 return; 1456 1457 for (i = 0; i < RX_RING_SIZE; i++) { 1458 skb = bp->rx_skbuff[i]; 1459 1460 if (!skb) 1461 continue; 1462 1463 desc = &bp->rx_ring[i]; 1464 addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr)); 1465 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 1466 addr |= ((u64)(desc->addrh) << 32); 1467 #endif 1468 dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size, 1469 DMA_FROM_DEVICE); 1470 dev_kfree_skb_any(skb); 1471 skb = NULL; 1472 } 1473 1474 kfree(bp->rx_skbuff); 1475 bp->rx_skbuff = NULL; 1476 } 1477 1478 static void macb_free_rx_buffers(struct macb *bp) 1479 { 1480 if (bp->rx_buffers) { 1481 dma_free_coherent(&bp->pdev->dev, 1482 RX_RING_SIZE * bp->rx_buffer_size, 1483 bp->rx_buffers, bp->rx_buffers_dma); 1484 bp->rx_buffers = NULL; 1485 } 1486 } 1487 1488 static void macb_free_consistent(struct macb *bp) 1489 { 1490 struct macb_queue *queue; 1491 unsigned int q; 1492 1493 bp->macbgem_ops.mog_free_rx_buffers(bp); 1494 if (bp->rx_ring) { 1495 dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES, 1496 bp->rx_ring, bp->rx_ring_dma); 1497 bp->rx_ring = NULL; 1498 } 1499 1500 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 1501 kfree(queue->tx_skb); 1502 queue->tx_skb = NULL; 1503 if (queue->tx_ring) { 1504 dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES, 1505 queue->tx_ring, queue->tx_ring_dma); 1506 queue->tx_ring = NULL; 1507 } 1508 } 1509 } 1510 1511 static int gem_alloc_rx_buffers(struct macb *bp) 1512 { 1513 int size; 1514 1515 size = RX_RING_SIZE * sizeof(struct sk_buff *); 1516 bp->rx_skbuff = kzalloc(size, GFP_KERNEL); 1517 if (!bp->rx_skbuff) 1518 return -ENOMEM; 1519 1520 netdev_dbg(bp->dev, 1521 "Allocated %d RX struct sk_buff entries at %p\n", 1522 RX_RING_SIZE, bp->rx_skbuff); 1523 return 0; 1524 } 1525 1526 static int macb_alloc_rx_buffers(struct macb *bp) 1527 { 1528 int size; 1529 1530 size = RX_RING_SIZE * 
bp->rx_buffer_size; 1531 bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size, 1532 &bp->rx_buffers_dma, GFP_KERNEL); 1533 if (!bp->rx_buffers) 1534 return -ENOMEM; 1535 1536 netdev_dbg(bp->dev, 1537 "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n", 1538 size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers); 1539 return 0; 1540 } 1541 1542 static int macb_alloc_consistent(struct macb *bp) 1543 { 1544 struct macb_queue *queue; 1545 unsigned int q; 1546 int size; 1547 1548 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 1549 size = TX_RING_BYTES; 1550 queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size, 1551 &queue->tx_ring_dma, 1552 GFP_KERNEL); 1553 if (!queue->tx_ring) 1554 goto out_err; 1555 netdev_dbg(bp->dev, 1556 "Allocated TX ring for queue %u of %d bytes at %08lx (mapped %p)\n", 1557 q, size, (unsigned long)queue->tx_ring_dma, 1558 queue->tx_ring); 1559 1560 size = TX_RING_SIZE * sizeof(struct macb_tx_skb); 1561 queue->tx_skb = kmalloc(size, GFP_KERNEL); 1562 if (!queue->tx_skb) 1563 goto out_err; 1564 } 1565 1566 size = RX_RING_BYTES; 1567 bp->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size, 1568 &bp->rx_ring_dma, GFP_KERNEL); 1569 if (!bp->rx_ring) 1570 goto out_err; 1571 netdev_dbg(bp->dev, 1572 "Allocated RX ring of %d bytes at %08lx (mapped %p)\n", 1573 size, (unsigned long)bp->rx_ring_dma, bp->rx_ring); 1574 1575 if (bp->macbgem_ops.mog_alloc_rx_buffers(bp)) 1576 goto out_err; 1577 1578 return 0; 1579 1580 out_err: 1581 macb_free_consistent(bp); 1582 return -ENOMEM; 1583 } 1584 1585 static void gem_init_rings(struct macb *bp) 1586 { 1587 struct macb_queue *queue; 1588 unsigned int q; 1589 int i; 1590 1591 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 1592 for (i = 0; i < TX_RING_SIZE; i++) { 1593 macb_set_addr(&(queue->tx_ring[i]), 0); 1594 queue->tx_ring[i].ctrl = MACB_BIT(TX_USED); 1595 } 1596 queue->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP); 1597 queue->tx_head = 0; 1598 
queue->tx_tail = 0; 1599 } 1600 1601 bp->rx_tail = 0; 1602 bp->rx_prepared_head = 0; 1603 1604 gem_rx_refill(bp); 1605 } 1606 1607 static void macb_init_rings(struct macb *bp) 1608 { 1609 int i; 1610 1611 macb_init_rx_ring(bp); 1612 1613 for (i = 0; i < TX_RING_SIZE; i++) { 1614 bp->queues[0].tx_ring[i].addr = 0; 1615 bp->queues[0].tx_ring[i].ctrl = MACB_BIT(TX_USED); 1616 } 1617 bp->queues[0].tx_head = 0; 1618 bp->queues[0].tx_tail = 0; 1619 bp->queues[0].tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP); 1620 1621 bp->rx_tail = 0; 1622 } 1623 1624 static void macb_reset_hw(struct macb *bp) 1625 { 1626 struct macb_queue *queue; 1627 unsigned int q; 1628 1629 /* Disable RX and TX (XXX: Should we halt the transmission 1630 * more gracefully?) 1631 */ 1632 macb_writel(bp, NCR, 0); 1633 1634 /* Clear the stats registers (XXX: Update stats first?) */ 1635 macb_writel(bp, NCR, MACB_BIT(CLRSTAT)); 1636 1637 /* Clear all status flags */ 1638 macb_writel(bp, TSR, -1); 1639 macb_writel(bp, RSR, -1); 1640 1641 /* Disable all interrupts */ 1642 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 1643 queue_writel(queue, IDR, -1); 1644 queue_readl(queue, ISR); 1645 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1646 queue_writel(queue, ISR, -1); 1647 } 1648 } 1649 1650 static u32 gem_mdc_clk_div(struct macb *bp) 1651 { 1652 u32 config; 1653 unsigned long pclk_hz = clk_get_rate(bp->pclk); 1654 1655 if (pclk_hz <= 20000000) 1656 config = GEM_BF(CLK, GEM_CLK_DIV8); 1657 else if (pclk_hz <= 40000000) 1658 config = GEM_BF(CLK, GEM_CLK_DIV16); 1659 else if (pclk_hz <= 80000000) 1660 config = GEM_BF(CLK, GEM_CLK_DIV32); 1661 else if (pclk_hz <= 120000000) 1662 config = GEM_BF(CLK, GEM_CLK_DIV48); 1663 else if (pclk_hz <= 160000000) 1664 config = GEM_BF(CLK, GEM_CLK_DIV64); 1665 else 1666 config = GEM_BF(CLK, GEM_CLK_DIV96); 1667 1668 return config; 1669 } 1670 1671 static u32 macb_mdc_clk_div(struct macb *bp) 1672 { 1673 u32 config; 1674 unsigned long pclk_hz; 1675 
1676 if (macb_is_gem(bp)) 1677 return gem_mdc_clk_div(bp); 1678 1679 pclk_hz = clk_get_rate(bp->pclk); 1680 if (pclk_hz <= 20000000) 1681 config = MACB_BF(CLK, MACB_CLK_DIV8); 1682 else if (pclk_hz <= 40000000) 1683 config = MACB_BF(CLK, MACB_CLK_DIV16); 1684 else if (pclk_hz <= 80000000) 1685 config = MACB_BF(CLK, MACB_CLK_DIV32); 1686 else 1687 config = MACB_BF(CLK, MACB_CLK_DIV64); 1688 1689 return config; 1690 } 1691 1692 /* Get the DMA bus width field of the network configuration register that we 1693 * should program. We find the width from decoding the design configuration 1694 * register to find the maximum supported data bus width. 1695 */ 1696 static u32 macb_dbw(struct macb *bp) 1697 { 1698 if (!macb_is_gem(bp)) 1699 return 0; 1700 1701 switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) { 1702 case 4: 1703 return GEM_BF(DBW, GEM_DBW128); 1704 case 2: 1705 return GEM_BF(DBW, GEM_DBW64); 1706 case 1: 1707 default: 1708 return GEM_BF(DBW, GEM_DBW32); 1709 } 1710 } 1711 1712 /* Configure the receive DMA engine 1713 * - use the correct receive buffer size 1714 * - set best burst length for DMA operations 1715 * (if not supported by FIFO, it will fallback to default) 1716 * - set both rx/tx packet buffers to full memory size 1717 * These are configurable parameters for GEM. 
1718 */ 1719 static void macb_configure_dma(struct macb *bp) 1720 { 1721 u32 dmacfg; 1722 1723 if (macb_is_gem(bp)) { 1724 dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L); 1725 dmacfg |= GEM_BF(RXBS, bp->rx_buffer_size / RX_BUFFER_MULTIPLE); 1726 if (bp->dma_burst_length) 1727 dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg); 1728 dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L); 1729 dmacfg &= ~GEM_BIT(ENDIA_PKT); 1730 1731 if (bp->native_io) 1732 dmacfg &= ~GEM_BIT(ENDIA_DESC); 1733 else 1734 dmacfg |= GEM_BIT(ENDIA_DESC); /* CPU in big endian */ 1735 1736 if (bp->dev->features & NETIF_F_HW_CSUM) 1737 dmacfg |= GEM_BIT(TXCOEN); 1738 else 1739 dmacfg &= ~GEM_BIT(TXCOEN); 1740 1741 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 1742 dmacfg |= GEM_BIT(ADDR64); 1743 #endif 1744 netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n", 1745 dmacfg); 1746 gem_writel(bp, DMACFG, dmacfg); 1747 } 1748 } 1749 1750 static void macb_init_hw(struct macb *bp) 1751 { 1752 struct macb_queue *queue; 1753 unsigned int q; 1754 1755 u32 config; 1756 1757 macb_reset_hw(bp); 1758 macb_set_hwaddr(bp); 1759 1760 config = macb_mdc_clk_div(bp); 1761 if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) 1762 config |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL); 1763 config |= MACB_BF(RBOF, NET_IP_ALIGN); /* Make eth data aligned */ 1764 config |= MACB_BIT(PAE); /* PAuse Enable */ 1765 config |= MACB_BIT(DRFCS); /* Discard Rx FCS */ 1766 if (bp->caps & MACB_CAPS_JUMBO) 1767 config |= MACB_BIT(JFRAME); /* Enable jumbo frames */ 1768 else 1769 config |= MACB_BIT(BIG); /* Receive oversized frames */ 1770 if (bp->dev->flags & IFF_PROMISC) 1771 config |= MACB_BIT(CAF); /* Copy All Frames */ 1772 else if (macb_is_gem(bp) && bp->dev->features & NETIF_F_RXCSUM) 1773 config |= GEM_BIT(RXCOEN); 1774 if (!(bp->dev->flags & IFF_BROADCAST)) 1775 config |= MACB_BIT(NBC); /* No BroadCast */ 1776 config |= macb_dbw(bp); 1777 macb_writel(bp, NCFGR, config); 1778 if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len) 
1779 gem_writel(bp, JML, bp->jumbo_max_len); 1780 bp->speed = SPEED_10; 1781 bp->duplex = DUPLEX_HALF; 1782 bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK; 1783 if (bp->caps & MACB_CAPS_JUMBO) 1784 bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK; 1785 1786 macb_configure_dma(bp); 1787 1788 /* Initialize TX and RX buffers */ 1789 macb_writel(bp, RBQP, (u32)(bp->rx_ring_dma)); 1790 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 1791 macb_writel(bp, RBQPH, (u32)(bp->rx_ring_dma >> 32)); 1792 #endif 1793 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 1794 queue_writel(queue, TBQP, (u32)(queue->tx_ring_dma)); 1795 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 1796 queue_writel(queue, TBQPH, (u32)(queue->tx_ring_dma >> 32)); 1797 #endif 1798 1799 /* Enable interrupts */ 1800 queue_writel(queue, IER, 1801 MACB_RX_INT_FLAGS | 1802 MACB_TX_INT_FLAGS | 1803 MACB_BIT(HRESP)); 1804 } 1805 1806 /* Enable TX and RX */ 1807 macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE)); 1808 } 1809 1810 /* The hash address register is 64 bits long and takes up two 1811 * locations in the memory map. The least significant bits are stored 1812 * in EMAC_HSL and the most significant bits in EMAC_HSH. 1813 * 1814 * The unicast hash enable and the multicast hash enable bits in the 1815 * network configuration register enable the reception of hash matched 1816 * frames. The destination address is reduced to a 6 bit index into 1817 * the 64 bit hash register using the following hash function. The 1818 * hash function is an exclusive or of every sixth bit of the 1819 * destination address. 
1820 * 1821 * hi[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47] 1822 * hi[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46] 1823 * hi[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45] 1824 * hi[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44] 1825 * hi[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43] 1826 * hi[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42] 1827 * 1828 * da[0] represents the least significant bit of the first byte 1829 * received, that is, the multicast/unicast indicator, and da[47] 1830 * represents the most significant bit of the last byte received. If 1831 * the hash index, hi[n], points to a bit that is set in the hash 1832 * register then the frame will be matched according to whether the 1833 * frame is multicast or unicast. A multicast match will be signalled 1834 * if the multicast hash enable bit is set, da[0] is 1 and the hash 1835 * index points to a bit set in the hash register. A unicast match 1836 * will be signalled if the unicast hash enable bit is set, da[0] is 0 1837 * and the hash index points to a bit set in the hash register. To 1838 * receive all multicast frames, the hash register should be set with 1839 * all ones and the multicast hash enable bit should be set in the 1840 * network configuration register. 1841 */ 1842 1843 static inline int hash_bit_value(int bitnr, __u8 *addr) 1844 { 1845 if (addr[bitnr / 8] & (1 << (bitnr % 8))) 1846 return 1; 1847 return 0; 1848 } 1849 1850 /* Return the hash index value for the specified address. 
*/ 1851 static int hash_get_index(__u8 *addr) 1852 { 1853 int i, j, bitval; 1854 int hash_index = 0; 1855 1856 for (j = 0; j < 6; j++) { 1857 for (i = 0, bitval = 0; i < 8; i++) 1858 bitval ^= hash_bit_value(i * 6 + j, addr); 1859 1860 hash_index |= (bitval << j); 1861 } 1862 1863 return hash_index; 1864 } 1865 1866 /* Add multicast addresses to the internal multicast-hash table. */ 1867 static void macb_sethashtable(struct net_device *dev) 1868 { 1869 struct netdev_hw_addr *ha; 1870 unsigned long mc_filter[2]; 1871 unsigned int bitnr; 1872 struct macb *bp = netdev_priv(dev); 1873 1874 mc_filter[0] = 0; 1875 mc_filter[1] = 0; 1876 1877 netdev_for_each_mc_addr(ha, dev) { 1878 bitnr = hash_get_index(ha->addr); 1879 mc_filter[bitnr >> 5] |= 1 << (bitnr & 31); 1880 } 1881 1882 macb_or_gem_writel(bp, HRB, mc_filter[0]); 1883 macb_or_gem_writel(bp, HRT, mc_filter[1]); 1884 } 1885 1886 /* Enable/Disable promiscuous and multicast modes. */ 1887 static void macb_set_rx_mode(struct net_device *dev) 1888 { 1889 unsigned long cfg; 1890 struct macb *bp = netdev_priv(dev); 1891 1892 cfg = macb_readl(bp, NCFGR); 1893 1894 if (dev->flags & IFF_PROMISC) { 1895 /* Enable promiscuous mode */ 1896 cfg |= MACB_BIT(CAF); 1897 1898 /* Disable RX checksum offload */ 1899 if (macb_is_gem(bp)) 1900 cfg &= ~GEM_BIT(RXCOEN); 1901 } else { 1902 /* Disable promiscuous mode */ 1903 cfg &= ~MACB_BIT(CAF); 1904 1905 /* Enable RX checksum offload only if requested */ 1906 if (macb_is_gem(bp) && dev->features & NETIF_F_RXCSUM) 1907 cfg |= GEM_BIT(RXCOEN); 1908 } 1909 1910 if (dev->flags & IFF_ALLMULTI) { 1911 /* Enable all multicast mode */ 1912 macb_or_gem_writel(bp, HRB, -1); 1913 macb_or_gem_writel(bp, HRT, -1); 1914 cfg |= MACB_BIT(NCFGR_MTI); 1915 } else if (!netdev_mc_empty(dev)) { 1916 /* Enable specific multicasts */ 1917 macb_sethashtable(dev); 1918 cfg |= MACB_BIT(NCFGR_MTI); 1919 } else if (dev->flags & (~IFF_ALLMULTI)) { 1920 /* Disable all multicast mode */ 1921 macb_or_gem_writel(bp, 
HRB, 0); 1922 macb_or_gem_writel(bp, HRT, 0); 1923 cfg &= ~MACB_BIT(NCFGR_MTI); 1924 } 1925 1926 macb_writel(bp, NCFGR, cfg); 1927 } 1928 1929 static int macb_open(struct net_device *dev) 1930 { 1931 struct macb *bp = netdev_priv(dev); 1932 size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN; 1933 int err; 1934 1935 netdev_dbg(bp->dev, "open\n"); 1936 1937 /* carrier starts down */ 1938 netif_carrier_off(dev); 1939 1940 /* if the phy is not yet register, retry later*/ 1941 if (!dev->phydev) 1942 return -EAGAIN; 1943 1944 /* RX buffers initialization */ 1945 macb_init_rx_buffer_size(bp, bufsz); 1946 1947 err = macb_alloc_consistent(bp); 1948 if (err) { 1949 netdev_err(dev, "Unable to allocate DMA memory (error %d)\n", 1950 err); 1951 return err; 1952 } 1953 1954 napi_enable(&bp->napi); 1955 1956 bp->macbgem_ops.mog_init_rings(bp); 1957 macb_init_hw(bp); 1958 1959 /* schedule a link state check */ 1960 phy_start(dev->phydev); 1961 1962 netif_tx_start_all_queues(dev); 1963 1964 return 0; 1965 } 1966 1967 static int macb_close(struct net_device *dev) 1968 { 1969 struct macb *bp = netdev_priv(dev); 1970 unsigned long flags; 1971 1972 netif_tx_stop_all_queues(dev); 1973 napi_disable(&bp->napi); 1974 1975 if (dev->phydev) 1976 phy_stop(dev->phydev); 1977 1978 spin_lock_irqsave(&bp->lock, flags); 1979 macb_reset_hw(bp); 1980 netif_carrier_off(dev); 1981 spin_unlock_irqrestore(&bp->lock, flags); 1982 1983 macb_free_consistent(bp); 1984 1985 return 0; 1986 } 1987 1988 static int macb_change_mtu(struct net_device *dev, int new_mtu) 1989 { 1990 struct macb *bp = netdev_priv(dev); 1991 u32 max_mtu; 1992 1993 if (netif_running(dev)) 1994 return -EBUSY; 1995 1996 max_mtu = ETH_DATA_LEN; 1997 if (bp->caps & MACB_CAPS_JUMBO) 1998 max_mtu = gem_readl(bp, JML) - ETH_HLEN - ETH_FCS_LEN; 1999 2000 if ((new_mtu > max_mtu) || (new_mtu < GEM_MTU_MIN_SIZE)) 2001 return -EINVAL; 2002 2003 dev->mtu = new_mtu; 2004 2005 return 0; 2006 } 2007 2008 static void 
gem_update_stats(struct macb *bp) 2009 { 2010 unsigned int i; 2011 u32 *p = &bp->hw_stats.gem.tx_octets_31_0; 2012 2013 for (i = 0; i < GEM_STATS_LEN; ++i, ++p) { 2014 u32 offset = gem_statistics[i].offset; 2015 u64 val = bp->macb_reg_readl(bp, offset); 2016 2017 bp->ethtool_stats[i] += val; 2018 *p += val; 2019 2020 if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) { 2021 /* Add GEM_OCTTXH, GEM_OCTRXH */ 2022 val = bp->macb_reg_readl(bp, offset + 4); 2023 bp->ethtool_stats[i] += ((u64)val) << 32; 2024 *(++p) += val; 2025 } 2026 } 2027 } 2028 2029 static struct net_device_stats *gem_get_stats(struct macb *bp) 2030 { 2031 struct gem_stats *hwstat = &bp->hw_stats.gem; 2032 struct net_device_stats *nstat = &bp->stats; 2033 2034 gem_update_stats(bp); 2035 2036 nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors + 2037 hwstat->rx_alignment_errors + 2038 hwstat->rx_resource_errors + 2039 hwstat->rx_overruns + 2040 hwstat->rx_oversize_frames + 2041 hwstat->rx_jabbers + 2042 hwstat->rx_undersized_frames + 2043 hwstat->rx_length_field_frame_errors); 2044 nstat->tx_errors = (hwstat->tx_late_collisions + 2045 hwstat->tx_excessive_collisions + 2046 hwstat->tx_underrun + 2047 hwstat->tx_carrier_sense_errors); 2048 nstat->multicast = hwstat->rx_multicast_frames; 2049 nstat->collisions = (hwstat->tx_single_collision_frames + 2050 hwstat->tx_multiple_collision_frames + 2051 hwstat->tx_excessive_collisions); 2052 nstat->rx_length_errors = (hwstat->rx_oversize_frames + 2053 hwstat->rx_jabbers + 2054 hwstat->rx_undersized_frames + 2055 hwstat->rx_length_field_frame_errors); 2056 nstat->rx_over_errors = hwstat->rx_resource_errors; 2057 nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors; 2058 nstat->rx_frame_errors = hwstat->rx_alignment_errors; 2059 nstat->rx_fifo_errors = hwstat->rx_overruns; 2060 nstat->tx_aborted_errors = hwstat->tx_excessive_collisions; 2061 nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors; 2062 nstat->tx_fifo_errors = 
hwstat->tx_underrun; 2063 2064 return nstat; 2065 } 2066 2067 static void gem_get_ethtool_stats(struct net_device *dev, 2068 struct ethtool_stats *stats, u64 *data) 2069 { 2070 struct macb *bp; 2071 2072 bp = netdev_priv(dev); 2073 gem_update_stats(bp); 2074 memcpy(data, &bp->ethtool_stats, sizeof(u64) * GEM_STATS_LEN); 2075 } 2076 2077 static int gem_get_sset_count(struct net_device *dev, int sset) 2078 { 2079 switch (sset) { 2080 case ETH_SS_STATS: 2081 return GEM_STATS_LEN; 2082 default: 2083 return -EOPNOTSUPP; 2084 } 2085 } 2086 2087 static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p) 2088 { 2089 unsigned int i; 2090 2091 switch (sset) { 2092 case ETH_SS_STATS: 2093 for (i = 0; i < GEM_STATS_LEN; i++, p += ETH_GSTRING_LEN) 2094 memcpy(p, gem_statistics[i].stat_string, 2095 ETH_GSTRING_LEN); 2096 break; 2097 } 2098 } 2099 2100 static struct net_device_stats *macb_get_stats(struct net_device *dev) 2101 { 2102 struct macb *bp = netdev_priv(dev); 2103 struct net_device_stats *nstat = &bp->stats; 2104 struct macb_stats *hwstat = &bp->hw_stats.macb; 2105 2106 if (macb_is_gem(bp)) 2107 return gem_get_stats(bp); 2108 2109 /* read stats from hardware */ 2110 macb_update_stats(bp); 2111 2112 /* Convert HW stats into netdevice stats */ 2113 nstat->rx_errors = (hwstat->rx_fcs_errors + 2114 hwstat->rx_align_errors + 2115 hwstat->rx_resource_errors + 2116 hwstat->rx_overruns + 2117 hwstat->rx_oversize_pkts + 2118 hwstat->rx_jabbers + 2119 hwstat->rx_undersize_pkts + 2120 hwstat->rx_length_mismatch); 2121 nstat->tx_errors = (hwstat->tx_late_cols + 2122 hwstat->tx_excessive_cols + 2123 hwstat->tx_underruns + 2124 hwstat->tx_carrier_errors + 2125 hwstat->sqe_test_errors); 2126 nstat->collisions = (hwstat->tx_single_cols + 2127 hwstat->tx_multiple_cols + 2128 hwstat->tx_excessive_cols); 2129 nstat->rx_length_errors = (hwstat->rx_oversize_pkts + 2130 hwstat->rx_jabbers + 2131 hwstat->rx_undersize_pkts + 2132 hwstat->rx_length_mismatch); 2133 
nstat->rx_over_errors = hwstat->rx_resource_errors + 2134 hwstat->rx_overruns; 2135 nstat->rx_crc_errors = hwstat->rx_fcs_errors; 2136 nstat->rx_frame_errors = hwstat->rx_align_errors; 2137 nstat->rx_fifo_errors = hwstat->rx_overruns; 2138 /* XXX: What does "missed" mean? */ 2139 nstat->tx_aborted_errors = hwstat->tx_excessive_cols; 2140 nstat->tx_carrier_errors = hwstat->tx_carrier_errors; 2141 nstat->tx_fifo_errors = hwstat->tx_underruns; 2142 /* Don't know about heartbeat or window errors... */ 2143 2144 return nstat; 2145 } 2146 2147 static int macb_get_regs_len(struct net_device *netdev) 2148 { 2149 return MACB_GREGS_NBR * sizeof(u32); 2150 } 2151 2152 static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs, 2153 void *p) 2154 { 2155 struct macb *bp = netdev_priv(dev); 2156 unsigned int tail, head; 2157 u32 *regs_buff = p; 2158 2159 regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1)) 2160 | MACB_GREGS_VERSION; 2161 2162 tail = macb_tx_ring_wrap(bp->queues[0].tx_tail); 2163 head = macb_tx_ring_wrap(bp->queues[0].tx_head); 2164 2165 regs_buff[0] = macb_readl(bp, NCR); 2166 regs_buff[1] = macb_or_gem_readl(bp, NCFGR); 2167 regs_buff[2] = macb_readl(bp, NSR); 2168 regs_buff[3] = macb_readl(bp, TSR); 2169 regs_buff[4] = macb_readl(bp, RBQP); 2170 regs_buff[5] = macb_readl(bp, TBQP); 2171 regs_buff[6] = macb_readl(bp, RSR); 2172 regs_buff[7] = macb_readl(bp, IMR); 2173 2174 regs_buff[8] = tail; 2175 regs_buff[9] = head; 2176 regs_buff[10] = macb_tx_dma(&bp->queues[0], tail); 2177 regs_buff[11] = macb_tx_dma(&bp->queues[0], head); 2178 2179 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) 2180 regs_buff[12] = macb_or_gem_readl(bp, USRIO); 2181 if (macb_is_gem(bp)) 2182 regs_buff[13] = gem_readl(bp, DMACFG); 2183 } 2184 2185 static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) 2186 { 2187 struct macb *bp = netdev_priv(netdev); 2188 2189 wol->supported = 0; 2190 wol->wolopts = 0; 2191 2192 if (bp->wol & 
MACB_WOL_HAS_MAGIC_PACKET) { 2193 wol->supported = WAKE_MAGIC; 2194 2195 if (bp->wol & MACB_WOL_ENABLED) 2196 wol->wolopts |= WAKE_MAGIC; 2197 } 2198 } 2199 2200 static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) 2201 { 2202 struct macb *bp = netdev_priv(netdev); 2203 2204 if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) || 2205 (wol->wolopts & ~WAKE_MAGIC)) 2206 return -EOPNOTSUPP; 2207 2208 if (wol->wolopts & WAKE_MAGIC) 2209 bp->wol |= MACB_WOL_ENABLED; 2210 else 2211 bp->wol &= ~MACB_WOL_ENABLED; 2212 2213 device_set_wakeup_enable(&bp->pdev->dev, bp->wol & MACB_WOL_ENABLED); 2214 2215 return 0; 2216 } 2217 2218 static const struct ethtool_ops macb_ethtool_ops = { 2219 .get_regs_len = macb_get_regs_len, 2220 .get_regs = macb_get_regs, 2221 .get_link = ethtool_op_get_link, 2222 .get_ts_info = ethtool_op_get_ts_info, 2223 .get_wol = macb_get_wol, 2224 .set_wol = macb_set_wol, 2225 .get_link_ksettings = phy_ethtool_get_link_ksettings, 2226 .set_link_ksettings = phy_ethtool_set_link_ksettings, 2227 }; 2228 2229 static const struct ethtool_ops gem_ethtool_ops = { 2230 .get_regs_len = macb_get_regs_len, 2231 .get_regs = macb_get_regs, 2232 .get_link = ethtool_op_get_link, 2233 .get_ts_info = ethtool_op_get_ts_info, 2234 .get_ethtool_stats = gem_get_ethtool_stats, 2235 .get_strings = gem_get_ethtool_strings, 2236 .get_sset_count = gem_get_sset_count, 2237 .get_link_ksettings = phy_ethtool_get_link_ksettings, 2238 .set_link_ksettings = phy_ethtool_set_link_ksettings, 2239 }; 2240 2241 static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 2242 { 2243 struct phy_device *phydev = dev->phydev; 2244 2245 if (!netif_running(dev)) 2246 return -EINVAL; 2247 2248 if (!phydev) 2249 return -ENODEV; 2250 2251 return phy_mii_ioctl(phydev, rq, cmd); 2252 } 2253 2254 static int macb_set_features(struct net_device *netdev, 2255 netdev_features_t features) 2256 { 2257 struct macb *bp = netdev_priv(netdev); 2258 netdev_features_t changed = 
features ^ netdev->features; 2259 2260 /* TX checksum offload */ 2261 if ((changed & NETIF_F_HW_CSUM) && macb_is_gem(bp)) { 2262 u32 dmacfg; 2263 2264 dmacfg = gem_readl(bp, DMACFG); 2265 if (features & NETIF_F_HW_CSUM) 2266 dmacfg |= GEM_BIT(TXCOEN); 2267 else 2268 dmacfg &= ~GEM_BIT(TXCOEN); 2269 gem_writel(bp, DMACFG, dmacfg); 2270 } 2271 2272 /* RX checksum offload */ 2273 if ((changed & NETIF_F_RXCSUM) && macb_is_gem(bp)) { 2274 u32 netcfg; 2275 2276 netcfg = gem_readl(bp, NCFGR); 2277 if (features & NETIF_F_RXCSUM && 2278 !(netdev->flags & IFF_PROMISC)) 2279 netcfg |= GEM_BIT(RXCOEN); 2280 else 2281 netcfg &= ~GEM_BIT(RXCOEN); 2282 gem_writel(bp, NCFGR, netcfg); 2283 } 2284 2285 return 0; 2286 } 2287 2288 static const struct net_device_ops macb_netdev_ops = { 2289 .ndo_open = macb_open, 2290 .ndo_stop = macb_close, 2291 .ndo_start_xmit = macb_start_xmit, 2292 .ndo_set_rx_mode = macb_set_rx_mode, 2293 .ndo_get_stats = macb_get_stats, 2294 .ndo_do_ioctl = macb_ioctl, 2295 .ndo_validate_addr = eth_validate_addr, 2296 .ndo_change_mtu = macb_change_mtu, 2297 .ndo_set_mac_address = eth_mac_addr, 2298 #ifdef CONFIG_NET_POLL_CONTROLLER 2299 .ndo_poll_controller = macb_poll_controller, 2300 #endif 2301 .ndo_set_features = macb_set_features, 2302 }; 2303 2304 /* Configure peripheral capabilities according to device tree 2305 * and integration options used 2306 */ 2307 static void macb_configure_caps(struct macb *bp, 2308 const struct macb_config *dt_conf) 2309 { 2310 u32 dcfg; 2311 2312 if (dt_conf) 2313 bp->caps = dt_conf->caps; 2314 2315 if (hw_is_gem(bp->regs, bp->native_io)) { 2316 bp->caps |= MACB_CAPS_MACB_IS_GEM; 2317 2318 dcfg = gem_readl(bp, DCFG1); 2319 if (GEM_BFEXT(IRQCOR, dcfg) == 0) 2320 bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE; 2321 dcfg = gem_readl(bp, DCFG2); 2322 if ((dcfg & (GEM_BIT(RX_PKT_BUFF) | GEM_BIT(TX_PKT_BUFF))) == 0) 2323 bp->caps |= MACB_CAPS_FIFO_MODE; 2324 } 2325 2326 dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps); 2327 } 2328 
2329 static void macb_probe_queues(void __iomem *mem, 2330 bool native_io, 2331 unsigned int *queue_mask, 2332 unsigned int *num_queues) 2333 { 2334 unsigned int hw_q; 2335 2336 *queue_mask = 0x1; 2337 *num_queues = 1; 2338 2339 /* is it macb or gem ? 2340 * 2341 * We need to read directly from the hardware here because 2342 * we are early in the probe process and don't have the 2343 * MACB_CAPS_MACB_IS_GEM flag positioned 2344 */ 2345 if (!hw_is_gem(mem, native_io)) 2346 return; 2347 2348 /* bit 0 is never set but queue 0 always exists */ 2349 *queue_mask = readl_relaxed(mem + GEM_DCFG6) & 0xff; 2350 2351 *queue_mask |= 0x1; 2352 2353 for (hw_q = 1; hw_q < MACB_MAX_QUEUES; ++hw_q) 2354 if (*queue_mask & (1 << hw_q)) 2355 (*num_queues)++; 2356 } 2357 2358 static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, 2359 struct clk **hclk, struct clk **tx_clk, 2360 struct clk **rx_clk) 2361 { 2362 int err; 2363 2364 *pclk = devm_clk_get(&pdev->dev, "pclk"); 2365 if (IS_ERR(*pclk)) { 2366 err = PTR_ERR(*pclk); 2367 dev_err(&pdev->dev, "failed to get macb_clk (%u)\n", err); 2368 return err; 2369 } 2370 2371 *hclk = devm_clk_get(&pdev->dev, "hclk"); 2372 if (IS_ERR(*hclk)) { 2373 err = PTR_ERR(*hclk); 2374 dev_err(&pdev->dev, "failed to get hclk (%u)\n", err); 2375 return err; 2376 } 2377 2378 *tx_clk = devm_clk_get(&pdev->dev, "tx_clk"); 2379 if (IS_ERR(*tx_clk)) 2380 *tx_clk = NULL; 2381 2382 *rx_clk = devm_clk_get(&pdev->dev, "rx_clk"); 2383 if (IS_ERR(*rx_clk)) 2384 *rx_clk = NULL; 2385 2386 err = clk_prepare_enable(*pclk); 2387 if (err) { 2388 dev_err(&pdev->dev, "failed to enable pclk (%u)\n", err); 2389 return err; 2390 } 2391 2392 err = clk_prepare_enable(*hclk); 2393 if (err) { 2394 dev_err(&pdev->dev, "failed to enable hclk (%u)\n", err); 2395 goto err_disable_pclk; 2396 } 2397 2398 err = clk_prepare_enable(*tx_clk); 2399 if (err) { 2400 dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err); 2401 goto err_disable_hclk; 2402 } 2403 2404 err = 
clk_prepare_enable(*rx_clk); 2405 if (err) { 2406 dev_err(&pdev->dev, "failed to enable rx_clk (%u)\n", err); 2407 goto err_disable_txclk; 2408 } 2409 2410 return 0; 2411 2412 err_disable_txclk: 2413 clk_disable_unprepare(*tx_clk); 2414 2415 err_disable_hclk: 2416 clk_disable_unprepare(*hclk); 2417 2418 err_disable_pclk: 2419 clk_disable_unprepare(*pclk); 2420 2421 return err; 2422 } 2423 2424 static int macb_init(struct platform_device *pdev) 2425 { 2426 struct net_device *dev = platform_get_drvdata(pdev); 2427 unsigned int hw_q, q; 2428 struct macb *bp = netdev_priv(dev); 2429 struct macb_queue *queue; 2430 int err; 2431 u32 val; 2432 2433 /* set the queue register mapping once for all: queue0 has a special 2434 * register mapping but we don't want to test the queue index then 2435 * compute the corresponding register offset at run time. 2436 */ 2437 for (hw_q = 0, q = 0; hw_q < MACB_MAX_QUEUES; ++hw_q) { 2438 if (!(bp->queue_mask & (1 << hw_q))) 2439 continue; 2440 2441 queue = &bp->queues[q]; 2442 queue->bp = bp; 2443 if (hw_q) { 2444 queue->ISR = GEM_ISR(hw_q - 1); 2445 queue->IER = GEM_IER(hw_q - 1); 2446 queue->IDR = GEM_IDR(hw_q - 1); 2447 queue->IMR = GEM_IMR(hw_q - 1); 2448 queue->TBQP = GEM_TBQP(hw_q - 1); 2449 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 2450 queue->TBQPH = GEM_TBQPH(hw_q -1); 2451 #endif 2452 } else { 2453 /* queue0 uses legacy registers */ 2454 queue->ISR = MACB_ISR; 2455 queue->IER = MACB_IER; 2456 queue->IDR = MACB_IDR; 2457 queue->IMR = MACB_IMR; 2458 queue->TBQP = MACB_TBQP; 2459 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 2460 queue->TBQPH = MACB_TBQPH; 2461 #endif 2462 } 2463 2464 /* get irq: here we use the linux queue index, not the hardware 2465 * queue index. the queue irq definitions in the device tree 2466 * must remove the optional gaps that could exist in the 2467 * hardware queue mask. 
2468 */ 2469 queue->irq = platform_get_irq(pdev, q); 2470 err = devm_request_irq(&pdev->dev, queue->irq, macb_interrupt, 2471 IRQF_SHARED, dev->name, queue); 2472 if (err) { 2473 dev_err(&pdev->dev, 2474 "Unable to request IRQ %d (error %d)\n", 2475 queue->irq, err); 2476 return err; 2477 } 2478 2479 INIT_WORK(&queue->tx_error_task, macb_tx_error_task); 2480 q++; 2481 } 2482 2483 dev->netdev_ops = &macb_netdev_ops; 2484 netif_napi_add(dev, &bp->napi, macb_poll, 64); 2485 2486 /* setup appropriated routines according to adapter type */ 2487 if (macb_is_gem(bp)) { 2488 bp->max_tx_length = GEM_MAX_TX_LEN; 2489 bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers; 2490 bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers; 2491 bp->macbgem_ops.mog_init_rings = gem_init_rings; 2492 bp->macbgem_ops.mog_rx = gem_rx; 2493 dev->ethtool_ops = &gem_ethtool_ops; 2494 } else { 2495 bp->max_tx_length = MACB_MAX_TX_LEN; 2496 bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers; 2497 bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers; 2498 bp->macbgem_ops.mog_init_rings = macb_init_rings; 2499 bp->macbgem_ops.mog_rx = macb_rx; 2500 dev->ethtool_ops = &macb_ethtool_ops; 2501 } 2502 2503 /* Set features */ 2504 dev->hw_features = NETIF_F_SG; 2505 /* Checksum offload is only available on gem with packet buffer */ 2506 if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE)) 2507 dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM; 2508 if (bp->caps & MACB_CAPS_SG_DISABLED) 2509 dev->hw_features &= ~NETIF_F_SG; 2510 dev->features = dev->hw_features; 2511 2512 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) { 2513 val = 0; 2514 if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII) 2515 val = GEM_BIT(RGMII); 2516 else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII && 2517 (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII)) 2518 val = MACB_BIT(RMII); 2519 else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII)) 2520 val = MACB_BIT(MII); 2521 2522 if (bp->caps & 
MACB_CAPS_USRIO_HAS_CLKEN) 2523 val |= MACB_BIT(CLKEN); 2524 2525 macb_or_gem_writel(bp, USRIO, val); 2526 } 2527 2528 /* Set MII management clock divider */ 2529 val = macb_mdc_clk_div(bp); 2530 val |= macb_dbw(bp); 2531 if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) 2532 val |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL); 2533 macb_writel(bp, NCFGR, val); 2534 2535 return 0; 2536 } 2537 2538 #if defined(CONFIG_OF) 2539 /* 1518 rounded up */ 2540 #define AT91ETHER_MAX_RBUFF_SZ 0x600 2541 /* max number of receive buffers */ 2542 #define AT91ETHER_MAX_RX_DESCR 9 2543 2544 /* Initialize and start the Receiver and Transmit subsystems */ 2545 static int at91ether_start(struct net_device *dev) 2546 { 2547 struct macb *lp = netdev_priv(dev); 2548 dma_addr_t addr; 2549 u32 ctl; 2550 int i; 2551 2552 lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev, 2553 (AT91ETHER_MAX_RX_DESCR * 2554 sizeof(struct macb_dma_desc)), 2555 &lp->rx_ring_dma, GFP_KERNEL); 2556 if (!lp->rx_ring) 2557 return -ENOMEM; 2558 2559 lp->rx_buffers = dma_alloc_coherent(&lp->pdev->dev, 2560 AT91ETHER_MAX_RX_DESCR * 2561 AT91ETHER_MAX_RBUFF_SZ, 2562 &lp->rx_buffers_dma, GFP_KERNEL); 2563 if (!lp->rx_buffers) { 2564 dma_free_coherent(&lp->pdev->dev, 2565 AT91ETHER_MAX_RX_DESCR * 2566 sizeof(struct macb_dma_desc), 2567 lp->rx_ring, lp->rx_ring_dma); 2568 lp->rx_ring = NULL; 2569 return -ENOMEM; 2570 } 2571 2572 addr = lp->rx_buffers_dma; 2573 for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) { 2574 lp->rx_ring[i].addr = addr; 2575 lp->rx_ring[i].ctrl = 0; 2576 addr += AT91ETHER_MAX_RBUFF_SZ; 2577 } 2578 2579 /* Set the Wrap bit on the last descriptor */ 2580 lp->rx_ring[AT91ETHER_MAX_RX_DESCR - 1].addr |= MACB_BIT(RX_WRAP); 2581 2582 /* Reset buffer index */ 2583 lp->rx_tail = 0; 2584 2585 /* Program address of descriptor list in Rx Buffer Queue register */ 2586 macb_writel(lp, RBQP, lp->rx_ring_dma); 2587 2588 /* Enable Receive and Transmit */ 2589 ctl = macb_readl(lp, NCR); 2590 macb_writel(lp, NCR, ctl | 
MACB_BIT(RE) | MACB_BIT(TE)); 2591 2592 return 0; 2593 } 2594 2595 /* Open the ethernet interface */ 2596 static int at91ether_open(struct net_device *dev) 2597 { 2598 struct macb *lp = netdev_priv(dev); 2599 u32 ctl; 2600 int ret; 2601 2602 /* Clear internal statistics */ 2603 ctl = macb_readl(lp, NCR); 2604 macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT)); 2605 2606 macb_set_hwaddr(lp); 2607 2608 ret = at91ether_start(dev); 2609 if (ret) 2610 return ret; 2611 2612 /* Enable MAC interrupts */ 2613 macb_writel(lp, IER, MACB_BIT(RCOMP) | 2614 MACB_BIT(RXUBR) | 2615 MACB_BIT(ISR_TUND) | 2616 MACB_BIT(ISR_RLE) | 2617 MACB_BIT(TCOMP) | 2618 MACB_BIT(ISR_ROVR) | 2619 MACB_BIT(HRESP)); 2620 2621 /* schedule a link state check */ 2622 phy_start(dev->phydev); 2623 2624 netif_start_queue(dev); 2625 2626 return 0; 2627 } 2628 2629 /* Close the interface */ 2630 static int at91ether_close(struct net_device *dev) 2631 { 2632 struct macb *lp = netdev_priv(dev); 2633 u32 ctl; 2634 2635 /* Disable Receiver and Transmitter */ 2636 ctl = macb_readl(lp, NCR); 2637 macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE))); 2638 2639 /* Disable MAC interrupts */ 2640 macb_writel(lp, IDR, MACB_BIT(RCOMP) | 2641 MACB_BIT(RXUBR) | 2642 MACB_BIT(ISR_TUND) | 2643 MACB_BIT(ISR_RLE) | 2644 MACB_BIT(TCOMP) | 2645 MACB_BIT(ISR_ROVR) | 2646 MACB_BIT(HRESP)); 2647 2648 netif_stop_queue(dev); 2649 2650 dma_free_coherent(&lp->pdev->dev, 2651 AT91ETHER_MAX_RX_DESCR * 2652 sizeof(struct macb_dma_desc), 2653 lp->rx_ring, lp->rx_ring_dma); 2654 lp->rx_ring = NULL; 2655 2656 dma_free_coherent(&lp->pdev->dev, 2657 AT91ETHER_MAX_RX_DESCR * AT91ETHER_MAX_RBUFF_SZ, 2658 lp->rx_buffers, lp->rx_buffers_dma); 2659 lp->rx_buffers = NULL; 2660 2661 return 0; 2662 } 2663 2664 /* Transmit packet */ 2665 static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev) 2666 { 2667 struct macb *lp = netdev_priv(dev); 2668 2669 if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) { 2670 netif_stop_queue(dev); 
2671 2672 /* Store packet information (to free when Tx completed) */ 2673 lp->skb = skb; 2674 lp->skb_length = skb->len; 2675 lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len, 2676 DMA_TO_DEVICE); 2677 2678 /* Set address of the data in the Transmit Address register */ 2679 macb_writel(lp, TAR, lp->skb_physaddr); 2680 /* Set length of the packet in the Transmit Control register */ 2681 macb_writel(lp, TCR, skb->len); 2682 2683 } else { 2684 netdev_err(dev, "%s called, but device is busy!\n", __func__); 2685 return NETDEV_TX_BUSY; 2686 } 2687 2688 return NETDEV_TX_OK; 2689 } 2690 2691 /* Extract received frame from buffer descriptors and sent to upper layers. 2692 * (Called from interrupt context) 2693 */ 2694 static void at91ether_rx(struct net_device *dev) 2695 { 2696 struct macb *lp = netdev_priv(dev); 2697 unsigned char *p_recv; 2698 struct sk_buff *skb; 2699 unsigned int pktlen; 2700 2701 while (lp->rx_ring[lp->rx_tail].addr & MACB_BIT(RX_USED)) { 2702 p_recv = lp->rx_buffers + lp->rx_tail * AT91ETHER_MAX_RBUFF_SZ; 2703 pktlen = MACB_BF(RX_FRMLEN, lp->rx_ring[lp->rx_tail].ctrl); 2704 skb = netdev_alloc_skb(dev, pktlen + 2); 2705 if (skb) { 2706 skb_reserve(skb, 2); 2707 memcpy(skb_put(skb, pktlen), p_recv, pktlen); 2708 2709 skb->protocol = eth_type_trans(skb, dev); 2710 lp->stats.rx_packets++; 2711 lp->stats.rx_bytes += pktlen; 2712 netif_rx(skb); 2713 } else { 2714 lp->stats.rx_dropped++; 2715 } 2716 2717 if (lp->rx_ring[lp->rx_tail].ctrl & MACB_BIT(RX_MHASH_MATCH)) 2718 lp->stats.multicast++; 2719 2720 /* reset ownership bit */ 2721 lp->rx_ring[lp->rx_tail].addr &= ~MACB_BIT(RX_USED); 2722 2723 /* wrap after last buffer */ 2724 if (lp->rx_tail == AT91ETHER_MAX_RX_DESCR - 1) 2725 lp->rx_tail = 0; 2726 else 2727 lp->rx_tail++; 2728 } 2729 } 2730 2731 /* MAC interrupt handler */ 2732 static irqreturn_t at91ether_interrupt(int irq, void *dev_id) 2733 { 2734 struct net_device *dev = dev_id; 2735 struct macb *lp = netdev_priv(dev); 2736 u32 intstatus, 
ctl; 2737 2738 /* MAC Interrupt Status register indicates what interrupts are pending. 2739 * It is automatically cleared once read. 2740 */ 2741 intstatus = macb_readl(lp, ISR); 2742 2743 /* Receive complete */ 2744 if (intstatus & MACB_BIT(RCOMP)) 2745 at91ether_rx(dev); 2746 2747 /* Transmit complete */ 2748 if (intstatus & MACB_BIT(TCOMP)) { 2749 /* The TCOM bit is set even if the transmission failed */ 2750 if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE))) 2751 lp->stats.tx_errors++; 2752 2753 if (lp->skb) { 2754 dev_kfree_skb_irq(lp->skb); 2755 lp->skb = NULL; 2756 dma_unmap_single(NULL, lp->skb_physaddr, 2757 lp->skb_length, DMA_TO_DEVICE); 2758 lp->stats.tx_packets++; 2759 lp->stats.tx_bytes += lp->skb_length; 2760 } 2761 netif_wake_queue(dev); 2762 } 2763 2764 /* Work-around for EMAC Errata section 41.3.1 */ 2765 if (intstatus & MACB_BIT(RXUBR)) { 2766 ctl = macb_readl(lp, NCR); 2767 macb_writel(lp, NCR, ctl & ~MACB_BIT(RE)); 2768 macb_writel(lp, NCR, ctl | MACB_BIT(RE)); 2769 } 2770 2771 if (intstatus & MACB_BIT(ISR_ROVR)) 2772 netdev_err(dev, "ROVR error\n"); 2773 2774 return IRQ_HANDLED; 2775 } 2776 2777 #ifdef CONFIG_NET_POLL_CONTROLLER 2778 static void at91ether_poll_controller(struct net_device *dev) 2779 { 2780 unsigned long flags; 2781 2782 local_irq_save(flags); 2783 at91ether_interrupt(dev->irq, dev); 2784 local_irq_restore(flags); 2785 } 2786 #endif 2787 2788 static const struct net_device_ops at91ether_netdev_ops = { 2789 .ndo_open = at91ether_open, 2790 .ndo_stop = at91ether_close, 2791 .ndo_start_xmit = at91ether_start_xmit, 2792 .ndo_get_stats = macb_get_stats, 2793 .ndo_set_rx_mode = macb_set_rx_mode, 2794 .ndo_set_mac_address = eth_mac_addr, 2795 .ndo_do_ioctl = macb_ioctl, 2796 .ndo_validate_addr = eth_validate_addr, 2797 .ndo_change_mtu = eth_change_mtu, 2798 #ifdef CONFIG_NET_POLL_CONTROLLER 2799 .ndo_poll_controller = at91ether_poll_controller, 2800 #endif 2801 }; 2802 2803 static int at91ether_clk_init(struct platform_device 
*pdev, struct clk **pclk, 2804 struct clk **hclk, struct clk **tx_clk, 2805 struct clk **rx_clk) 2806 { 2807 int err; 2808 2809 *hclk = NULL; 2810 *tx_clk = NULL; 2811 *rx_clk = NULL; 2812 2813 *pclk = devm_clk_get(&pdev->dev, "ether_clk"); 2814 if (IS_ERR(*pclk)) 2815 return PTR_ERR(*pclk); 2816 2817 err = clk_prepare_enable(*pclk); 2818 if (err) { 2819 dev_err(&pdev->dev, "failed to enable pclk (%u)\n", err); 2820 return err; 2821 } 2822 2823 return 0; 2824 } 2825 2826 static int at91ether_init(struct platform_device *pdev) 2827 { 2828 struct net_device *dev = platform_get_drvdata(pdev); 2829 struct macb *bp = netdev_priv(dev); 2830 int err; 2831 u32 reg; 2832 2833 dev->netdev_ops = &at91ether_netdev_ops; 2834 dev->ethtool_ops = &macb_ethtool_ops; 2835 2836 err = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt, 2837 0, dev->name, dev); 2838 if (err) 2839 return err; 2840 2841 macb_writel(bp, NCR, 0); 2842 2843 reg = MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG); 2844 if (bp->phy_interface == PHY_INTERFACE_MODE_RMII) 2845 reg |= MACB_BIT(RM9200_RMII); 2846 2847 macb_writel(bp, NCFGR, reg); 2848 2849 return 0; 2850 } 2851 2852 static const struct macb_config at91sam9260_config = { 2853 .caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII, 2854 .clk_init = macb_clk_init, 2855 .init = macb_init, 2856 }; 2857 2858 static const struct macb_config pc302gem_config = { 2859 .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE, 2860 .dma_burst_length = 16, 2861 .clk_init = macb_clk_init, 2862 .init = macb_init, 2863 }; 2864 2865 static const struct macb_config sama5d2_config = { 2866 .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII, 2867 .dma_burst_length = 16, 2868 .clk_init = macb_clk_init, 2869 .init = macb_init, 2870 }; 2871 2872 static const struct macb_config sama5d3_config = { 2873 .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE 2874 | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII, 2875 .dma_burst_length = 16, 2876 
.clk_init = macb_clk_init, 2877 .init = macb_init, 2878 }; 2879 2880 static const struct macb_config sama5d4_config = { 2881 .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII, 2882 .dma_burst_length = 4, 2883 .clk_init = macb_clk_init, 2884 .init = macb_init, 2885 }; 2886 2887 static const struct macb_config emac_config = { 2888 .clk_init = at91ether_clk_init, 2889 .init = at91ether_init, 2890 }; 2891 2892 static const struct macb_config np4_config = { 2893 .caps = MACB_CAPS_USRIO_DISABLED, 2894 .clk_init = macb_clk_init, 2895 .init = macb_init, 2896 }; 2897 2898 static const struct macb_config zynqmp_config = { 2899 .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO, 2900 .dma_burst_length = 16, 2901 .clk_init = macb_clk_init, 2902 .init = macb_init, 2903 .jumbo_max_len = 10240, 2904 }; 2905 2906 static const struct macb_config zynq_config = { 2907 .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF, 2908 .dma_burst_length = 16, 2909 .clk_init = macb_clk_init, 2910 .init = macb_init, 2911 }; 2912 2913 static const struct of_device_id macb_dt_ids[] = { 2914 { .compatible = "cdns,at32ap7000-macb" }, 2915 { .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config }, 2916 { .compatible = "cdns,macb" }, 2917 { .compatible = "cdns,np4-macb", .data = &np4_config }, 2918 { .compatible = "cdns,pc302-gem", .data = &pc302gem_config }, 2919 { .compatible = "cdns,gem", .data = &pc302gem_config }, 2920 { .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config }, 2921 { .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config }, 2922 { .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config }, 2923 { .compatible = "cdns,at91rm9200-emac", .data = &emac_config }, 2924 { .compatible = "cdns,emac", .data = &emac_config }, 2925 { .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config}, 2926 { .compatible = "cdns,zynq-gem", .data = &zynq_config }, 2927 { /* sentinel */ } 2928 }; 2929 MODULE_DEVICE_TABLE(of, macb_dt_ids); 2930 #endif /* CONFIG_OF 
*/ 2931 2932 static int macb_probe(struct platform_device *pdev) 2933 { 2934 int (*clk_init)(struct platform_device *, struct clk **, 2935 struct clk **, struct clk **, struct clk **) 2936 = macb_clk_init; 2937 int (*init)(struct platform_device *) = macb_init; 2938 struct device_node *np = pdev->dev.of_node; 2939 struct device_node *phy_node; 2940 const struct macb_config *macb_config = NULL; 2941 struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL; 2942 unsigned int queue_mask, num_queues; 2943 struct macb_platform_data *pdata; 2944 bool native_io; 2945 struct phy_device *phydev; 2946 struct net_device *dev; 2947 struct resource *regs; 2948 void __iomem *mem; 2949 const char *mac; 2950 struct macb *bp; 2951 int err; 2952 2953 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2954 mem = devm_ioremap_resource(&pdev->dev, regs); 2955 if (IS_ERR(mem)) 2956 return PTR_ERR(mem); 2957 2958 if (np) { 2959 const struct of_device_id *match; 2960 2961 match = of_match_node(macb_dt_ids, np); 2962 if (match && match->data) { 2963 macb_config = match->data; 2964 clk_init = macb_config->clk_init; 2965 init = macb_config->init; 2966 } 2967 } 2968 2969 err = clk_init(pdev, &pclk, &hclk, &tx_clk, &rx_clk); 2970 if (err) 2971 return err; 2972 2973 native_io = hw_is_native_io(mem); 2974 2975 macb_probe_queues(mem, native_io, &queue_mask, &num_queues); 2976 dev = alloc_etherdev_mq(sizeof(*bp), num_queues); 2977 if (!dev) { 2978 err = -ENOMEM; 2979 goto err_disable_clocks; 2980 } 2981 2982 dev->base_addr = regs->start; 2983 2984 SET_NETDEV_DEV(dev, &pdev->dev); 2985 2986 bp = netdev_priv(dev); 2987 bp->pdev = pdev; 2988 bp->dev = dev; 2989 bp->regs = mem; 2990 bp->native_io = native_io; 2991 if (native_io) { 2992 bp->macb_reg_readl = hw_readl_native; 2993 bp->macb_reg_writel = hw_writel_native; 2994 } else { 2995 bp->macb_reg_readl = hw_readl; 2996 bp->macb_reg_writel = hw_writel; 2997 } 2998 bp->num_queues = num_queues; 2999 bp->queue_mask = queue_mask; 3000 if 
(macb_config) 3001 bp->dma_burst_length = macb_config->dma_burst_length; 3002 bp->pclk = pclk; 3003 bp->hclk = hclk; 3004 bp->tx_clk = tx_clk; 3005 bp->rx_clk = rx_clk; 3006 if (macb_config) 3007 bp->jumbo_max_len = macb_config->jumbo_max_len; 3008 3009 bp->wol = 0; 3010 if (of_get_property(np, "magic-packet", NULL)) 3011 bp->wol |= MACB_WOL_HAS_MAGIC_PACKET; 3012 device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET); 3013 3014 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3015 if (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1)) > GEM_DBW32) 3016 dma_set_mask(&pdev->dev, DMA_BIT_MASK(44)); 3017 #endif 3018 3019 spin_lock_init(&bp->lock); 3020 3021 /* setup capabilities */ 3022 macb_configure_caps(bp, macb_config); 3023 3024 platform_set_drvdata(pdev, dev); 3025 3026 dev->irq = platform_get_irq(pdev, 0); 3027 if (dev->irq < 0) { 3028 err = dev->irq; 3029 goto err_out_free_netdev; 3030 } 3031 3032 mac = of_get_mac_address(np); 3033 if (mac) 3034 ether_addr_copy(bp->dev->dev_addr, mac); 3035 else 3036 macb_get_hwaddr(bp); 3037 3038 /* Power up the PHY if there is a GPIO reset */ 3039 phy_node = of_get_next_available_child(np, NULL); 3040 if (phy_node) { 3041 int gpio = of_get_named_gpio(phy_node, "reset-gpios", 0); 3042 3043 if (gpio_is_valid(gpio)) { 3044 bp->reset_gpio = gpio_to_desc(gpio); 3045 gpiod_direction_output(bp->reset_gpio, 1); 3046 } 3047 } 3048 of_node_put(phy_node); 3049 3050 err = of_get_phy_mode(np); 3051 if (err < 0) { 3052 pdata = dev_get_platdata(&pdev->dev); 3053 if (pdata && pdata->is_rmii) 3054 bp->phy_interface = PHY_INTERFACE_MODE_RMII; 3055 else 3056 bp->phy_interface = PHY_INTERFACE_MODE_MII; 3057 } else { 3058 bp->phy_interface = err; 3059 } 3060 3061 /* IP specific init */ 3062 err = init(pdev); 3063 if (err) 3064 goto err_out_free_netdev; 3065 3066 err = macb_mii_init(bp); 3067 if (err) 3068 goto err_out_free_netdev; 3069 3070 phydev = dev->phydev; 3071 3072 netif_carrier_off(dev); 3073 3074 err = register_netdev(dev); 3075 if (err) { 3076 
dev_err(&pdev->dev, "Cannot register net device, aborting.\n"); 3077 goto err_out_unregister_mdio; 3078 } 3079 3080 phy_attached_info(phydev); 3081 3082 netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n", 3083 macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID), 3084 dev->base_addr, dev->irq, dev->dev_addr); 3085 3086 return 0; 3087 3088 err_out_unregister_mdio: 3089 phy_disconnect(dev->phydev); 3090 mdiobus_unregister(bp->mii_bus); 3091 mdiobus_free(bp->mii_bus); 3092 3093 /* Shutdown the PHY if there is a GPIO reset */ 3094 if (bp->reset_gpio) 3095 gpiod_set_value(bp->reset_gpio, 0); 3096 3097 err_out_free_netdev: 3098 free_netdev(dev); 3099 3100 err_disable_clocks: 3101 clk_disable_unprepare(tx_clk); 3102 clk_disable_unprepare(hclk); 3103 clk_disable_unprepare(pclk); 3104 clk_disable_unprepare(rx_clk); 3105 3106 return err; 3107 } 3108 3109 static int macb_remove(struct platform_device *pdev) 3110 { 3111 struct net_device *dev; 3112 struct macb *bp; 3113 3114 dev = platform_get_drvdata(pdev); 3115 3116 if (dev) { 3117 bp = netdev_priv(dev); 3118 if (dev->phydev) 3119 phy_disconnect(dev->phydev); 3120 mdiobus_unregister(bp->mii_bus); 3121 dev->phydev = NULL; 3122 mdiobus_free(bp->mii_bus); 3123 3124 /* Shutdown the PHY if there is a GPIO reset */ 3125 if (bp->reset_gpio) 3126 gpiod_set_value(bp->reset_gpio, 0); 3127 3128 unregister_netdev(dev); 3129 clk_disable_unprepare(bp->tx_clk); 3130 clk_disable_unprepare(bp->hclk); 3131 clk_disable_unprepare(bp->pclk); 3132 clk_disable_unprepare(bp->rx_clk); 3133 free_netdev(dev); 3134 } 3135 3136 return 0; 3137 } 3138 3139 static int __maybe_unused macb_suspend(struct device *dev) 3140 { 3141 struct platform_device *pdev = to_platform_device(dev); 3142 struct net_device *netdev = platform_get_drvdata(pdev); 3143 struct macb *bp = netdev_priv(netdev); 3144 3145 netif_carrier_off(netdev); 3146 netif_device_detach(netdev); 3147 3148 if (bp->wol & MACB_WOL_ENABLED) { 3149 macb_writel(bp, IER, MACB_BIT(WOL)); 
3150 macb_writel(bp, WOL, MACB_BIT(MAG)); 3151 enable_irq_wake(bp->queues[0].irq); 3152 } else { 3153 clk_disable_unprepare(bp->tx_clk); 3154 clk_disable_unprepare(bp->hclk); 3155 clk_disable_unprepare(bp->pclk); 3156 clk_disable_unprepare(bp->rx_clk); 3157 } 3158 3159 return 0; 3160 } 3161 3162 static int __maybe_unused macb_resume(struct device *dev) 3163 { 3164 struct platform_device *pdev = to_platform_device(dev); 3165 struct net_device *netdev = platform_get_drvdata(pdev); 3166 struct macb *bp = netdev_priv(netdev); 3167 3168 if (bp->wol & MACB_WOL_ENABLED) { 3169 macb_writel(bp, IDR, MACB_BIT(WOL)); 3170 macb_writel(bp, WOL, 0); 3171 disable_irq_wake(bp->queues[0].irq); 3172 } else { 3173 clk_prepare_enable(bp->pclk); 3174 clk_prepare_enable(bp->hclk); 3175 clk_prepare_enable(bp->tx_clk); 3176 clk_prepare_enable(bp->rx_clk); 3177 } 3178 3179 netif_device_attach(netdev); 3180 3181 return 0; 3182 } 3183 3184 static SIMPLE_DEV_PM_OPS(macb_pm_ops, macb_suspend, macb_resume); 3185 3186 static struct platform_driver macb_driver = { 3187 .probe = macb_probe, 3188 .remove = macb_remove, 3189 .driver = { 3190 .name = "macb", 3191 .of_match_table = of_match_ptr(macb_dt_ids), 3192 .pm = &macb_pm_ops, 3193 }, 3194 }; 3195 3196 module_platform_driver(macb_driver); 3197 3198 MODULE_LICENSE("GPL"); 3199 MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver"); 3200 MODULE_AUTHOR("Haavard Skinnemoen (Atmel)"); 3201 MODULE_ALIAS("platform:macb"); 3202 3203 3204 3205 3206 3207 /* LDV_COMMENT_BEGIN_MAIN */ 3208 #ifdef LDV_MAIN0_sequence_infinite_withcheck_stateful 3209 3210 /*###########################################################################*/ 3211 3212 /*############## Driver Environment Generator 0.2 output ####################*/ 3213 3214 /*###########################################################################*/ 3215 3216 3217 3218 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. 
Test if all kernel resources are correctly released by driver before driver will be unloaded. */ 3219 void ldv_check_final_state(void); 3220 3221 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result. */ 3222 void ldv_check_return_value(int res); 3223 3224 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result of probe() function. */ 3225 void ldv_check_return_value_probe(int res); 3226 3227 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Initializes the model. */ 3228 void ldv_initialize(void); 3229 3230 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Reinitializes the model between distinct model function calls. */ 3231 void ldv_handler_precall(void); 3232 3233 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Returns arbitrary interger value. */ 3234 int nondet_int(void); 3235 3236 /* LDV_COMMENT_VAR_DECLARE_LDV Special variable for LDV verifier. */ 3237 int LDV_IN_INTERRUPT; 3238 3239 /* LDV_COMMENT_FUNCTION_MAIN Main function for LDV verifier. 
*/ 3240 void ldv_main0_sequence_infinite_withcheck_stateful(void) { 3241 3242 3243 3244 /* LDV_COMMENT_BEGIN_VARIABLE_DECLARATION_PART */ 3245 /*============================= VARIABLE DECLARATION PART =============================*/ 3246 /** STRUCT: struct type: ethtool_ops, struct name: macb_ethtool_ops **/ 3247 /* content: static int macb_get_regs_len(struct net_device *netdev)*/ 3248 /* LDV_COMMENT_BEGIN_PREP */ 3249 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 3250 #define MACB_RX_BUFFER_SIZE 128 3251 #define RX_BUFFER_MULTIPLE 64 3252 #define RX_RING_SIZE 512 3253 #define RX_RING_BYTES (sizeof(struct macb_dma_desc) * RX_RING_SIZE) 3254 #define TX_RING_SIZE 128 3255 #define TX_RING_BYTES (sizeof(struct macb_dma_desc) * TX_RING_SIZE) 3256 #define MACB_TX_WAKEUP_THRESH (3 * TX_RING_SIZE / 4) 3257 #define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \ 3258 | MACB_BIT(ISR_ROVR)) 3259 #define MACB_TX_ERR_FLAGS (MACB_BIT(ISR_TUND) \ 3260 | MACB_BIT(ISR_RLE) \ 3261 | MACB_BIT(TXERR)) 3262 #define MACB_TX_INT_FLAGS (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP)) 3263 #define MACB_MAX_TX_LEN ((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1)) 3264 #define GEM_MAX_TX_LEN ((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1)) 3265 #define GEM_MTU_MIN_SIZE 68 3266 #define MACB_WOL_HAS_MAGIC_PACKET (0x1 << 0) 3267 #define MACB_WOL_ENABLED (0x1 << 1) 3268 #define MACB_HALT_TIMEOUT 1230 3269 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3270 #endif 3271 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3272 #endif 3273 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3274 #endif 3275 #if defined(DEBUG) && defined(VERBOSE_DEBUG) 3276 #endif 3277 #ifdef CONFIG_NET_POLL_CONTROLLER 3278 #endif 3279 #if defined(DEBUG) && defined(VERBOSE_DEBUG) 3280 #endif 3281 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3282 #endif 3283 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3284 #endif 3285 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3286 #endif 3287 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3288 #endif 3289 /* LDV_COMMENT_END_PREP */ 3290 /* LDV_COMMENT_VAR_DECLARE 
Variable declaration for function "macb_get_regs_len" */ 3291 struct net_device * var_group1; 3292 /* LDV_COMMENT_BEGIN_PREP */ 3293 #ifdef CONFIG_NET_POLL_CONTROLLER 3294 #endif 3295 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3296 #endif 3297 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3298 #endif 3299 #if defined(CONFIG_OF) 3300 #define AT91ETHER_MAX_RBUFF_SZ 0x600 3301 #define AT91ETHER_MAX_RX_DESCR 9 3302 #ifdef CONFIG_NET_POLL_CONTROLLER 3303 #endif 3304 #ifdef CONFIG_NET_POLL_CONTROLLER 3305 #endif 3306 #endif 3307 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3308 #endif 3309 /* LDV_COMMENT_END_PREP */ 3310 /* content: static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)*/ 3311 /* LDV_COMMENT_BEGIN_PREP */ 3312 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 3313 #define MACB_RX_BUFFER_SIZE 128 3314 #define RX_BUFFER_MULTIPLE 64 3315 #define RX_RING_SIZE 512 3316 #define RX_RING_BYTES (sizeof(struct macb_dma_desc) * RX_RING_SIZE) 3317 #define TX_RING_SIZE 128 3318 #define TX_RING_BYTES (sizeof(struct macb_dma_desc) * TX_RING_SIZE) 3319 #define MACB_TX_WAKEUP_THRESH (3 * TX_RING_SIZE / 4) 3320 #define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \ 3321 | MACB_BIT(ISR_ROVR)) 3322 #define MACB_TX_ERR_FLAGS (MACB_BIT(ISR_TUND) \ 3323 | MACB_BIT(ISR_RLE) \ 3324 | MACB_BIT(TXERR)) 3325 #define MACB_TX_INT_FLAGS (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP)) 3326 #define MACB_MAX_TX_LEN ((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1)) 3327 #define GEM_MAX_TX_LEN ((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1)) 3328 #define GEM_MTU_MIN_SIZE 68 3329 #define MACB_WOL_HAS_MAGIC_PACKET (0x1 << 0) 3330 #define MACB_WOL_ENABLED (0x1 << 1) 3331 #define MACB_HALT_TIMEOUT 1230 3332 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3333 #endif 3334 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3335 #endif 3336 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3337 #endif 3338 #if defined(DEBUG) && defined(VERBOSE_DEBUG) 3339 #endif 3340 #ifdef CONFIG_NET_POLL_CONTROLLER 3341 #endif 3342 #if defined(DEBUG) && 
defined(VERBOSE_DEBUG) 3343 #endif 3344 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3345 #endif 3346 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3347 #endif 3348 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3349 #endif 3350 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3351 #endif 3352 /* LDV_COMMENT_END_PREP */ 3353 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "macb_get_regs" */ 3354 struct ethtool_regs * var_group2; 3355 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "macb_get_regs" */ 3356 void * var_macb_get_regs_68_p2; 3357 /* LDV_COMMENT_BEGIN_PREP */ 3358 #ifdef CONFIG_NET_POLL_CONTROLLER 3359 #endif 3360 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3361 #endif 3362 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3363 #endif 3364 #if defined(CONFIG_OF) 3365 #define AT91ETHER_MAX_RBUFF_SZ 0x600 3366 #define AT91ETHER_MAX_RX_DESCR 9 3367 #ifdef CONFIG_NET_POLL_CONTROLLER 3368 #endif 3369 #ifdef CONFIG_NET_POLL_CONTROLLER 3370 #endif 3371 #endif 3372 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3373 #endif 3374 /* LDV_COMMENT_END_PREP */ 3375 /* content: static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)*/ 3376 /* LDV_COMMENT_BEGIN_PREP */ 3377 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 3378 #define MACB_RX_BUFFER_SIZE 128 3379 #define RX_BUFFER_MULTIPLE 64 3380 #define RX_RING_SIZE 512 3381 #define RX_RING_BYTES (sizeof(struct macb_dma_desc) * RX_RING_SIZE) 3382 #define TX_RING_SIZE 128 3383 #define TX_RING_BYTES (sizeof(struct macb_dma_desc) * TX_RING_SIZE) 3384 #define MACB_TX_WAKEUP_THRESH (3 * TX_RING_SIZE / 4) 3385 #define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \ 3386 | MACB_BIT(ISR_ROVR)) 3387 #define MACB_TX_ERR_FLAGS (MACB_BIT(ISR_TUND) \ 3388 | MACB_BIT(ISR_RLE) \ 3389 | MACB_BIT(TXERR)) 3390 #define MACB_TX_INT_FLAGS (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP)) 3391 #define MACB_MAX_TX_LEN ((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1)) 3392 #define GEM_MAX_TX_LEN ((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1)) 3393 #define GEM_MTU_MIN_SIZE 68 
3394 #define MACB_WOL_HAS_MAGIC_PACKET (0x1 << 0) 3395 #define MACB_WOL_ENABLED (0x1 << 1) 3396 #define MACB_HALT_TIMEOUT 1230 3397 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3398 #endif 3399 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3400 #endif 3401 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3402 #endif 3403 #if defined(DEBUG) && defined(VERBOSE_DEBUG) 3404 #endif 3405 #ifdef CONFIG_NET_POLL_CONTROLLER 3406 #endif 3407 #if defined(DEBUG) && defined(VERBOSE_DEBUG) 3408 #endif 3409 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3410 #endif 3411 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3412 #endif 3413 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3414 #endif 3415 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3416 #endif 3417 /* LDV_COMMENT_END_PREP */ 3418 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "macb_get_wol" */ 3419 struct ethtool_wolinfo * var_group3; 3420 /* LDV_COMMENT_BEGIN_PREP */ 3421 #ifdef CONFIG_NET_POLL_CONTROLLER 3422 #endif 3423 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3424 #endif 3425 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3426 #endif 3427 #if defined(CONFIG_OF) 3428 #define AT91ETHER_MAX_RBUFF_SZ 0x600 3429 #define AT91ETHER_MAX_RX_DESCR 9 3430 #ifdef CONFIG_NET_POLL_CONTROLLER 3431 #endif 3432 #ifdef CONFIG_NET_POLL_CONTROLLER 3433 #endif 3434 #endif 3435 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3436 #endif 3437 /* LDV_COMMENT_END_PREP */ 3438 /* content: static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)*/ 3439 /* LDV_COMMENT_BEGIN_PREP */ 3440 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 3441 #define MACB_RX_BUFFER_SIZE 128 3442 #define RX_BUFFER_MULTIPLE 64 3443 #define RX_RING_SIZE 512 3444 #define RX_RING_BYTES (sizeof(struct macb_dma_desc) * RX_RING_SIZE) 3445 #define TX_RING_SIZE 128 3446 #define TX_RING_BYTES (sizeof(struct macb_dma_desc) * TX_RING_SIZE) 3447 #define MACB_TX_WAKEUP_THRESH (3 * TX_RING_SIZE / 4) 3448 #define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \ 3449 | MACB_BIT(ISR_ROVR)) 3450 #define MACB_TX_ERR_FLAGS (MACB_BIT(ISR_TUND) \ 3451 
| MACB_BIT(ISR_RLE) \ 3452 | MACB_BIT(TXERR)) 3453 #define MACB_TX_INT_FLAGS (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP)) 3454 #define MACB_MAX_TX_LEN ((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1)) 3455 #define GEM_MAX_TX_LEN ((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1)) 3456 #define GEM_MTU_MIN_SIZE 68 3457 #define MACB_WOL_HAS_MAGIC_PACKET (0x1 << 0) 3458 #define MACB_WOL_ENABLED (0x1 << 1) 3459 #define MACB_HALT_TIMEOUT 1230 3460 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3461 #endif 3462 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3463 #endif 3464 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3465 #endif 3466 #if defined(DEBUG) && defined(VERBOSE_DEBUG) 3467 #endif 3468 #ifdef CONFIG_NET_POLL_CONTROLLER 3469 #endif 3470 #if defined(DEBUG) && defined(VERBOSE_DEBUG) 3471 #endif 3472 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3473 #endif 3474 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3475 #endif 3476 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3477 #endif 3478 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3479 #endif 3480 /* LDV_COMMENT_END_PREP */ 3481 /* LDV_COMMENT_BEGIN_PREP */ 3482 #ifdef CONFIG_NET_POLL_CONTROLLER 3483 #endif 3484 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3485 #endif 3486 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3487 #endif 3488 #if defined(CONFIG_OF) 3489 #define AT91ETHER_MAX_RBUFF_SZ 0x600 3490 #define AT91ETHER_MAX_RX_DESCR 9 3491 #ifdef CONFIG_NET_POLL_CONTROLLER 3492 #endif 3493 #ifdef CONFIG_NET_POLL_CONTROLLER 3494 #endif 3495 #endif 3496 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3497 #endif 3498 /* LDV_COMMENT_END_PREP */ 3499 3500 /** STRUCT: struct type: ethtool_ops, struct name: gem_ethtool_ops **/ 3501 /* content: static int macb_get_regs_len(struct net_device *netdev)*/ 3502 /* LDV_COMMENT_BEGIN_PREP */ 3503 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 3504 #define MACB_RX_BUFFER_SIZE 128 3505 #define RX_BUFFER_MULTIPLE 64 3506 #define RX_RING_SIZE 512 3507 #define RX_RING_BYTES (sizeof(struct macb_dma_desc) * RX_RING_SIZE) 3508 #define TX_RING_SIZE 128 3509 #define TX_RING_BYTES (sizeof(struct 
macb_dma_desc) * TX_RING_SIZE) 3510 #define MACB_TX_WAKEUP_THRESH (3 * TX_RING_SIZE / 4) 3511 #define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \ 3512 | MACB_BIT(ISR_ROVR)) 3513 #define MACB_TX_ERR_FLAGS (MACB_BIT(ISR_TUND) \ 3514 | MACB_BIT(ISR_RLE) \ 3515 | MACB_BIT(TXERR)) 3516 #define MACB_TX_INT_FLAGS (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP)) 3517 #define MACB_MAX_TX_LEN ((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1)) 3518 #define GEM_MAX_TX_LEN ((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1)) 3519 #define GEM_MTU_MIN_SIZE 68 3520 #define MACB_WOL_HAS_MAGIC_PACKET (0x1 << 0) 3521 #define MACB_WOL_ENABLED (0x1 << 1) 3522 #define MACB_HALT_TIMEOUT 1230 3523 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3524 #endif 3525 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3526 #endif 3527 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3528 #endif 3529 #if defined(DEBUG) && defined(VERBOSE_DEBUG) 3530 #endif 3531 #ifdef CONFIG_NET_POLL_CONTROLLER 3532 #endif 3533 #if defined(DEBUG) && defined(VERBOSE_DEBUG) 3534 #endif 3535 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3536 #endif 3537 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3538 #endif 3539 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3540 #endif 3541 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3542 #endif 3543 /* LDV_COMMENT_END_PREP */ 3544 /* LDV_COMMENT_BEGIN_PREP */ 3545 #ifdef CONFIG_NET_POLL_CONTROLLER 3546 #endif 3547 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3548 #endif 3549 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3550 #endif 3551 #if defined(CONFIG_OF) 3552 #define AT91ETHER_MAX_RBUFF_SZ 0x600 3553 #define AT91ETHER_MAX_RX_DESCR 9 3554 #ifdef CONFIG_NET_POLL_CONTROLLER 3555 #endif 3556 #ifdef CONFIG_NET_POLL_CONTROLLER 3557 #endif 3558 #endif 3559 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3560 #endif 3561 /* LDV_COMMENT_END_PREP */ 3562 /* content: static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)*/ 3563 /* LDV_COMMENT_BEGIN_PREP */ 3564 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 3565 #define MACB_RX_BUFFER_SIZE 128 3566 #define 
RX_BUFFER_MULTIPLE 64 3567 #define RX_RING_SIZE 512 3568 #define RX_RING_BYTES (sizeof(struct macb_dma_desc) * RX_RING_SIZE) 3569 #define TX_RING_SIZE 128 3570 #define TX_RING_BYTES (sizeof(struct macb_dma_desc) * TX_RING_SIZE) 3571 #define MACB_TX_WAKEUP_THRESH (3 * TX_RING_SIZE / 4) 3572 #define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \ 3573 | MACB_BIT(ISR_ROVR)) 3574 #define MACB_TX_ERR_FLAGS (MACB_BIT(ISR_TUND) \ 3575 | MACB_BIT(ISR_RLE) \ 3576 | MACB_BIT(TXERR)) 3577 #define MACB_TX_INT_FLAGS (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP)) 3578 #define MACB_MAX_TX_LEN ((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1)) 3579 #define GEM_MAX_TX_LEN ((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1)) 3580 #define GEM_MTU_MIN_SIZE 68 3581 #define MACB_WOL_HAS_MAGIC_PACKET (0x1 << 0) 3582 #define MACB_WOL_ENABLED (0x1 << 1) 3583 #define MACB_HALT_TIMEOUT 1230 3584 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3585 #endif 3586 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3587 #endif 3588 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3589 #endif 3590 #if defined(DEBUG) && defined(VERBOSE_DEBUG) 3591 #endif 3592 #ifdef CONFIG_NET_POLL_CONTROLLER 3593 #endif 3594 #if defined(DEBUG) && defined(VERBOSE_DEBUG) 3595 #endif 3596 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3597 #endif 3598 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3599 #endif 3600 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3601 #endif 3602 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3603 #endif 3604 /* LDV_COMMENT_END_PREP */ 3605 /* LDV_COMMENT_BEGIN_PREP */ 3606 #ifdef CONFIG_NET_POLL_CONTROLLER 3607 #endif 3608 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3609 #endif 3610 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3611 #endif 3612 #if defined(CONFIG_OF) 3613 #define AT91ETHER_MAX_RBUFF_SZ 0x600 3614 #define AT91ETHER_MAX_RX_DESCR 9 3615 #ifdef CONFIG_NET_POLL_CONTROLLER 3616 #endif 3617 #ifdef CONFIG_NET_POLL_CONTROLLER 3618 #endif 3619 #endif 3620 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3621 #endif 3622 /* LDV_COMMENT_END_PREP */ 3623 /* content: static void 
gem_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data)*/ 3624 /* LDV_COMMENT_BEGIN_PREP */ 3625 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 3626 #define MACB_RX_BUFFER_SIZE 128 3627 #define RX_BUFFER_MULTIPLE 64 3628 #define RX_RING_SIZE 512 3629 #define RX_RING_BYTES (sizeof(struct macb_dma_desc) * RX_RING_SIZE) 3630 #define TX_RING_SIZE 128 3631 #define TX_RING_BYTES (sizeof(struct macb_dma_desc) * TX_RING_SIZE) 3632 #define MACB_TX_WAKEUP_THRESH (3 * TX_RING_SIZE / 4) 3633 #define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \ 3634 | MACB_BIT(ISR_ROVR)) 3635 #define MACB_TX_ERR_FLAGS (MACB_BIT(ISR_TUND) \ 3636 | MACB_BIT(ISR_RLE) \ 3637 | MACB_BIT(TXERR)) 3638 #define MACB_TX_INT_FLAGS (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP)) 3639 #define MACB_MAX_TX_LEN ((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1)) 3640 #define GEM_MAX_TX_LEN ((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1)) 3641 #define GEM_MTU_MIN_SIZE 68 3642 #define MACB_WOL_HAS_MAGIC_PACKET (0x1 << 0) 3643 #define MACB_WOL_ENABLED (0x1 << 1) 3644 #define MACB_HALT_TIMEOUT 1230 3645 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3646 #endif 3647 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3648 #endif 3649 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3650 #endif 3651 #if defined(DEBUG) && defined(VERBOSE_DEBUG) 3652 #endif 3653 #ifdef CONFIG_NET_POLL_CONTROLLER 3654 #endif 3655 #if defined(DEBUG) && defined(VERBOSE_DEBUG) 3656 #endif 3657 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3658 #endif 3659 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3660 #endif 3661 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3662 #endif 3663 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3664 #endif 3665 /* LDV_COMMENT_END_PREP */ 3666 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "gem_get_ethtool_stats" */ 3667 struct ethtool_stats * var_group4; 3668 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "gem_get_ethtool_stats" */ 3669 u64 * var_gem_get_ethtool_stats_63_p2; 3670 /* LDV_COMMENT_BEGIN_PREP */ 3671 #ifdef 
CONFIG_NET_POLL_CONTROLLER 3672 #endif 3673 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3674 #endif 3675 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3676 #endif 3677 #if defined(CONFIG_OF) 3678 #define AT91ETHER_MAX_RBUFF_SZ 0x600 3679 #define AT91ETHER_MAX_RX_DESCR 9 3680 #ifdef CONFIG_NET_POLL_CONTROLLER 3681 #endif 3682 #ifdef CONFIG_NET_POLL_CONTROLLER 3683 #endif 3684 #endif 3685 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3686 #endif 3687 /* LDV_COMMENT_END_PREP */ 3688 /* content: static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)*/ 3689 /* LDV_COMMENT_BEGIN_PREP */ 3690 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 3691 #define MACB_RX_BUFFER_SIZE 128 3692 #define RX_BUFFER_MULTIPLE 64 3693 #define RX_RING_SIZE 512 3694 #define RX_RING_BYTES (sizeof(struct macb_dma_desc) * RX_RING_SIZE) 3695 #define TX_RING_SIZE 128 3696 #define TX_RING_BYTES (sizeof(struct macb_dma_desc) * TX_RING_SIZE) 3697 #define MACB_TX_WAKEUP_THRESH (3 * TX_RING_SIZE / 4) 3698 #define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \ 3699 | MACB_BIT(ISR_ROVR)) 3700 #define MACB_TX_ERR_FLAGS (MACB_BIT(ISR_TUND) \ 3701 | MACB_BIT(ISR_RLE) \ 3702 | MACB_BIT(TXERR)) 3703 #define MACB_TX_INT_FLAGS (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP)) 3704 #define MACB_MAX_TX_LEN ((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1)) 3705 #define GEM_MAX_TX_LEN ((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1)) 3706 #define GEM_MTU_MIN_SIZE 68 3707 #define MACB_WOL_HAS_MAGIC_PACKET (0x1 << 0) 3708 #define MACB_WOL_ENABLED (0x1 << 1) 3709 #define MACB_HALT_TIMEOUT 1230 3710 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3711 #endif 3712 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3713 #endif 3714 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3715 #endif 3716 #if defined(DEBUG) && defined(VERBOSE_DEBUG) 3717 #endif 3718 #ifdef CONFIG_NET_POLL_CONTROLLER 3719 #endif 3720 #if defined(DEBUG) && defined(VERBOSE_DEBUG) 3721 #endif 3722 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3723 #endif 3724 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3725 #endif 3726 
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3727 #endif 3728 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3729 #endif 3730 /* LDV_COMMENT_END_PREP */ 3731 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "gem_get_ethtool_strings" */ 3732 u32 var_gem_get_ethtool_strings_65_p1; 3733 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "gem_get_ethtool_strings" */ 3734 u8 * var_gem_get_ethtool_strings_65_p2; 3735 /* LDV_COMMENT_BEGIN_PREP */ 3736 #ifdef CONFIG_NET_POLL_CONTROLLER 3737 #endif 3738 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3739 #endif 3740 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3741 #endif 3742 #if defined(CONFIG_OF) 3743 #define AT91ETHER_MAX_RBUFF_SZ 0x600 3744 #define AT91ETHER_MAX_RX_DESCR 9 3745 #ifdef CONFIG_NET_POLL_CONTROLLER 3746 #endif 3747 #ifdef CONFIG_NET_POLL_CONTROLLER 3748 #endif 3749 #endif 3750 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3751 #endif 3752 /* LDV_COMMENT_END_PREP */ 3753 /* content: static int gem_get_sset_count(struct net_device *dev, int sset)*/ 3754 /* LDV_COMMENT_BEGIN_PREP */ 3755 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 3756 #define MACB_RX_BUFFER_SIZE 128 3757 #define RX_BUFFER_MULTIPLE 64 3758 #define RX_RING_SIZE 512 3759 #define RX_RING_BYTES (sizeof(struct macb_dma_desc) * RX_RING_SIZE) 3760 #define TX_RING_SIZE 128 3761 #define TX_RING_BYTES (sizeof(struct macb_dma_desc) * TX_RING_SIZE) 3762 #define MACB_TX_WAKEUP_THRESH (3 * TX_RING_SIZE / 4) 3763 #define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \ 3764 | MACB_BIT(ISR_ROVR)) 3765 #define MACB_TX_ERR_FLAGS (MACB_BIT(ISR_TUND) \ 3766 | MACB_BIT(ISR_RLE) \ 3767 | MACB_BIT(TXERR)) 3768 #define MACB_TX_INT_FLAGS (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP)) 3769 #define MACB_MAX_TX_LEN ((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1)) 3770 #define GEM_MAX_TX_LEN ((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1)) 3771 #define GEM_MTU_MIN_SIZE 68 3772 #define MACB_WOL_HAS_MAGIC_PACKET (0x1 << 0) 3773 #define MACB_WOL_ENABLED (0x1 << 1) 3774 #define MACB_HALT_TIMEOUT 1230 3775 
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3776 #endif 3777 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3778 #endif 3779 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3780 #endif 3781 #if defined(DEBUG) && defined(VERBOSE_DEBUG) 3782 #endif 3783 #ifdef CONFIG_NET_POLL_CONTROLLER 3784 #endif 3785 #if defined(DEBUG) && defined(VERBOSE_DEBUG) 3786 #endif 3787 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3788 #endif 3789 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3790 #endif 3791 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3792 #endif 3793 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3794 #endif 3795 /* LDV_COMMENT_END_PREP */ 3796 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "gem_get_sset_count" */ 3797 int var_gem_get_sset_count_64_p1; 3798 /* LDV_COMMENT_BEGIN_PREP */ 3799 #ifdef CONFIG_NET_POLL_CONTROLLER 3800 #endif 3801 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3802 #endif 3803 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3804 #endif 3805 #if defined(CONFIG_OF) 3806 #define AT91ETHER_MAX_RBUFF_SZ 0x600 3807 #define AT91ETHER_MAX_RX_DESCR 9 3808 #ifdef CONFIG_NET_POLL_CONTROLLER 3809 #endif 3810 #ifdef CONFIG_NET_POLL_CONTROLLER 3811 #endif 3812 #endif 3813 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3814 #endif 3815 /* LDV_COMMENT_END_PREP */ 3816 3817 /** STRUCT: struct type: net_device_ops, struct name: macb_netdev_ops **/ 3818 /* content: static int macb_open(struct net_device *dev)*/ 3819 /* LDV_COMMENT_BEGIN_PREP */ 3820 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 3821 #define MACB_RX_BUFFER_SIZE 128 3822 #define RX_BUFFER_MULTIPLE 64 3823 #define RX_RING_SIZE 512 3824 #define RX_RING_BYTES (sizeof(struct macb_dma_desc) * RX_RING_SIZE) 3825 #define TX_RING_SIZE 128 3826 #define TX_RING_BYTES (sizeof(struct macb_dma_desc) * TX_RING_SIZE) 3827 #define MACB_TX_WAKEUP_THRESH (3 * TX_RING_SIZE / 4) 3828 #define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \ 3829 | MACB_BIT(ISR_ROVR)) 3830 #define MACB_TX_ERR_FLAGS (MACB_BIT(ISR_TUND) \ 3831 | MACB_BIT(ISR_RLE) \ 3832 | MACB_BIT(TXERR)) 3833 #define MACB_TX_INT_FLAGS 
(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP)) 3834 #define MACB_MAX_TX_LEN ((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1)) 3835 #define GEM_MAX_TX_LEN ((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1)) 3836 #define GEM_MTU_MIN_SIZE 68 3837 #define MACB_WOL_HAS_MAGIC_PACKET (0x1 << 0) 3838 #define MACB_WOL_ENABLED (0x1 << 1) 3839 #define MACB_HALT_TIMEOUT 1230 3840 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3841 #endif 3842 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3843 #endif 3844 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3845 #endif 3846 #if defined(DEBUG) && defined(VERBOSE_DEBUG) 3847 #endif 3848 #ifdef CONFIG_NET_POLL_CONTROLLER 3849 #endif 3850 #if defined(DEBUG) && defined(VERBOSE_DEBUG) 3851 #endif 3852 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3853 #endif 3854 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3855 #endif 3856 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3857 #endif 3858 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3859 #endif 3860 /* LDV_COMMENT_END_PREP */ 3861 /* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "macb_open" */ 3862 static int res_macb_open_58; 3863 /* LDV_COMMENT_BEGIN_PREP */ 3864 #ifdef CONFIG_NET_POLL_CONTROLLER 3865 #endif 3866 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3867 #endif 3868 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3869 #endif 3870 #if defined(CONFIG_OF) 3871 #define AT91ETHER_MAX_RBUFF_SZ 0x600 3872 #define AT91ETHER_MAX_RX_DESCR 9 3873 #ifdef CONFIG_NET_POLL_CONTROLLER 3874 #endif 3875 #ifdef CONFIG_NET_POLL_CONTROLLER 3876 #endif 3877 #endif 3878 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3879 #endif 3880 /* LDV_COMMENT_END_PREP */ 3881 /* content: static int macb_close(struct net_device *dev)*/ 3882 /* LDV_COMMENT_BEGIN_PREP */ 3883 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 3884 #define MACB_RX_BUFFER_SIZE 128 3885 #define RX_BUFFER_MULTIPLE 64 3886 #define RX_RING_SIZE 512 3887 #define RX_RING_BYTES (sizeof(struct macb_dma_desc) * RX_RING_SIZE) 3888 #define TX_RING_SIZE 128 3889 #define TX_RING_BYTES (sizeof(struct macb_dma_desc) * TX_RING_SIZE) 
3890 #define MACB_TX_WAKEUP_THRESH (3 * TX_RING_SIZE / 4) 3891 #define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \ 3892 | MACB_BIT(ISR_ROVR)) 3893 #define MACB_TX_ERR_FLAGS (MACB_BIT(ISR_TUND) \ 3894 | MACB_BIT(ISR_RLE) \ 3895 | MACB_BIT(TXERR)) 3896 #define MACB_TX_INT_FLAGS (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP)) 3897 #define MACB_MAX_TX_LEN ((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1)) 3898 #define GEM_MAX_TX_LEN ((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1)) 3899 #define GEM_MTU_MIN_SIZE 68 3900 #define MACB_WOL_HAS_MAGIC_PACKET (0x1 << 0) 3901 #define MACB_WOL_ENABLED (0x1 << 1) 3902 #define MACB_HALT_TIMEOUT 1230 3903 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3904 #endif 3905 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3906 #endif 3907 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3908 #endif 3909 #if defined(DEBUG) && defined(VERBOSE_DEBUG) 3910 #endif 3911 #ifdef CONFIG_NET_POLL_CONTROLLER 3912 #endif 3913 #if defined(DEBUG) && defined(VERBOSE_DEBUG) 3914 #endif 3915 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3916 #endif 3917 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3918 #endif 3919 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3920 #endif 3921 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3922 #endif 3923 /* LDV_COMMENT_END_PREP */ 3924 /* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "macb_close" */ 3925 static int res_macb_close_59; 3926 /* LDV_COMMENT_BEGIN_PREP */ 3927 #ifdef CONFIG_NET_POLL_CONTROLLER 3928 #endif 3929 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3930 #endif 3931 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3932 #endif 3933 #if defined(CONFIG_OF) 3934 #define AT91ETHER_MAX_RBUFF_SZ 0x600 3935 #define AT91ETHER_MAX_RX_DESCR 9 3936 #ifdef CONFIG_NET_POLL_CONTROLLER 3937 #endif 3938 #ifdef CONFIG_NET_POLL_CONTROLLER 3939 #endif 3940 #endif 3941 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3942 #endif 3943 /* LDV_COMMENT_END_PREP */ 3944 /* content: static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)*/ 3945 /* LDV_COMMENT_BEGIN_PREP */ 3946 #define 
pr_fmt(fmt) KBUILD_MODNAME ": " fmt 3947 #define MACB_RX_BUFFER_SIZE 128 3948 #define RX_BUFFER_MULTIPLE 64 3949 #define RX_RING_SIZE 512 3950 #define RX_RING_BYTES (sizeof(struct macb_dma_desc) * RX_RING_SIZE) 3951 #define TX_RING_SIZE 128 3952 #define TX_RING_BYTES (sizeof(struct macb_dma_desc) * TX_RING_SIZE) 3953 #define MACB_TX_WAKEUP_THRESH (3 * TX_RING_SIZE / 4) 3954 #define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \ 3955 | MACB_BIT(ISR_ROVR)) 3956 #define MACB_TX_ERR_FLAGS (MACB_BIT(ISR_TUND) \ 3957 | MACB_BIT(ISR_RLE) \ 3958 | MACB_BIT(TXERR)) 3959 #define MACB_TX_INT_FLAGS (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP)) 3960 #define MACB_MAX_TX_LEN ((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1)) 3961 #define GEM_MAX_TX_LEN ((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1)) 3962 #define GEM_MTU_MIN_SIZE 68 3963 #define MACB_WOL_HAS_MAGIC_PACKET (0x1 << 0) 3964 #define MACB_WOL_ENABLED (0x1 << 1) 3965 #define MACB_HALT_TIMEOUT 1230 3966 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3967 #endif 3968 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3969 #endif 3970 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3971 #endif 3972 #if defined(DEBUG) && defined(VERBOSE_DEBUG) 3973 #endif 3974 #ifdef CONFIG_NET_POLL_CONTROLLER 3975 #endif 3976 /* LDV_COMMENT_END_PREP */ 3977 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "macb_start_xmit" */ 3978 struct sk_buff * var_group5; 3979 /* LDV_COMMENT_BEGIN_PREP */ 3980 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3981 #endif 3982 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3983 #endif 3984 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3985 #endif 3986 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3987 #endif 3988 #ifdef CONFIG_NET_POLL_CONTROLLER 3989 #endif 3990 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3991 #endif 3992 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3993 #endif 3994 #if defined(CONFIG_OF) 3995 #define AT91ETHER_MAX_RBUFF_SZ 0x600 3996 #define AT91ETHER_MAX_RX_DESCR 9 3997 #ifdef CONFIG_NET_POLL_CONTROLLER 3998 #endif 3999 #ifdef CONFIG_NET_POLL_CONTROLLER 4000 #endif 4001 
#endif 4002 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4003 #endif 4004 /* LDV_COMMENT_END_PREP */ 4005 /* content: static void macb_set_rx_mode(struct net_device *dev)*/ 4006 /* LDV_COMMENT_BEGIN_PREP */ 4007 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 4008 #define MACB_RX_BUFFER_SIZE 128 4009 #define RX_BUFFER_MULTIPLE 64 4010 #define RX_RING_SIZE 512 4011 #define RX_RING_BYTES (sizeof(struct macb_dma_desc) * RX_RING_SIZE) 4012 #define TX_RING_SIZE 128 4013 #define TX_RING_BYTES (sizeof(struct macb_dma_desc) * TX_RING_SIZE) 4014 #define MACB_TX_WAKEUP_THRESH (3 * TX_RING_SIZE / 4) 4015 #define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \ 4016 | MACB_BIT(ISR_ROVR)) 4017 #define MACB_TX_ERR_FLAGS (MACB_BIT(ISR_TUND) \ 4018 | MACB_BIT(ISR_RLE) \ 4019 | MACB_BIT(TXERR)) 4020 #define MACB_TX_INT_FLAGS (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP)) 4021 #define MACB_MAX_TX_LEN ((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1)) 4022 #define GEM_MAX_TX_LEN ((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1)) 4023 #define GEM_MTU_MIN_SIZE 68 4024 #define MACB_WOL_HAS_MAGIC_PACKET (0x1 << 0) 4025 #define MACB_WOL_ENABLED (0x1 << 1) 4026 #define MACB_HALT_TIMEOUT 1230 4027 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4028 #endif 4029 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4030 #endif 4031 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4032 #endif 4033 #if defined(DEBUG) && defined(VERBOSE_DEBUG) 4034 #endif 4035 #ifdef CONFIG_NET_POLL_CONTROLLER 4036 #endif 4037 #if defined(DEBUG) && defined(VERBOSE_DEBUG) 4038 #endif 4039 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4040 #endif 4041 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4042 #endif 4043 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4044 #endif 4045 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4046 #endif 4047 /* LDV_COMMENT_END_PREP */ 4048 /* LDV_COMMENT_BEGIN_PREP */ 4049 #ifdef CONFIG_NET_POLL_CONTROLLER 4050 #endif 4051 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4052 #endif 4053 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4054 #endif 4055 #if defined(CONFIG_OF) 4056 #define AT91ETHER_MAX_RBUFF_SZ 
0x600 4057 #define AT91ETHER_MAX_RX_DESCR 9 4058 #ifdef CONFIG_NET_POLL_CONTROLLER 4059 #endif 4060 #ifdef CONFIG_NET_POLL_CONTROLLER 4061 #endif 4062 #endif 4063 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4064 #endif 4065 /* LDV_COMMENT_END_PREP */ 4066 /* content: static struct net_device_stats *macb_get_stats(struct net_device *dev)*/ 4067 /* LDV_COMMENT_BEGIN_PREP */ 4068 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 4069 #define MACB_RX_BUFFER_SIZE 128 4070 #define RX_BUFFER_MULTIPLE 64 4071 #define RX_RING_SIZE 512 4072 #define RX_RING_BYTES (sizeof(struct macb_dma_desc) * RX_RING_SIZE) 4073 #define TX_RING_SIZE 128 4074 #define TX_RING_BYTES (sizeof(struct macb_dma_desc) * TX_RING_SIZE) 4075 #define MACB_TX_WAKEUP_THRESH (3 * TX_RING_SIZE / 4) 4076 #define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \ 4077 | MACB_BIT(ISR_ROVR)) 4078 #define MACB_TX_ERR_FLAGS (MACB_BIT(ISR_TUND) \ 4079 | MACB_BIT(ISR_RLE) \ 4080 | MACB_BIT(TXERR)) 4081 #define MACB_TX_INT_FLAGS (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP)) 4082 #define MACB_MAX_TX_LEN ((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1)) 4083 #define GEM_MAX_TX_LEN ((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1)) 4084 #define GEM_MTU_MIN_SIZE 68 4085 #define MACB_WOL_HAS_MAGIC_PACKET (0x1 << 0) 4086 #define MACB_WOL_ENABLED (0x1 << 1) 4087 #define MACB_HALT_TIMEOUT 1230 4088 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4089 #endif 4090 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4091 #endif 4092 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4093 #endif 4094 #if defined(DEBUG) && defined(VERBOSE_DEBUG) 4095 #endif 4096 #ifdef CONFIG_NET_POLL_CONTROLLER 4097 #endif 4098 #if defined(DEBUG) && defined(VERBOSE_DEBUG) 4099 #endif 4100 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4101 #endif 4102 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4103 #endif 4104 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4105 #endif 4106 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4107 #endif 4108 /* LDV_COMMENT_END_PREP */ 4109 /* LDV_COMMENT_BEGIN_PREP */ 4110 #ifdef CONFIG_NET_POLL_CONTROLLER 4111 #endif 
4112 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4113 #endif 4114 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4115 #endif 4116 #if defined(CONFIG_OF) 4117 #define AT91ETHER_MAX_RBUFF_SZ 0x600 4118 #define AT91ETHER_MAX_RX_DESCR 9 4119 #ifdef CONFIG_NET_POLL_CONTROLLER 4120 #endif 4121 #ifdef CONFIG_NET_POLL_CONTROLLER 4122 #endif 4123 #endif 4124 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4125 #endif 4126 /* LDV_COMMENT_END_PREP */ 4127 /* content: static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)*/ 4128 /* LDV_COMMENT_BEGIN_PREP */ 4129 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 4130 #define MACB_RX_BUFFER_SIZE 128 4131 #define RX_BUFFER_MULTIPLE 64 4132 #define RX_RING_SIZE 512 4133 #define RX_RING_BYTES (sizeof(struct macb_dma_desc) * RX_RING_SIZE) 4134 #define TX_RING_SIZE 128 4135 #define TX_RING_BYTES (sizeof(struct macb_dma_desc) * TX_RING_SIZE) 4136 #define MACB_TX_WAKEUP_THRESH (3 * TX_RING_SIZE / 4) 4137 #define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \ 4138 | MACB_BIT(ISR_ROVR)) 4139 #define MACB_TX_ERR_FLAGS (MACB_BIT(ISR_TUND) \ 4140 | MACB_BIT(ISR_RLE) \ 4141 | MACB_BIT(TXERR)) 4142 #define MACB_TX_INT_FLAGS (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP)) 4143 #define MACB_MAX_TX_LEN ((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1)) 4144 #define GEM_MAX_TX_LEN ((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1)) 4145 #define GEM_MTU_MIN_SIZE 68 4146 #define MACB_WOL_HAS_MAGIC_PACKET (0x1 << 0) 4147 #define MACB_WOL_ENABLED (0x1 << 1) 4148 #define MACB_HALT_TIMEOUT 1230 4149 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4150 #endif 4151 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4152 #endif 4153 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4154 #endif 4155 #if defined(DEBUG) && defined(VERBOSE_DEBUG) 4156 #endif 4157 #ifdef CONFIG_NET_POLL_CONTROLLER 4158 #endif 4159 #if defined(DEBUG) && defined(VERBOSE_DEBUG) 4160 #endif 4161 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4162 #endif 4163 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4164 #endif 4165 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4166 #endif 
4167 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4168 #endif 4169 /* LDV_COMMENT_END_PREP */ 4170 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "macb_ioctl" */ 4171 struct ifreq * var_group6; 4172 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "macb_ioctl" */ 4173 int var_macb_ioctl_71_p2; 4174 /* LDV_COMMENT_BEGIN_PREP */ 4175 #ifdef CONFIG_NET_POLL_CONTROLLER 4176 #endif 4177 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4178 #endif 4179 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4180 #endif 4181 #if defined(CONFIG_OF) 4182 #define AT91ETHER_MAX_RBUFF_SZ 0x600 4183 #define AT91ETHER_MAX_RX_DESCR 9 4184 #ifdef CONFIG_NET_POLL_CONTROLLER 4185 #endif 4186 #ifdef CONFIG_NET_POLL_CONTROLLER 4187 #endif 4188 #endif 4189 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4190 #endif 4191 /* LDV_COMMENT_END_PREP */ 4192 /* content: static int macb_change_mtu(struct net_device *dev, int new_mtu)*/ 4193 /* LDV_COMMENT_BEGIN_PREP */ 4194 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 4195 #define MACB_RX_BUFFER_SIZE 128 4196 #define RX_BUFFER_MULTIPLE 64 4197 #define RX_RING_SIZE 512 4198 #define RX_RING_BYTES (sizeof(struct macb_dma_desc) * RX_RING_SIZE) 4199 #define TX_RING_SIZE 128 4200 #define TX_RING_BYTES (sizeof(struct macb_dma_desc) * TX_RING_SIZE) 4201 #define MACB_TX_WAKEUP_THRESH (3 * TX_RING_SIZE / 4) 4202 #define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \ 4203 | MACB_BIT(ISR_ROVR)) 4204 #define MACB_TX_ERR_FLAGS (MACB_BIT(ISR_TUND) \ 4205 | MACB_BIT(ISR_RLE) \ 4206 | MACB_BIT(TXERR)) 4207 #define MACB_TX_INT_FLAGS (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP)) 4208 #define MACB_MAX_TX_LEN ((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1)) 4209 #define GEM_MAX_TX_LEN ((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1)) 4210 #define GEM_MTU_MIN_SIZE 68 4211 #define MACB_WOL_HAS_MAGIC_PACKET (0x1 << 0) 4212 #define MACB_WOL_ENABLED (0x1 << 1) 4213 #define MACB_HALT_TIMEOUT 1230 4214 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4215 #endif 4216 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4217 #endif 
4218 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4219 #endif 4220 #if defined(DEBUG) && defined(VERBOSE_DEBUG) 4221 #endif 4222 #ifdef CONFIG_NET_POLL_CONTROLLER 4223 #endif 4224 #if defined(DEBUG) && defined(VERBOSE_DEBUG) 4225 #endif 4226 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4227 #endif 4228 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4229 #endif 4230 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4231 #endif 4232 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4233 #endif 4234 /* LDV_COMMENT_END_PREP */ 4235 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "macb_change_mtu" */ 4236 int var_macb_change_mtu_60_p1; 4237 /* LDV_COMMENT_BEGIN_PREP */ 4238 #ifdef CONFIG_NET_POLL_CONTROLLER 4239 #endif 4240 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4241 #endif 4242 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4243 #endif 4244 #if defined(CONFIG_OF) 4245 #define AT91ETHER_MAX_RBUFF_SZ 0x600 4246 #define AT91ETHER_MAX_RX_DESCR 9 4247 #ifdef CONFIG_NET_POLL_CONTROLLER 4248 #endif 4249 #ifdef CONFIG_NET_POLL_CONTROLLER 4250 #endif 4251 #endif 4252 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4253 #endif 4254 /* LDV_COMMENT_END_PREP */ 4255 /* content: static void macb_poll_controller(struct net_device *dev)*/ 4256 /* LDV_COMMENT_BEGIN_PREP */ 4257 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 4258 #define MACB_RX_BUFFER_SIZE 128 4259 #define RX_BUFFER_MULTIPLE 64 4260 #define RX_RING_SIZE 512 4261 #define RX_RING_BYTES (sizeof(struct macb_dma_desc) * RX_RING_SIZE) 4262 #define TX_RING_SIZE 128 4263 #define TX_RING_BYTES (sizeof(struct macb_dma_desc) * TX_RING_SIZE) 4264 #define MACB_TX_WAKEUP_THRESH (3 * TX_RING_SIZE / 4) 4265 #define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \ 4266 | MACB_BIT(ISR_ROVR)) 4267 #define MACB_TX_ERR_FLAGS (MACB_BIT(ISR_TUND) \ 4268 | MACB_BIT(ISR_RLE) \ 4269 | MACB_BIT(TXERR)) 4270 #define MACB_TX_INT_FLAGS (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP)) 4271 #define MACB_MAX_TX_LEN ((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1)) 4272 #define GEM_MAX_TX_LEN ((unsigned int)((1 << 
GEM_TX_FRMLEN_SIZE) - 1)) 4273 #define GEM_MTU_MIN_SIZE 68 4274 #define MACB_WOL_HAS_MAGIC_PACKET (0x1 << 0) 4275 #define MACB_WOL_ENABLED (0x1 << 1) 4276 #define MACB_HALT_TIMEOUT 1230 4277 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4278 #endif 4279 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4280 #endif 4281 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4282 #endif 4283 #if defined(DEBUG) && defined(VERBOSE_DEBUG) 4284 #endif 4285 #ifdef CONFIG_NET_POLL_CONTROLLER 4286 /* LDV_COMMENT_END_PREP */ 4287 /* LDV_COMMENT_BEGIN_PREP */ 4288 #endif 4289 #if defined(DEBUG) && defined(VERBOSE_DEBUG) 4290 #endif 4291 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4292 #endif 4293 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4294 #endif 4295 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4296 #endif 4297 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4298 #endif 4299 #ifdef CONFIG_NET_POLL_CONTROLLER 4300 #endif 4301 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4302 #endif 4303 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4304 #endif 4305 #if defined(CONFIG_OF) 4306 #define AT91ETHER_MAX_RBUFF_SZ 0x600 4307 #define AT91ETHER_MAX_RX_DESCR 9 4308 #ifdef CONFIG_NET_POLL_CONTROLLER 4309 #endif 4310 #ifdef CONFIG_NET_POLL_CONTROLLER 4311 #endif 4312 #endif 4313 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4314 #endif 4315 /* LDV_COMMENT_END_PREP */ 4316 /* content: static int macb_set_features(struct net_device *netdev, netdev_features_t features)*/ 4317 /* LDV_COMMENT_BEGIN_PREP */ 4318 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 4319 #define MACB_RX_BUFFER_SIZE 128 4320 #define RX_BUFFER_MULTIPLE 64 4321 #define RX_RING_SIZE 512 4322 #define RX_RING_BYTES (sizeof(struct macb_dma_desc) * RX_RING_SIZE) 4323 #define TX_RING_SIZE 128 4324 #define TX_RING_BYTES (sizeof(struct macb_dma_desc) * TX_RING_SIZE) 4325 #define MACB_TX_WAKEUP_THRESH (3 * TX_RING_SIZE / 4) 4326 #define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \ 4327 | MACB_BIT(ISR_ROVR)) 4328 #define MACB_TX_ERR_FLAGS (MACB_BIT(ISR_TUND) \ 4329 | MACB_BIT(ISR_RLE) \ 4330 | MACB_BIT(TXERR)) 4331 #define 
MACB_TX_INT_FLAGS (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP)) 4332 #define MACB_MAX_TX_LEN ((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1)) 4333 #define GEM_MAX_TX_LEN ((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1)) 4334 #define GEM_MTU_MIN_SIZE 68 4335 #define MACB_WOL_HAS_MAGIC_PACKET (0x1 << 0) 4336 #define MACB_WOL_ENABLED (0x1 << 1) 4337 #define MACB_HALT_TIMEOUT 1230 4338 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4339 #endif 4340 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4341 #endif 4342 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4343 #endif 4344 #if defined(DEBUG) && defined(VERBOSE_DEBUG) 4345 #endif 4346 #ifdef CONFIG_NET_POLL_CONTROLLER 4347 #endif 4348 #if defined(DEBUG) && defined(VERBOSE_DEBUG) 4349 #endif 4350 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4351 #endif 4352 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4353 #endif 4354 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4355 #endif 4356 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4357 #endif 4358 /* LDV_COMMENT_END_PREP */ 4359 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "macb_set_features" */ 4360 netdev_features_t var_macb_set_features_72_p1; 4361 /* LDV_COMMENT_BEGIN_PREP */ 4362 #ifdef CONFIG_NET_POLL_CONTROLLER 4363 #endif 4364 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4365 #endif 4366 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4367 #endif 4368 #if defined(CONFIG_OF) 4369 #define AT91ETHER_MAX_RBUFF_SZ 0x600 4370 #define AT91ETHER_MAX_RX_DESCR 9 4371 #ifdef CONFIG_NET_POLL_CONTROLLER 4372 #endif 4373 #ifdef CONFIG_NET_POLL_CONTROLLER 4374 #endif 4375 #endif 4376 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4377 #endif 4378 /* LDV_COMMENT_END_PREP */ 4379 4380 /** STRUCT: struct type: net_device_ops, struct name: at91ether_netdev_ops **/ 4381 /* content: static int at91ether_open(struct net_device *dev)*/ 4382 /* LDV_COMMENT_BEGIN_PREP */ 4383 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 4384 #define MACB_RX_BUFFER_SIZE 128 4385 #define RX_BUFFER_MULTIPLE 64 4386 #define RX_RING_SIZE 512 4387 #define RX_RING_BYTES (sizeof(struct macb_dma_desc) * 
RX_RING_SIZE) 4388 #define TX_RING_SIZE 128 4389 #define TX_RING_BYTES (sizeof(struct macb_dma_desc) * TX_RING_SIZE) 4390 #define MACB_TX_WAKEUP_THRESH (3 * TX_RING_SIZE / 4) 4391 #define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \ 4392 | MACB_BIT(ISR_ROVR)) 4393 #define MACB_TX_ERR_FLAGS (MACB_BIT(ISR_TUND) \ 4394 | MACB_BIT(ISR_RLE) \ 4395 | MACB_BIT(TXERR)) 4396 #define MACB_TX_INT_FLAGS (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP)) 4397 #define MACB_MAX_TX_LEN ((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1)) 4398 #define GEM_MAX_TX_LEN ((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1)) 4399 #define GEM_MTU_MIN_SIZE 68 4400 #define MACB_WOL_HAS_MAGIC_PACKET (0x1 << 0) 4401 #define MACB_WOL_ENABLED (0x1 << 1) 4402 #define MACB_HALT_TIMEOUT 1230 4403 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4404 #endif 4405 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4406 #endif 4407 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4408 #endif 4409 #if defined(DEBUG) && defined(VERBOSE_DEBUG) 4410 #endif 4411 #ifdef CONFIG_NET_POLL_CONTROLLER 4412 #endif 4413 #if defined(DEBUG) && defined(VERBOSE_DEBUG) 4414 #endif 4415 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4416 #endif 4417 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4418 #endif 4419 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4420 #endif 4421 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4422 #endif 4423 #ifdef CONFIG_NET_POLL_CONTROLLER 4424 #endif 4425 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4426 #endif 4427 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4428 #endif 4429 #if defined(CONFIG_OF) 4430 #define AT91ETHER_MAX_RBUFF_SZ 0x600 4431 #define AT91ETHER_MAX_RX_DESCR 9 4432 /* LDV_COMMENT_END_PREP */ 4433 /* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "at91ether_open" */ 4434 static int res_at91ether_open_78; 4435 /* LDV_COMMENT_BEGIN_PREP */ 4436 #ifdef CONFIG_NET_POLL_CONTROLLER 4437 #endif 4438 #ifdef CONFIG_NET_POLL_CONTROLLER 4439 #endif 4440 #endif 4441 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4442 #endif 4443 /* LDV_COMMENT_END_PREP */ 4444 /* 
content: static int at91ether_close(struct net_device *dev)*/ 4445 /* LDV_COMMENT_BEGIN_PREP */ 4446 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 4447 #define MACB_RX_BUFFER_SIZE 128 4448 #define RX_BUFFER_MULTIPLE 64 4449 #define RX_RING_SIZE 512 4450 #define RX_RING_BYTES (sizeof(struct macb_dma_desc) * RX_RING_SIZE) 4451 #define TX_RING_SIZE 128 4452 #define TX_RING_BYTES (sizeof(struct macb_dma_desc) * TX_RING_SIZE) 4453 #define MACB_TX_WAKEUP_THRESH (3 * TX_RING_SIZE / 4) 4454 #define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \ 4455 | MACB_BIT(ISR_ROVR)) 4456 #define MACB_TX_ERR_FLAGS (MACB_BIT(ISR_TUND) \ 4457 | MACB_BIT(ISR_RLE) \ 4458 | MACB_BIT(TXERR)) 4459 #define MACB_TX_INT_FLAGS (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP)) 4460 #define MACB_MAX_TX_LEN ((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1)) 4461 #define GEM_MAX_TX_LEN ((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1)) 4462 #define GEM_MTU_MIN_SIZE 68 4463 #define MACB_WOL_HAS_MAGIC_PACKET (0x1 << 0) 4464 #define MACB_WOL_ENABLED (0x1 << 1) 4465 #define MACB_HALT_TIMEOUT 1230 4466 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4467 #endif 4468 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4469 #endif 4470 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4471 #endif 4472 #if defined(DEBUG) && defined(VERBOSE_DEBUG) 4473 #endif 4474 #ifdef CONFIG_NET_POLL_CONTROLLER 4475 #endif 4476 #if defined(DEBUG) && defined(VERBOSE_DEBUG) 4477 #endif 4478 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4479 #endif 4480 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4481 #endif 4482 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4483 #endif 4484 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4485 #endif 4486 #ifdef CONFIG_NET_POLL_CONTROLLER 4487 #endif 4488 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4489 #endif 4490 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4491 #endif 4492 #if defined(CONFIG_OF) 4493 #define AT91ETHER_MAX_RBUFF_SZ 0x600 4494 #define AT91ETHER_MAX_RX_DESCR 9 4495 /* LDV_COMMENT_END_PREP */ 4496 /* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function 
call "at91ether_close" */ 4497 static int res_at91ether_close_79; 4498 /* LDV_COMMENT_BEGIN_PREP */ 4499 #ifdef CONFIG_NET_POLL_CONTROLLER 4500 #endif 4501 #ifdef CONFIG_NET_POLL_CONTROLLER 4502 #endif 4503 #endif 4504 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4505 #endif 4506 /* LDV_COMMENT_END_PREP */ 4507 /* content: static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)*/ 4508 /* LDV_COMMENT_BEGIN_PREP */ 4509 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 4510 #define MACB_RX_BUFFER_SIZE 128 4511 #define RX_BUFFER_MULTIPLE 64 4512 #define RX_RING_SIZE 512 4513 #define RX_RING_BYTES (sizeof(struct macb_dma_desc) * RX_RING_SIZE) 4514 #define TX_RING_SIZE 128 4515 #define TX_RING_BYTES (sizeof(struct macb_dma_desc) * TX_RING_SIZE) 4516 #define MACB_TX_WAKEUP_THRESH (3 * TX_RING_SIZE / 4) 4517 #define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \ 4518 | MACB_BIT(ISR_ROVR)) 4519 #define MACB_TX_ERR_FLAGS (MACB_BIT(ISR_TUND) \ 4520 | MACB_BIT(ISR_RLE) \ 4521 | MACB_BIT(TXERR)) 4522 #define MACB_TX_INT_FLAGS (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP)) 4523 #define MACB_MAX_TX_LEN ((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1)) 4524 #define GEM_MAX_TX_LEN ((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1)) 4525 #define GEM_MTU_MIN_SIZE 68 4526 #define MACB_WOL_HAS_MAGIC_PACKET (0x1 << 0) 4527 #define MACB_WOL_ENABLED (0x1 << 1) 4528 #define MACB_HALT_TIMEOUT 1230 4529 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4530 #endif 4531 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4532 #endif 4533 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4534 #endif 4535 #if defined(DEBUG) && defined(VERBOSE_DEBUG) 4536 #endif 4537 #ifdef CONFIG_NET_POLL_CONTROLLER 4538 #endif 4539 #if defined(DEBUG) && defined(VERBOSE_DEBUG) 4540 #endif 4541 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4542 #endif 4543 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4544 #endif 4545 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4546 #endif 4547 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4548 #endif 4549 #ifdef CONFIG_NET_POLL_CONTROLLER 4550 #endif 4551 
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4552 #endif 4553 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4554 #endif 4555 #if defined(CONFIG_OF) 4556 #define AT91ETHER_MAX_RBUFF_SZ 0x600 4557 #define AT91ETHER_MAX_RX_DESCR 9 4558 /* LDV_COMMENT_END_PREP */ 4559 /* LDV_COMMENT_BEGIN_PREP */ 4560 #ifdef CONFIG_NET_POLL_CONTROLLER 4561 #endif 4562 #ifdef CONFIG_NET_POLL_CONTROLLER 4563 #endif 4564 #endif 4565 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4566 #endif 4567 /* LDV_COMMENT_END_PREP */ 4568 /* content: static struct net_device_stats *macb_get_stats(struct net_device *dev)*/ 4569 /* LDV_COMMENT_BEGIN_PREP */ 4570 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 4571 #define MACB_RX_BUFFER_SIZE 128 4572 #define RX_BUFFER_MULTIPLE 64 4573 #define RX_RING_SIZE 512 4574 #define RX_RING_BYTES (sizeof(struct macb_dma_desc) * RX_RING_SIZE) 4575 #define TX_RING_SIZE 128 4576 #define TX_RING_BYTES (sizeof(struct macb_dma_desc) * TX_RING_SIZE) 4577 #define MACB_TX_WAKEUP_THRESH (3 * TX_RING_SIZE / 4) 4578 #define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \ 4579 | MACB_BIT(ISR_ROVR)) 4580 #define MACB_TX_ERR_FLAGS (MACB_BIT(ISR_TUND) \ 4581 | MACB_BIT(ISR_RLE) \ 4582 | MACB_BIT(TXERR)) 4583 #define MACB_TX_INT_FLAGS (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP)) 4584 #define MACB_MAX_TX_LEN ((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1)) 4585 #define GEM_MAX_TX_LEN ((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1)) 4586 #define GEM_MTU_MIN_SIZE 68 4587 #define MACB_WOL_HAS_MAGIC_PACKET (0x1 << 0) 4588 #define MACB_WOL_ENABLED (0x1 << 1) 4589 #define MACB_HALT_TIMEOUT 1230 4590 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4591 #endif 4592 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4593 #endif 4594 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4595 #endif 4596 #if defined(DEBUG) && defined(VERBOSE_DEBUG) 4597 #endif 4598 #ifdef CONFIG_NET_POLL_CONTROLLER 4599 #endif 4600 #if defined(DEBUG) && defined(VERBOSE_DEBUG) 4601 #endif 4602 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4603 #endif 4604 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4605 
#endif 4606 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4607 #endif 4608 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4609 #endif 4610 /* LDV_COMMENT_END_PREP */ 4611 /* LDV_COMMENT_BEGIN_PREP */ 4612 #ifdef CONFIG_NET_POLL_CONTROLLER 4613 #endif 4614 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4615 #endif 4616 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4617 #endif 4618 #if defined(CONFIG_OF) 4619 #define AT91ETHER_MAX_RBUFF_SZ 0x600 4620 #define AT91ETHER_MAX_RX_DESCR 9 4621 #ifdef CONFIG_NET_POLL_CONTROLLER 4622 #endif 4623 #ifdef CONFIG_NET_POLL_CONTROLLER 4624 #endif 4625 #endif 4626 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4627 #endif 4628 /* LDV_COMMENT_END_PREP */ 4629 /* content: static void macb_set_rx_mode(struct net_device *dev)*/ 4630 /* LDV_COMMENT_BEGIN_PREP */ 4631 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 4632 #define MACB_RX_BUFFER_SIZE 128 4633 #define RX_BUFFER_MULTIPLE 64 4634 #define RX_RING_SIZE 512 4635 #define RX_RING_BYTES (sizeof(struct macb_dma_desc) * RX_RING_SIZE) 4636 #define TX_RING_SIZE 128 4637 #define TX_RING_BYTES (sizeof(struct macb_dma_desc) * TX_RING_SIZE) 4638 #define MACB_TX_WAKEUP_THRESH (3 * TX_RING_SIZE / 4) 4639 #define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \ 4640 | MACB_BIT(ISR_ROVR)) 4641 #define MACB_TX_ERR_FLAGS (MACB_BIT(ISR_TUND) \ 4642 | MACB_BIT(ISR_RLE) \ 4643 | MACB_BIT(TXERR)) 4644 #define MACB_TX_INT_FLAGS (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP)) 4645 #define MACB_MAX_TX_LEN ((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1)) 4646 #define GEM_MAX_TX_LEN ((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1)) 4647 #define GEM_MTU_MIN_SIZE 68 4648 #define MACB_WOL_HAS_MAGIC_PACKET (0x1 << 0) 4649 #define MACB_WOL_ENABLED (0x1 << 1) 4650 #define MACB_HALT_TIMEOUT 1230 4651 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4652 #endif 4653 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4654 #endif 4655 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4656 #endif 4657 #if defined(DEBUG) && defined(VERBOSE_DEBUG) 4658 #endif 4659 #ifdef CONFIG_NET_POLL_CONTROLLER 4660 #endif 4661 #if 
defined(DEBUG) && defined(VERBOSE_DEBUG) 4662 #endif 4663 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4664 #endif 4665 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4666 #endif 4667 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4668 #endif 4669 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4670 #endif 4671 /* LDV_COMMENT_END_PREP */ 4672 /* LDV_COMMENT_BEGIN_PREP */ 4673 #ifdef CONFIG_NET_POLL_CONTROLLER 4674 #endif 4675 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4676 #endif 4677 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4678 #endif 4679 #if defined(CONFIG_OF) 4680 #define AT91ETHER_MAX_RBUFF_SZ 0x600 4681 #define AT91ETHER_MAX_RX_DESCR 9 4682 #ifdef CONFIG_NET_POLL_CONTROLLER 4683 #endif 4684 #ifdef CONFIG_NET_POLL_CONTROLLER 4685 #endif 4686 #endif 4687 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4688 #endif 4689 /* LDV_COMMENT_END_PREP */ 4690 /* content: static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)*/ 4691 /* LDV_COMMENT_BEGIN_PREP */ 4692 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 4693 #define MACB_RX_BUFFER_SIZE 128 4694 #define RX_BUFFER_MULTIPLE 64 4695 #define RX_RING_SIZE 512 4696 #define RX_RING_BYTES (sizeof(struct macb_dma_desc) * RX_RING_SIZE) 4697 #define TX_RING_SIZE 128 4698 #define TX_RING_BYTES (sizeof(struct macb_dma_desc) * TX_RING_SIZE) 4699 #define MACB_TX_WAKEUP_THRESH (3 * TX_RING_SIZE / 4) 4700 #define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \ 4701 | MACB_BIT(ISR_ROVR)) 4702 #define MACB_TX_ERR_FLAGS (MACB_BIT(ISR_TUND) \ 4703 | MACB_BIT(ISR_RLE) \ 4704 | MACB_BIT(TXERR)) 4705 #define MACB_TX_INT_FLAGS (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP)) 4706 #define MACB_MAX_TX_LEN ((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1)) 4707 #define GEM_MAX_TX_LEN ((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1)) 4708 #define GEM_MTU_MIN_SIZE 68 4709 #define MACB_WOL_HAS_MAGIC_PACKET (0x1 << 0) 4710 #define MACB_WOL_ENABLED (0x1 << 1) 4711 #define MACB_HALT_TIMEOUT 1230 4712 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4713 #endif 4714 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4715 #endif 4716 
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4717 #endif 4718 #if defined(DEBUG) && defined(VERBOSE_DEBUG) 4719 #endif 4720 #ifdef CONFIG_NET_POLL_CONTROLLER 4721 #endif 4722 #if defined(DEBUG) && defined(VERBOSE_DEBUG) 4723 #endif 4724 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4725 #endif 4726 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4727 #endif 4728 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4729 #endif 4730 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4731 #endif 4732 /* LDV_COMMENT_END_PREP */ 4733 /* LDV_COMMENT_BEGIN_PREP */ 4734 #ifdef CONFIG_NET_POLL_CONTROLLER 4735 #endif 4736 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4737 #endif 4738 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4739 #endif 4740 #if defined(CONFIG_OF) 4741 #define AT91ETHER_MAX_RBUFF_SZ 0x600 4742 #define AT91ETHER_MAX_RX_DESCR 9 4743 #ifdef CONFIG_NET_POLL_CONTROLLER 4744 #endif 4745 #ifdef CONFIG_NET_POLL_CONTROLLER 4746 #endif 4747 #endif 4748 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4749 #endif 4750 /* LDV_COMMENT_END_PREP */ 4751 /* content: static void at91ether_poll_controller(struct net_device *dev)*/ 4752 /* LDV_COMMENT_BEGIN_PREP */ 4753 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 4754 #define MACB_RX_BUFFER_SIZE 128 4755 #define RX_BUFFER_MULTIPLE 64 4756 #define RX_RING_SIZE 512 4757 #define RX_RING_BYTES (sizeof(struct macb_dma_desc) * RX_RING_SIZE) 4758 #define TX_RING_SIZE 128 4759 #define TX_RING_BYTES (sizeof(struct macb_dma_desc) * TX_RING_SIZE) 4760 #define MACB_TX_WAKEUP_THRESH (3 * TX_RING_SIZE / 4) 4761 #define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \ 4762 | MACB_BIT(ISR_ROVR)) 4763 #define MACB_TX_ERR_FLAGS (MACB_BIT(ISR_TUND) \ 4764 | MACB_BIT(ISR_RLE) \ 4765 | MACB_BIT(TXERR)) 4766 #define MACB_TX_INT_FLAGS (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP)) 4767 #define MACB_MAX_TX_LEN ((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1)) 4768 #define GEM_MAX_TX_LEN ((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1)) 4769 #define GEM_MTU_MIN_SIZE 68 4770 #define MACB_WOL_HAS_MAGIC_PACKET (0x1 << 0) 4771 #define MACB_WOL_ENABLED 
(0x1 << 1) 4772 #define MACB_HALT_TIMEOUT 1230 4773 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4774 #endif 4775 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4776 #endif 4777 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4778 #endif 4779 #if defined(DEBUG) && defined(VERBOSE_DEBUG) 4780 #endif 4781 #ifdef CONFIG_NET_POLL_CONTROLLER 4782 #endif 4783 #if defined(DEBUG) && defined(VERBOSE_DEBUG) 4784 #endif 4785 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4786 #endif 4787 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4788 #endif 4789 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4790 #endif 4791 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4792 #endif 4793 #ifdef CONFIG_NET_POLL_CONTROLLER 4794 #endif 4795 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4796 #endif 4797 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4798 #endif 4799 #if defined(CONFIG_OF) 4800 #define AT91ETHER_MAX_RBUFF_SZ 0x600 4801 #define AT91ETHER_MAX_RX_DESCR 9 4802