Error Trace
[Home]
Bug # 173
Show/hide error trace Error trace
{ 19 typedef signed char __s8; 20 typedef unsigned char __u8; 23 typedef unsigned short __u16; 25 typedef int __s32; 26 typedef unsigned int __u32; 29 typedef long long __s64; 30 typedef unsigned long long __u64; 15 typedef signed char s8; 16 typedef unsigned char u8; 18 typedef short s16; 19 typedef unsigned short u16; 21 typedef int s32; 22 typedef unsigned int u32; 24 typedef long long s64; 25 typedef unsigned long long u64; 14 typedef long __kernel_long_t; 15 typedef unsigned long __kernel_ulong_t; 27 typedef int __kernel_pid_t; 48 typedef unsigned int __kernel_uid32_t; 49 typedef unsigned int __kernel_gid32_t; 71 typedef __kernel_ulong_t __kernel_size_t; 72 typedef __kernel_long_t __kernel_ssize_t; 87 typedef long long __kernel_loff_t; 88 typedef __kernel_long_t __kernel_time_t; 89 typedef __kernel_long_t __kernel_clock_t; 90 typedef int __kernel_timer_t; 91 typedef int __kernel_clockid_t; 29 typedef __u16 __be16; 31 typedef __u32 __be32; 36 typedef __u32 __wsum; 12 typedef __u32 __kernel_dev_t; 15 typedef __kernel_dev_t dev_t; 18 typedef unsigned short umode_t; 21 typedef __kernel_pid_t pid_t; 26 typedef __kernel_clockid_t clockid_t; 29 typedef _Bool bool; 31 typedef __kernel_uid32_t uid_t; 32 typedef __kernel_gid32_t gid_t; 45 typedef __kernel_loff_t loff_t; 54 typedef __kernel_size_t size_t; 59 typedef __kernel_ssize_t ssize_t; 69 typedef __kernel_time_t time_t; 74 typedef __kernel_clock_t clock_t; 102 typedef __s32 int32_t; 106 typedef __u8 uint8_t; 108 typedef __u32 uint32_t; 111 typedef __u64 uint64_t; 133 typedef unsigned long sector_t; 134 typedef unsigned long blkcnt_t; 152 typedef u64 dma_addr_t; 157 typedef unsigned int gfp_t; 158 typedef unsigned int fmode_t; 161 typedef u64 phys_addr_t; 176 struct __anonstruct_atomic_t_6 { int counter; } ; 176 typedef struct __anonstruct_atomic_t_6 atomic_t; 181 struct __anonstruct_atomic64_t_7 { long counter; } ; 181 typedef struct __anonstruct_atomic64_t_7 atomic64_t; 182 struct list_head { struct list_head 
*next; struct list_head *prev; } ; 187 struct hlist_node ; 187 struct hlist_head { struct hlist_node *first; } ; 191 struct hlist_node { struct hlist_node *next; struct hlist_node **pprev; } ; 202 struct callback_head { struct callback_head *next; void (*func)(struct callback_head *); } ; 103 struct kernel_symbol { unsigned long value; const char *name; } ; 34 struct module ; 115 typedef void (*ctor_fn_t)(); 83 struct ctl_table ; 58 struct device ; 64 struct net_device ; 465 struct file_operations ; 477 struct completion ; 478 struct pt_regs ; 546 struct bug_entry { int bug_addr_disp; int file_disp; unsigned short line; unsigned short flags; } ; 114 struct timespec ; 115 struct compat_timespec ; 116 struct pollfd ; 117 struct __anonstruct_futex_27 { u32 *uaddr; u32 val; u32 flags; u32 bitset; u64 time; u32 *uaddr2; } ; 117 struct __anonstruct_nanosleep_28 { clockid_t clockid; struct timespec *rmtp; struct compat_timespec *compat_rmtp; u64 expires; } ; 117 struct __anonstruct_poll_29 { struct pollfd *ufds; int nfds; int has_timeout; unsigned long tv_sec; unsigned long tv_nsec; } ; 117 union __anonunion____missing_field_name_26 { struct __anonstruct_futex_27 futex; struct __anonstruct_nanosleep_28 nanosleep; struct __anonstruct_poll_29 poll; } ; 117 struct restart_block { long int (*fn)(struct restart_block *); union __anonunion____missing_field_name_26 __annonCompField4; } ; 50 struct task_struct ; 39 struct page ; 26 struct mm_struct ; 288 struct pt_regs { unsigned long r15; unsigned long r14; unsigned long r13; unsigned long r12; unsigned long bp; unsigned long bx; unsigned long r11; unsigned long r10; unsigned long r9; unsigned long r8; unsigned long ax; unsigned long cx; unsigned long dx; unsigned long si; unsigned long di; unsigned long orig_ax; unsigned long ip; unsigned long cs; unsigned long flags; unsigned long sp; unsigned long ss; } ; 66 struct __anonstruct____missing_field_name_32 { unsigned int a; unsigned int b; } ; 66 struct 
__anonstruct____missing_field_name_33 { u16 limit0; u16 base0; unsigned char base1; unsigned char type; unsigned char s; unsigned char dpl; unsigned char p; unsigned char limit; unsigned char avl; unsigned char l; unsigned char d; unsigned char g; unsigned char base2; } ; 66 union __anonunion____missing_field_name_31 { struct __anonstruct____missing_field_name_32 __annonCompField5; struct __anonstruct____missing_field_name_33 __annonCompField6; } ; 66 struct desc_struct { union __anonunion____missing_field_name_31 __annonCompField7; } ; 13 typedef unsigned long pteval_t; 14 typedef unsigned long pmdval_t; 16 typedef unsigned long pgdval_t; 17 typedef unsigned long pgprotval_t; 19 struct __anonstruct_pte_t_34 { pteval_t pte; } ; 19 typedef struct __anonstruct_pte_t_34 pte_t; 21 struct pgprot { pgprotval_t pgprot; } ; 256 typedef struct pgprot pgprot_t; 258 struct __anonstruct_pgd_t_35 { pgdval_t pgd; } ; 258 typedef struct __anonstruct_pgd_t_35 pgd_t; 297 struct __anonstruct_pmd_t_37 { pmdval_t pmd; } ; 297 typedef struct __anonstruct_pmd_t_37 pmd_t; 423 typedef struct page *pgtable_t; 434 struct file ; 445 struct seq_file ; 481 struct thread_struct ; 483 struct cpumask ; 20 struct qspinlock { atomic_t val; } ; 33 typedef struct qspinlock arch_spinlock_t; 34 struct qrwlock { atomic_t cnts; arch_spinlock_t wait_lock; } ; 14 typedef struct qrwlock arch_rwlock_t; 247 struct math_emu_info { long ___orig_eip; struct pt_regs *regs; } ; 83 struct static_key { atomic_t enabled; } ; 23 typedef atomic64_t atomic_long_t; 359 struct cpumask { unsigned long bits[128U]; } ; 15 typedef struct cpumask cpumask_t; 654 typedef struct cpumask *cpumask_var_t; 22 struct tracepoint_func { void *func; void *data; int prio; } ; 28 struct tracepoint { const char *name; struct static_key key; int (*regfunc)(); void (*unregfunc)(); struct tracepoint_func *funcs; } ; 233 struct fregs_state { u32 cwd; u32 swd; u32 twd; u32 fip; u32 fcs; u32 foo; u32 fos; u32 st_space[20U]; u32 status; } ; 26 
struct __anonstruct____missing_field_name_61 { u64 rip; u64 rdp; } ; 26 struct __anonstruct____missing_field_name_62 { u32 fip; u32 fcs; u32 foo; u32 fos; } ; 26 union __anonunion____missing_field_name_60 { struct __anonstruct____missing_field_name_61 __annonCompField13; struct __anonstruct____missing_field_name_62 __annonCompField14; } ; 26 union __anonunion____missing_field_name_63 { u32 padding1[12U]; u32 sw_reserved[12U]; } ; 26 struct fxregs_state { u16 cwd; u16 swd; u16 twd; u16 fop; union __anonunion____missing_field_name_60 __annonCompField15; u32 mxcsr; u32 mxcsr_mask; u32 st_space[32U]; u32 xmm_space[64U]; u32 padding[12U]; union __anonunion____missing_field_name_63 __annonCompField16; } ; 66 struct swregs_state { u32 cwd; u32 swd; u32 twd; u32 fip; u32 fcs; u32 foo; u32 fos; u32 st_space[20U]; u8 ftop; u8 changed; u8 lookahead; u8 no_update; u8 rm; u8 alimit; struct math_emu_info *info; u32 entry_eip; } ; 227 struct xstate_header { u64 xfeatures; u64 xcomp_bv; u64 reserved[6U]; } ; 233 struct xregs_state { struct fxregs_state i387; struct xstate_header header; u8 extended_state_area[0U]; } ; 254 union fpregs_state { struct fregs_state fsave; struct fxregs_state fxsave; struct swregs_state soft; struct xregs_state xsave; u8 __padding[4096U]; } ; 271 struct fpu { unsigned int last_cpu; unsigned char fpstate_active; unsigned char fpregs_active; union fpregs_state state; } ; 180 struct seq_operations ; 386 struct perf_event ; 391 struct __anonstruct_mm_segment_t_75 { unsigned long seg; } ; 391 typedef struct __anonstruct_mm_segment_t_75 mm_segment_t; 392 struct thread_struct { struct desc_struct tls_array[3U]; unsigned long sp0; unsigned long sp; unsigned short es; unsigned short ds; unsigned short fsindex; unsigned short gsindex; u32 status; unsigned long fsbase; unsigned long gsbase; struct perf_event *ptrace_bps[4U]; unsigned long debugreg6; unsigned long ptrace_dr7; unsigned long cr2; unsigned long trap_nr; unsigned long error_code; unsigned long 
*io_bitmap_ptr; unsigned long iopl; unsigned int io_bitmap_max; mm_segment_t addr_limit; unsigned char sig_on_uaccess_err; unsigned char uaccess_err; struct fpu fpu; } ; 48 struct thread_info { unsigned long flags; } ; 33 struct lockdep_map ; 55 struct stack_trace { unsigned int nr_entries; unsigned int max_entries; unsigned long *entries; int skip; } ; 28 struct lockdep_subclass_key { char __one_byte; } ; 53 struct lock_class_key { struct lockdep_subclass_key subkeys[8U]; } ; 59 struct lock_class { struct hlist_node hash_entry; struct list_head lock_entry; struct lockdep_subclass_key *key; unsigned int subclass; unsigned int dep_gen_id; unsigned long usage_mask; struct stack_trace usage_traces[13U]; struct list_head locks_after; struct list_head locks_before; unsigned int version; unsigned long ops; const char *name; int name_version; unsigned long contention_point[4U]; unsigned long contending_point[4U]; } ; 144 struct lockdep_map { struct lock_class_key *key; struct lock_class *class_cache[2U]; const char *name; int cpu; unsigned long ip; } ; 207 struct held_lock { u64 prev_chain_key; unsigned long acquire_ip; struct lockdep_map *instance; struct lockdep_map *nest_lock; u64 waittime_stamp; u64 holdtime_stamp; unsigned short class_idx; unsigned char irq_context; unsigned char trylock; unsigned char read; unsigned char check; unsigned char hardirqs_off; unsigned short references; unsigned int pin_count; } ; 593 struct raw_spinlock { arch_spinlock_t raw_lock; unsigned int magic; unsigned int owner_cpu; void *owner; struct lockdep_map dep_map; } ; 32 typedef struct raw_spinlock raw_spinlock_t; 33 struct __anonstruct____missing_field_name_77 { u8 __padding[24U]; struct lockdep_map dep_map; } ; 33 union __anonunion____missing_field_name_76 { struct raw_spinlock rlock; struct __anonstruct____missing_field_name_77 __annonCompField19; } ; 33 struct spinlock { union __anonunion____missing_field_name_76 __annonCompField20; } ; 76 typedef struct spinlock spinlock_t; 23 
struct __anonstruct_rwlock_t_78 { arch_rwlock_t raw_lock; unsigned int magic; unsigned int owner_cpu; void *owner; struct lockdep_map dep_map; } ; 23 typedef struct __anonstruct_rwlock_t_78 rwlock_t; 416 struct seqcount { unsigned int sequence; struct lockdep_map dep_map; } ; 52 typedef struct seqcount seqcount_t; 407 struct __anonstruct_seqlock_t_93 { struct seqcount seqcount; spinlock_t lock; } ; 407 typedef struct __anonstruct_seqlock_t_93 seqlock_t; 601 struct timespec { __kernel_time_t tv_sec; long tv_nsec; } ; 7 typedef __s64 time64_t; 83 struct user_namespace ; 22 struct __anonstruct_kuid_t_94 { uid_t val; } ; 22 typedef struct __anonstruct_kuid_t_94 kuid_t; 27 struct __anonstruct_kgid_t_95 { gid_t val; } ; 27 typedef struct __anonstruct_kgid_t_95 kgid_t; 139 struct kstat { u64 ino; dev_t dev; umode_t mode; unsigned int nlink; kuid_t uid; kgid_t gid; dev_t rdev; loff_t size; struct timespec atime; struct timespec mtime; struct timespec ctime; unsigned long blksize; unsigned long long blocks; } ; 36 struct vm_area_struct ; 38 struct __wait_queue_head { spinlock_t lock; struct list_head task_list; } ; 43 typedef struct __wait_queue_head wait_queue_head_t; 97 struct __anonstruct_nodemask_t_96 { unsigned long bits[16U]; } ; 97 typedef struct __anonstruct_nodemask_t_96 nodemask_t; 249 typedef unsigned int isolate_mode_t; 13 struct optimistic_spin_queue { atomic_t tail; } ; 39 struct mutex { atomic_long_t owner; spinlock_t wait_lock; struct optimistic_spin_queue osq; struct list_head wait_list; void *magic; struct lockdep_map dep_map; } ; 70 struct mutex_waiter { struct list_head list; struct task_struct *task; void *magic; } ; 222 struct rw_semaphore ; 223 struct rw_semaphore { atomic_long_t count; struct list_head wait_list; raw_spinlock_t wait_lock; struct optimistic_spin_queue osq; struct task_struct *owner; struct lockdep_map dep_map; } ; 178 struct completion { unsigned int done; wait_queue_head_t wait; } ; 28 typedef s64 ktime_t; 1145 struct timer_list { 
struct hlist_node entry; unsigned long expires; void (*function)(unsigned long); unsigned long data; u32 flags; int start_pid; void *start_site; char start_comm[16U]; struct lockdep_map lockdep_map; } ; 254 struct hrtimer ; 255 enum hrtimer_restart ; 256 struct rb_node { unsigned long __rb_parent_color; struct rb_node *rb_right; struct rb_node *rb_left; } ; 41 struct rb_root { struct rb_node *rb_node; } ; 835 struct nsproxy ; 836 struct ctl_table_root ; 837 struct ctl_table_header ; 838 struct ctl_dir ; 39 typedef int proc_handler(struct ctl_table *, int, void *, size_t *, loff_t *); 61 struct ctl_table_poll { atomic_t event; wait_queue_head_t wait; } ; 100 struct ctl_table { const char *procname; void *data; int maxlen; umode_t mode; struct ctl_table *child; proc_handler *proc_handler; struct ctl_table_poll *poll; void *extra1; void *extra2; } ; 121 struct ctl_node { struct rb_node node; struct ctl_table_header *header; } ; 126 struct __anonstruct____missing_field_name_98 { struct ctl_table *ctl_table; int used; int count; int nreg; } ; 126 union __anonunion____missing_field_name_97 { struct __anonstruct____missing_field_name_98 __annonCompField21; struct callback_head rcu; } ; 126 struct ctl_table_set ; 126 struct ctl_table_header { union __anonunion____missing_field_name_97 __annonCompField22; struct completion *unregistering; struct ctl_table *ctl_table_arg; struct ctl_table_root *root; struct ctl_table_set *set; struct ctl_dir *parent; struct ctl_node *node; } ; 147 struct ctl_dir { struct ctl_table_header header; struct rb_root root; } ; 153 struct ctl_table_set { int (*is_seen)(struct ctl_table_set *); struct ctl_dir dir; } ; 158 struct ctl_table_root { struct ctl_table_set default_set; struct ctl_table_set * (*lookup)(struct ctl_table_root *); void (*set_ownership)(struct ctl_table_header *, struct ctl_table *, kuid_t *, kgid_t *); int (*permissions)(struct ctl_table_header *, struct ctl_table *); } ; 278 struct workqueue_struct ; 279 struct work_struct ; 
54 struct work_struct { atomic_long_t data; struct list_head entry; void (*func)(struct work_struct *); struct lockdep_map lockdep_map; } ; 107 struct delayed_work { struct work_struct work; struct timer_list timer; struct workqueue_struct *wq; int cpu; } ; 58 struct pm_message { int event; } ; 64 typedef struct pm_message pm_message_t; 65 struct dev_pm_ops { int (*prepare)(struct device *); void (*complete)(struct device *); int (*suspend)(struct device *); int (*resume)(struct device *); int (*freeze)(struct device *); int (*thaw)(struct device *); int (*poweroff)(struct device *); int (*restore)(struct device *); int (*suspend_late)(struct device *); int (*resume_early)(struct device *); int (*freeze_late)(struct device *); int (*thaw_early)(struct device *); int (*poweroff_late)(struct device *); int (*restore_early)(struct device *); int (*suspend_noirq)(struct device *); int (*resume_noirq)(struct device *); int (*freeze_noirq)(struct device *); int (*thaw_noirq)(struct device *); int (*poweroff_noirq)(struct device *); int (*restore_noirq)(struct device *); int (*runtime_suspend)(struct device *); int (*runtime_resume)(struct device *); int (*runtime_idle)(struct device *); } ; 320 enum rpm_status { RPM_ACTIVE = 0, RPM_RESUMING = 1, RPM_SUSPENDED = 2, RPM_SUSPENDING = 3 } ; 327 enum rpm_request { RPM_REQ_NONE = 0, RPM_REQ_IDLE = 1, RPM_REQ_SUSPEND = 2, RPM_REQ_AUTOSUSPEND = 3, RPM_REQ_RESUME = 4 } ; 335 struct wakeup_source ; 336 struct wake_irq ; 337 struct pm_domain_data ; 338 struct pm_subsys_data { spinlock_t lock; unsigned int refcount; struct list_head clock_list; struct pm_domain_data *domain_data; } ; 556 struct dev_pm_qos ; 556 struct dev_pm_info { pm_message_t power_state; unsigned char can_wakeup; unsigned char async_suspend; bool in_dpm_list; bool is_prepared; bool is_suspended; bool is_noirq_suspended; bool is_late_suspended; bool early_init; bool direct_complete; spinlock_t lock; struct list_head entry; struct completion completion; struct 
wakeup_source *wakeup; bool wakeup_path; bool syscore; bool no_pm_callbacks; struct timer_list suspend_timer; unsigned long timer_expires; struct work_struct work; wait_queue_head_t wait_queue; struct wake_irq *wakeirq; atomic_t usage_count; atomic_t child_count; unsigned char disable_depth; unsigned char idle_notification; unsigned char request_pending; unsigned char deferred_resume; unsigned char run_wake; unsigned char runtime_auto; bool ignore_children; unsigned char no_callbacks; unsigned char irq_safe; unsigned char use_autosuspend; unsigned char timer_autosuspends; unsigned char memalloc_noio; unsigned int links_count; enum rpm_request request; enum rpm_status runtime_status; int runtime_error; int autosuspend_delay; unsigned long last_busy; unsigned long active_jiffies; unsigned long suspended_jiffies; unsigned long accounting_timestamp; struct pm_subsys_data *subsys_data; void (*set_latency_tolerance)(struct device *, s32 ); struct dev_pm_qos *qos; } ; 618 struct dev_pm_domain { struct dev_pm_ops ops; void (*detach)(struct device *, bool ); int (*activate)(struct device *); void (*sync)(struct device *); void (*dismiss)(struct device *); } ; 38 struct ldt_struct ; 38 struct vdso_image ; 38 struct __anonstruct_mm_context_t_163 { struct ldt_struct *ldt; unsigned short ia32_compat; struct mutex lock; void *vdso; const struct vdso_image *vdso_image; atomic_t perf_rdpmc_allowed; u16 pkey_allocation_map; s16 execute_only_pkey; void *bd_addr; } ; 38 typedef struct __anonstruct_mm_context_t_163 mm_context_t; 22 struct bio_vec ; 1264 struct llist_node ; 64 struct llist_node { struct llist_node *next; } ; 37 struct cred ; 19 struct inode ; 58 struct arch_uprobe_task { unsigned long saved_scratch_register; unsigned int saved_trap_nr; unsigned int saved_tf; } ; 66 enum uprobe_task_state { UTASK_RUNNING = 0, UTASK_SSTEP = 1, UTASK_SSTEP_ACK = 2, UTASK_SSTEP_TRAPPED = 3 } ; 73 struct __anonstruct____missing_field_name_211 { struct arch_uprobe_task autask; unsigned long 
vaddr; } ; 73 struct __anonstruct____missing_field_name_212 { struct callback_head dup_xol_work; unsigned long dup_xol_addr; } ; 73 union __anonunion____missing_field_name_210 { struct __anonstruct____missing_field_name_211 __annonCompField35; struct __anonstruct____missing_field_name_212 __annonCompField36; } ; 73 struct uprobe ; 73 struct return_instance ; 73 struct uprobe_task { enum uprobe_task_state state; union __anonunion____missing_field_name_210 __annonCompField37; struct uprobe *active_uprobe; unsigned long xol_vaddr; struct return_instance *return_instances; unsigned int depth; } ; 95 struct return_instance { struct uprobe *uprobe; unsigned long func; unsigned long stack; unsigned long orig_ret_vaddr; bool chained; struct return_instance *next; } ; 111 struct xol_area ; 112 struct uprobes_state { struct xol_area *xol_area; } ; 151 struct address_space ; 152 struct mem_cgroup ; 153 union __anonunion____missing_field_name_213 { struct address_space *mapping; void *s_mem; atomic_t compound_mapcount; } ; 153 union __anonunion____missing_field_name_214 { unsigned long index; void *freelist; } ; 153 struct __anonstruct____missing_field_name_218 { unsigned short inuse; unsigned short objects; unsigned char frozen; } ; 153 union __anonunion____missing_field_name_217 { atomic_t _mapcount; unsigned int active; struct __anonstruct____missing_field_name_218 __annonCompField40; int units; } ; 153 struct __anonstruct____missing_field_name_216 { union __anonunion____missing_field_name_217 __annonCompField41; atomic_t _refcount; } ; 153 union __anonunion____missing_field_name_215 { unsigned long counters; struct __anonstruct____missing_field_name_216 __annonCompField42; } ; 153 struct dev_pagemap ; 153 struct __anonstruct____missing_field_name_220 { struct page *next; int pages; int pobjects; } ; 153 struct __anonstruct____missing_field_name_221 { unsigned long compound_head; unsigned int compound_dtor; unsigned int compound_order; } ; 153 struct 
__anonstruct____missing_field_name_222 { unsigned long __pad; pgtable_t pmd_huge_pte; } ; 153 union __anonunion____missing_field_name_219 { struct list_head lru; struct dev_pagemap *pgmap; struct __anonstruct____missing_field_name_220 __annonCompField44; struct callback_head callback_head; struct __anonstruct____missing_field_name_221 __annonCompField45; struct __anonstruct____missing_field_name_222 __annonCompField46; } ; 153 struct kmem_cache ; 153 union __anonunion____missing_field_name_223 { unsigned long private; spinlock_t *ptl; struct kmem_cache *slab_cache; } ; 153 struct page { unsigned long flags; union __anonunion____missing_field_name_213 __annonCompField38; union __anonunion____missing_field_name_214 __annonCompField39; union __anonunion____missing_field_name_215 __annonCompField43; union __anonunion____missing_field_name_219 __annonCompField47; union __anonunion____missing_field_name_223 __annonCompField48; struct mem_cgroup *mem_cgroup; } ; 197 struct page_frag { struct page *page; __u32 offset; __u32 size; } ; 282 struct userfaultfd_ctx ; 282 struct vm_userfaultfd_ctx { struct userfaultfd_ctx *ctx; } ; 289 struct __anonstruct_shared_224 { struct rb_node rb; unsigned long rb_subtree_last; } ; 289 struct anon_vma ; 289 struct vm_operations_struct ; 289 struct mempolicy ; 289 struct vm_area_struct { unsigned long vm_start; unsigned long vm_end; struct vm_area_struct *vm_next; struct vm_area_struct *vm_prev; struct rb_node vm_rb; unsigned long rb_subtree_gap; struct mm_struct *vm_mm; pgprot_t vm_page_prot; unsigned long vm_flags; struct __anonstruct_shared_224 shared; struct list_head anon_vma_chain; struct anon_vma *anon_vma; const struct vm_operations_struct *vm_ops; unsigned long vm_pgoff; struct file *vm_file; void *vm_private_data; struct mempolicy *vm_policy; struct vm_userfaultfd_ctx vm_userfaultfd_ctx; } ; 362 struct core_thread { struct task_struct *task; struct core_thread *next; } ; 367 struct core_state { atomic_t nr_threads; struct 
core_thread dumper; struct completion startup; } ; 381 struct task_rss_stat { int events; int count[4U]; } ; 389 struct mm_rss_stat { atomic_long_t count[4U]; } ; 394 struct kioctx_table ; 395 struct linux_binfmt ; 395 struct mmu_notifier_mm ; 395 struct mm_struct { struct vm_area_struct *mmap; struct rb_root mm_rb; u32 vmacache_seqnum; unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); unsigned long mmap_base; unsigned long mmap_legacy_base; unsigned long task_size; unsigned long highest_vm_end; pgd_t *pgd; atomic_t mm_users; atomic_t mm_count; atomic_long_t nr_ptes; atomic_long_t nr_pmds; int map_count; spinlock_t page_table_lock; struct rw_semaphore mmap_sem; struct list_head mmlist; unsigned long hiwater_rss; unsigned long hiwater_vm; unsigned long total_vm; unsigned long locked_vm; unsigned long pinned_vm; unsigned long data_vm; unsigned long exec_vm; unsigned long stack_vm; unsigned long def_flags; unsigned long start_code; unsigned long end_code; unsigned long start_data; unsigned long end_data; unsigned long start_brk; unsigned long brk; unsigned long start_stack; unsigned long arg_start; unsigned long arg_end; unsigned long env_start; unsigned long env_end; unsigned long saved_auxv[46U]; struct mm_rss_stat rss_stat; struct linux_binfmt *binfmt; cpumask_var_t cpu_vm_mask_var; mm_context_t context; unsigned long flags; struct core_state *core_state; spinlock_t ioctx_lock; struct kioctx_table *ioctx_table; struct task_struct *owner; struct user_namespace *user_ns; struct file *exe_file; struct mmu_notifier_mm *mmu_notifier_mm; struct cpumask cpumask_allocation; unsigned long numa_next_scan; unsigned long numa_scan_offset; int numa_scan_seq; bool tlb_flush_pending; struct uprobes_state uprobes_state; atomic_long_t hugetlb_usage; struct work_struct async_put_work; } ; 560 struct vm_fault ; 614 struct vdso_image { void *data; unsigned long size; unsigned long alt; unsigned long alt_len; long 
sym_vvar_start; long sym_vvar_page; long sym_hpet_page; long sym_pvclock_page; long sym_VDSO32_NOTE_MASK; long sym___kernel_sigreturn; long sym___kernel_rt_sigreturn; long sym___kernel_vsyscall; long sym_int80_landing_pad; } ; 15 typedef __u64 Elf64_Addr; 16 typedef __u16 Elf64_Half; 18 typedef __u64 Elf64_Off; 20 typedef __u32 Elf64_Word; 21 typedef __u64 Elf64_Xword; 190 struct elf64_sym { Elf64_Word st_name; unsigned char st_info; unsigned char st_other; Elf64_Half st_shndx; Elf64_Addr st_value; Elf64_Xword st_size; } ; 198 typedef struct elf64_sym Elf64_Sym; 219 struct elf64_hdr { unsigned char e_ident[16U]; Elf64_Half e_type; Elf64_Half e_machine; Elf64_Word e_version; Elf64_Addr e_entry; Elf64_Off e_phoff; Elf64_Off e_shoff; Elf64_Word e_flags; Elf64_Half e_ehsize; Elf64_Half e_phentsize; Elf64_Half e_phnum; Elf64_Half e_shentsize; Elf64_Half e_shnum; Elf64_Half e_shstrndx; } ; 235 typedef struct elf64_hdr Elf64_Ehdr; 314 struct elf64_shdr { Elf64_Word sh_name; Elf64_Word sh_type; Elf64_Xword sh_flags; Elf64_Addr sh_addr; Elf64_Off sh_offset; Elf64_Xword sh_size; Elf64_Word sh_link; Elf64_Word sh_info; Elf64_Xword sh_addralign; Elf64_Xword sh_entsize; } ; 326 typedef struct elf64_shdr Elf64_Shdr; 53 union __anonunion____missing_field_name_229 { unsigned long bitmap[1U]; struct callback_head callback_head; } ; 53 struct idr_layer { int prefix; int layer; struct idr_layer *ary[64U]; int count; union __anonunion____missing_field_name_229 __annonCompField49; } ; 40 struct idr { struct idr_layer *hint; struct idr_layer *top; int layers; int cur; spinlock_t lock; int id_free_cnt; struct idr_layer *id_free; } ; 149 struct ida_bitmap { long nr_busy; unsigned long bitmap[15U]; } ; 192 struct ida { struct idr idr; struct ida_bitmap *free_bitmap; } ; 229 struct dentry ; 230 struct iattr ; 231 struct super_block ; 232 struct file_system_type ; 233 struct kernfs_open_node ; 234 struct kernfs_iattrs ; 257 struct kernfs_root ; 257 struct kernfs_elem_dir { unsigned long 
subdirs; struct rb_root children; struct kernfs_root *root; } ; 85 struct kernfs_node ; 85 struct kernfs_elem_symlink { struct kernfs_node *target_kn; } ; 89 struct kernfs_ops ; 89 struct kernfs_elem_attr { const struct kernfs_ops *ops; struct kernfs_open_node *open; loff_t size; struct kernfs_node *notify_next; } ; 96 union __anonunion____missing_field_name_238 { struct kernfs_elem_dir dir; struct kernfs_elem_symlink symlink; struct kernfs_elem_attr attr; } ; 96 struct kernfs_node { atomic_t count; atomic_t active; struct lockdep_map dep_map; struct kernfs_node *parent; const char *name; struct rb_node rb; const void *ns; unsigned int hash; union __anonunion____missing_field_name_238 __annonCompField50; void *priv; unsigned short flags; umode_t mode; unsigned int ino; struct kernfs_iattrs *iattr; } ; 138 struct kernfs_syscall_ops { int (*remount_fs)(struct kernfs_root *, int *, char *); int (*show_options)(struct seq_file *, struct kernfs_root *); int (*mkdir)(struct kernfs_node *, const char *, umode_t ); int (*rmdir)(struct kernfs_node *); int (*rename)(struct kernfs_node *, struct kernfs_node *, const char *); int (*show_path)(struct seq_file *, struct kernfs_node *, struct kernfs_root *); } ; 157 struct kernfs_root { struct kernfs_node *kn; unsigned int flags; struct ida ino_ida; struct kernfs_syscall_ops *syscall_ops; struct list_head supers; wait_queue_head_t deactivate_waitq; } ; 173 struct kernfs_open_file { struct kernfs_node *kn; struct file *file; void *priv; struct mutex mutex; struct mutex prealloc_mutex; int event; struct list_head list; char *prealloc_buf; size_t atomic_write_len; bool mmapped; const struct vm_operations_struct *vm_ops; } ; 191 struct kernfs_ops { int (*seq_show)(struct seq_file *, void *); void * (*seq_start)(struct seq_file *, loff_t *); void * (*seq_next)(struct seq_file *, void *, loff_t *); void (*seq_stop)(struct seq_file *, void *); ssize_t (*read)(struct kernfs_open_file *, char *, size_t , loff_t ); size_t atomic_write_len; 
bool prealloc; ssize_t (*write)(struct kernfs_open_file *, char *, size_t , loff_t ); int (*mmap)(struct kernfs_open_file *, struct vm_area_struct *); struct lock_class_key lockdep_key; } ; 511 struct sock ; 512 struct kobject ; 513 enum kobj_ns_type { KOBJ_NS_TYPE_NONE = 0, KOBJ_NS_TYPE_NET = 1, KOBJ_NS_TYPES = 2 } ; 519 struct kobj_ns_type_operations { enum kobj_ns_type type; bool (*current_may_mount)(); void * (*grab_current_ns)(); const void * (*netlink_ns)(struct sock *); const void * (*initial_ns)(); void (*drop_ns)(void *); } ; 59 struct bin_attribute ; 60 struct attribute { const char *name; umode_t mode; bool ignore_lockdep; struct lock_class_key *key; struct lock_class_key skey; } ; 37 struct attribute_group { const char *name; umode_t (*is_visible)(struct kobject *, struct attribute *, int); umode_t (*is_bin_visible)(struct kobject *, struct bin_attribute *, int); struct attribute **attrs; struct bin_attribute **bin_attrs; } ; 92 struct bin_attribute { struct attribute attr; size_t size; void *private; ssize_t (*read)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t , size_t ); ssize_t (*write)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t , size_t ); int (*mmap)(struct file *, struct kobject *, struct bin_attribute *, struct vm_area_struct *); } ; 165 struct sysfs_ops { ssize_t (*show)(struct kobject *, struct attribute *, char *); ssize_t (*store)(struct kobject *, struct attribute *, const char *, size_t ); } ; 530 struct kref { atomic_t refcount; } ; 52 struct kset ; 52 struct kobj_type ; 52 struct kobject { const char *name; struct list_head entry; struct kobject *parent; struct kset *kset; struct kobj_type *ktype; struct kernfs_node *sd; struct kref kref; struct delayed_work release; unsigned char state_initialized; unsigned char state_in_sysfs; unsigned char state_add_uevent_sent; unsigned char state_remove_uevent_sent; unsigned char uevent_suppress; } ; 115 struct kobj_type { void (*release)(struct 
kobject *); const struct sysfs_ops *sysfs_ops; struct attribute **default_attrs; const struct kobj_ns_type_operations * (*child_ns_type)(struct kobject *); const void * (*namespace)(struct kobject *); } ; 123 struct kobj_uevent_env { char *argv[3U]; char *envp[32U]; int envp_idx; char buf[2048U]; int buflen; } ; 131 struct kset_uevent_ops { const int (*filter)(struct kset *, struct kobject *); const const char * (*name)(struct kset *, struct kobject *); const int (*uevent)(struct kset *, struct kobject *, struct kobj_uevent_env *); } ; 148 struct kset { struct list_head list; spinlock_t list_lock; struct kobject kobj; const struct kset_uevent_ops *uevent_ops; } ; 223 struct kernel_param ; 228 struct kernel_param_ops { unsigned int flags; int (*set)(const char *, const struct kernel_param *); int (*get)(char *, const struct kernel_param *); void (*free)(void *); } ; 62 struct kparam_string ; 62 struct kparam_array ; 62 union __anonunion____missing_field_name_241 { void *arg; const struct kparam_string *str; const struct kparam_array *arr; } ; 62 struct kernel_param { const char *name; struct module *mod; const struct kernel_param_ops *ops; const u16 perm; s8 level; u8 flags; union __anonunion____missing_field_name_241 __annonCompField51; } ; 83 struct kparam_string { unsigned int maxlen; char *string; } ; 89 struct kparam_array { unsigned int max; unsigned int elemsize; unsigned int *num; const struct kernel_param_ops *ops; void *elem; } ; 470 struct exception_table_entry ; 24 struct latch_tree_node { struct rb_node node[2U]; } ; 211 struct mod_arch_specific { } ; 39 struct module_param_attrs ; 39 struct module_kobject { struct kobject kobj; struct module *mod; struct kobject *drivers_dir; struct module_param_attrs *mp; struct completion *kobj_completion; } ; 50 struct module_attribute { struct attribute attr; ssize_t (*show)(struct module_attribute *, struct module_kobject *, char *); ssize_t (*store)(struct module_attribute *, struct module_kobject *, const char 
*, size_t ); void (*setup)(struct module *, const char *); int (*test)(struct module *); void (*free)(struct module *); } ; 277 enum module_state { MODULE_STATE_LIVE = 0, MODULE_STATE_COMING = 1, MODULE_STATE_GOING = 2, MODULE_STATE_UNFORMED = 3 } ; 284 struct mod_tree_node { struct module *mod; struct latch_tree_node node; } ; 291 struct module_layout { void *base; unsigned int size; unsigned int text_size; unsigned int ro_size; unsigned int ro_after_init_size; struct mod_tree_node mtn; } ; 307 struct mod_kallsyms { Elf64_Sym *symtab; unsigned int num_symtab; char *strtab; } ; 321 struct klp_modinfo { Elf64_Ehdr hdr; Elf64_Shdr *sechdrs; char *secstrings; unsigned int symndx; } ; 329 struct module_sect_attrs ; 329 struct module_notes_attrs ; 329 struct trace_event_call ; 329 struct trace_enum_map ; 329 struct module { enum module_state state; struct list_head list; char name[56U]; struct module_kobject mkobj; struct module_attribute *modinfo_attrs; const char *version; const char *srcversion; struct kobject *holders_dir; const struct kernel_symbol *syms; const unsigned long *crcs; unsigned int num_syms; struct mutex param_lock; struct kernel_param *kp; unsigned int num_kp; unsigned int num_gpl_syms; const struct kernel_symbol *gpl_syms; const unsigned long *gpl_crcs; const struct kernel_symbol *unused_syms; const unsigned long *unused_crcs; unsigned int num_unused_syms; unsigned int num_unused_gpl_syms; const struct kernel_symbol *unused_gpl_syms; const unsigned long *unused_gpl_crcs; bool sig_ok; bool async_probe_requested; const struct kernel_symbol *gpl_future_syms; const unsigned long *gpl_future_crcs; unsigned int num_gpl_future_syms; unsigned int num_exentries; struct exception_table_entry *extable; int (*init)(); struct module_layout core_layout; struct module_layout init_layout; struct mod_arch_specific arch; unsigned long taints; unsigned int num_bugs; struct list_head bug_list; struct bug_entry *bug_table; struct mod_kallsyms *kallsyms; struct 
mod_kallsyms core_kallsyms; struct module_sect_attrs *sect_attrs; struct module_notes_attrs *notes_attrs; char *args; void *percpu; unsigned int percpu_size; unsigned int num_tracepoints; const struct tracepoint **tracepoints_ptrs; unsigned int num_trace_bprintk_fmt; const char **trace_bprintk_fmt_start; struct trace_event_call **trace_events; unsigned int num_trace_events; struct trace_enum_map **trace_enums; unsigned int num_trace_enums; bool klp; bool klp_alive; struct klp_modinfo *klp_info; struct list_head source_list; struct list_head target_list; void (*exit)(); atomic_t refcnt; ctor_fn_t (**ctors)(); unsigned int num_ctors; } ; 158 struct iovec { void *iov_base; __kernel_size_t iov_len; } ; 21 struct pipe_inode_info ; 22 struct kvec { void *iov_base; size_t iov_len; } ; 29 union __anonunion____missing_field_name_248 { const struct iovec *iov; const struct kvec *kvec; const struct bio_vec *bvec; struct pipe_inode_info *pipe; } ; 29 union __anonunion____missing_field_name_249 { unsigned long nr_segs; int idx; } ; 29 struct iov_iter { int type; size_t iov_offset; size_t count; union __anonunion____missing_field_name_248 __annonCompField52; union __anonunion____missing_field_name_249 __annonCompField53; } ; 11 typedef unsigned short __kernel_sa_family_t; 18 struct pid ; 23 typedef __kernel_sa_family_t sa_family_t; 24 struct sockaddr { sa_family_t sa_family; char sa_data[14U]; } ; 38 struct kiocb ; 93 struct hlist_bl_node ; 93 struct hlist_bl_head { struct hlist_bl_node *first; } ; 36 struct hlist_bl_node { struct hlist_bl_node *next; struct hlist_bl_node **pprev; } ; 114 struct __anonstruct____missing_field_name_269 { spinlock_t lock; int count; } ; 114 union __anonunion____missing_field_name_268 { struct __anonstruct____missing_field_name_269 __annonCompField54; } ; 114 struct lockref { union __anonunion____missing_field_name_268 __annonCompField55; } ; 77 struct path ; 78 struct vfsmount ; 79 struct __anonstruct____missing_field_name_271 { u32 hash; u32 len; 
} ; 79 union __anonunion____missing_field_name_270 { struct __anonstruct____missing_field_name_271 __annonCompField56; u64 hash_len; } ; 79 struct qstr { union __anonunion____missing_field_name_270 __annonCompField57; const unsigned char *name; } ; 65 struct dentry_operations ; 65 union __anonunion____missing_field_name_272 { struct list_head d_lru; wait_queue_head_t *d_wait; } ; 65 union __anonunion_d_u_273 { struct hlist_node d_alias; struct hlist_bl_node d_in_lookup_hash; struct callback_head d_rcu; } ; 65 struct dentry { unsigned int d_flags; seqcount_t d_seq; struct hlist_bl_node d_hash; struct dentry *d_parent; struct qstr d_name; struct inode *d_inode; unsigned char d_iname[32U]; struct lockref d_lockref; const struct dentry_operations *d_op; struct super_block *d_sb; unsigned long d_time; void *d_fsdata; union __anonunion____missing_field_name_272 __annonCompField58; struct list_head d_child; struct list_head d_subdirs; union __anonunion_d_u_273 d_u; } ; 121 struct dentry_operations { int (*d_revalidate)(struct dentry *, unsigned int); int (*d_weak_revalidate)(struct dentry *, unsigned int); int (*d_hash)(const struct dentry *, struct qstr *); int (*d_compare)(const struct dentry *, unsigned int, const char *, const struct qstr *); int (*d_delete)(const struct dentry *); int (*d_init)(struct dentry *); void (*d_release)(struct dentry *); void (*d_prune)(struct dentry *); void (*d_iput)(struct dentry *, struct inode *); char * (*d_dname)(struct dentry *, char *, int); struct vfsmount * (*d_automount)(struct path *); int (*d_manage)(const struct path *, bool ); struct dentry * (*d_real)(struct dentry *, const struct inode *, unsigned int); } ; 592 struct path { struct vfsmount *mnt; struct dentry *dentry; } ; 19 struct shrink_control { gfp_t gfp_mask; unsigned long nr_to_scan; int nid; struct mem_cgroup *memcg; } ; 27 struct shrinker { unsigned long int (*count_objects)(struct shrinker *, struct shrink_control *); unsigned long int (*scan_objects)(struct 
shrinker *, struct shrink_control *); int seeks; long batch; unsigned long flags; struct list_head list; atomic_long_t *nr_deferred; } ; 80 struct list_lru_one { struct list_head list; long nr_items; } ; 32 struct list_lru_memcg { struct list_lru_one *lru[0U]; } ; 37 struct list_lru_node { spinlock_t lock; struct list_lru_one lru; struct list_lru_memcg *memcg_lrus; } ; 47 struct list_lru { struct list_lru_node *node; struct list_head list; } ; 63 union __anonunion____missing_field_name_274 { struct list_head private_list; struct callback_head callback_head; } ; 63 struct radix_tree_node { unsigned char shift; unsigned char offset; unsigned char count; unsigned char exceptional; struct radix_tree_node *parent; void *private_data; union __anonunion____missing_field_name_274 __annonCompField59; void *slots[64U]; unsigned long tags[3U][1U]; } ; 105 struct radix_tree_root { gfp_t gfp_mask; struct radix_tree_node *rnode; } ; 519 enum pid_type { PIDTYPE_PID = 0, PIDTYPE_PGID = 1, PIDTYPE_SID = 2, PIDTYPE_MAX = 3 } ; 526 struct pid_namespace ; 526 struct upid { int nr; struct pid_namespace *ns; struct hlist_node pid_chain; } ; 56 struct pid { atomic_t count; unsigned int level; struct hlist_head tasks[3U]; struct callback_head rcu; struct upid numbers[1U]; } ; 68 struct pid_link { struct hlist_node node; struct pid *pid; } ; 22 struct kernel_cap_struct { __u32 cap[2U]; } ; 25 typedef struct kernel_cap_struct kernel_cap_t; 45 struct fiemap_extent { __u64 fe_logical; __u64 fe_physical; __u64 fe_length; __u64 fe_reserved64[2U]; __u32 fe_flags; __u32 fe_reserved[3U]; } ; 38 enum migrate_mode { MIGRATE_ASYNC = 0, MIGRATE_SYNC_LIGHT = 1, MIGRATE_SYNC = 2 } ; 44 enum rcu_sync_type { RCU_SYNC = 0, RCU_SCHED_SYNC = 1, RCU_BH_SYNC = 2 } ; 50 struct rcu_sync { int gp_state; int gp_count; wait_queue_head_t gp_wait; int cb_state; struct callback_head cb_head; enum rcu_sync_type gp_type; } ; 66 struct percpu_rw_semaphore { struct rcu_sync rss; unsigned int *read_count; struct 
rw_semaphore rw_sem; wait_queue_head_t writer; int readers_block; } ; 144 struct delayed_call { void (*fn)(void *); void *arg; } ; 282 struct backing_dev_info ; 283 struct bdi_writeback ; 285 struct export_operations ; 287 struct poll_table_struct ; 288 struct kstatfs ; 289 struct swap_info_struct ; 290 struct fscrypt_info ; 291 struct fscrypt_operations ; 76 struct iattr { unsigned int ia_valid; umode_t ia_mode; kuid_t ia_uid; kgid_t ia_gid; loff_t ia_size; struct timespec ia_atime; struct timespec ia_mtime; struct timespec ia_ctime; struct file *ia_file; } ; 210 struct percpu_counter { raw_spinlock_t lock; s64 count; struct list_head list; s32 *counters; } ; 213 struct dquot ; 214 struct kqid ; 19 typedef __kernel_uid32_t projid_t; 23 struct __anonstruct_kprojid_t_278 { projid_t val; } ; 23 typedef struct __anonstruct_kprojid_t_278 kprojid_t; 181 enum quota_type { USRQUOTA = 0, GRPQUOTA = 1, PRJQUOTA = 2 } ; 66 typedef long long qsize_t; 67 union __anonunion____missing_field_name_279 { kuid_t uid; kgid_t gid; kprojid_t projid; } ; 67 struct kqid { union __anonunion____missing_field_name_279 __annonCompField60; enum quota_type type; } ; 194 struct mem_dqblk { qsize_t dqb_bhardlimit; qsize_t dqb_bsoftlimit; qsize_t dqb_curspace; qsize_t dqb_rsvspace; qsize_t dqb_ihardlimit; qsize_t dqb_isoftlimit; qsize_t dqb_curinodes; time64_t dqb_btime; time64_t dqb_itime; } ; 216 struct quota_format_type ; 217 struct mem_dqinfo { struct quota_format_type *dqi_format; int dqi_fmt_id; struct list_head dqi_dirty_list; unsigned long dqi_flags; unsigned int dqi_bgrace; unsigned int dqi_igrace; qsize_t dqi_max_spc_limit; qsize_t dqi_max_ino_limit; void *dqi_priv; } ; 282 struct dquot { struct hlist_node dq_hash; struct list_head dq_inuse; struct list_head dq_free; struct list_head dq_dirty; struct mutex dq_lock; atomic_t dq_count; wait_queue_head_t dq_wait_unused; struct super_block *dq_sb; struct kqid dq_id; loff_t dq_off; unsigned long dq_flags; struct mem_dqblk dq_dqb; } ; 309 
struct quota_format_ops { int (*check_quota_file)(struct super_block *, int); int (*read_file_info)(struct super_block *, int); int (*write_file_info)(struct super_block *, int); int (*free_file_info)(struct super_block *, int); int (*read_dqblk)(struct dquot *); int (*commit_dqblk)(struct dquot *); int (*release_dqblk)(struct dquot *); int (*get_next_id)(struct super_block *, struct kqid *); } ; 321 struct dquot_operations { int (*write_dquot)(struct dquot *); struct dquot * (*alloc_dquot)(struct super_block *, int); void (*destroy_dquot)(struct dquot *); int (*acquire_dquot)(struct dquot *); int (*release_dquot)(struct dquot *); int (*mark_dirty)(struct dquot *); int (*write_info)(struct super_block *, int); qsize_t * (*get_reserved_space)(struct inode *); int (*get_projid)(struct inode *, kprojid_t *); int (*get_next_id)(struct super_block *, struct kqid *); } ; 338 struct qc_dqblk { int d_fieldmask; u64 d_spc_hardlimit; u64 d_spc_softlimit; u64 d_ino_hardlimit; u64 d_ino_softlimit; u64 d_space; u64 d_ino_count; s64 d_ino_timer; s64 d_spc_timer; int d_ino_warns; int d_spc_warns; u64 d_rt_spc_hardlimit; u64 d_rt_spc_softlimit; u64 d_rt_space; s64 d_rt_spc_timer; int d_rt_spc_warns; } ; 361 struct qc_type_state { unsigned int flags; unsigned int spc_timelimit; unsigned int ino_timelimit; unsigned int rt_spc_timelimit; unsigned int spc_warnlimit; unsigned int ino_warnlimit; unsigned int rt_spc_warnlimit; unsigned long long ino; blkcnt_t blocks; blkcnt_t nextents; } ; 407 struct qc_state { unsigned int s_incoredqs; struct qc_type_state s_state[3U]; } ; 418 struct qc_info { int i_fieldmask; unsigned int i_flags; unsigned int i_spc_timelimit; unsigned int i_ino_timelimit; unsigned int i_rt_spc_timelimit; unsigned int i_spc_warnlimit; unsigned int i_ino_warnlimit; unsigned int i_rt_spc_warnlimit; } ; 431 struct quotactl_ops { int (*quota_on)(struct super_block *, int, int, const struct path *); int (*quota_off)(struct super_block *, int); int (*quota_enable)(struct 
super_block *, unsigned int); int (*quota_disable)(struct super_block *, unsigned int); int (*quota_sync)(struct super_block *, int); int (*set_info)(struct super_block *, int, struct qc_info *); int (*get_dqblk)(struct super_block *, struct kqid , struct qc_dqblk *); int (*get_nextdqblk)(struct super_block *, struct kqid *, struct qc_dqblk *); int (*set_dqblk)(struct super_block *, struct kqid , struct qc_dqblk *); int (*get_state)(struct super_block *, struct qc_state *); int (*rm_xquota)(struct super_block *, unsigned int); } ; 447 struct quota_format_type { int qf_fmt_id; const struct quota_format_ops *qf_ops; struct module *qf_owner; struct quota_format_type *qf_next; } ; 511 struct quota_info { unsigned int flags; struct mutex dqio_mutex; struct inode *files[3U]; struct mem_dqinfo info[3U]; const struct quota_format_ops *ops[3U]; } ; 540 struct writeback_control ; 541 struct kiocb { struct file *ki_filp; loff_t ki_pos; void (*ki_complete)(struct kiocb *, long, long); void *private; int ki_flags; } ; 317 struct address_space_operations { int (*writepage)(struct page *, struct writeback_control *); int (*readpage)(struct file *, struct page *); int (*writepages)(struct address_space *, struct writeback_control *); int (*set_page_dirty)(struct page *); int (*readpages)(struct file *, struct address_space *, struct list_head *, unsigned int); int (*write_begin)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page **, void **); int (*write_end)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page *, void *); sector_t (*bmap)(struct address_space *, sector_t ); void (*invalidatepage)(struct page *, unsigned int, unsigned int); int (*releasepage)(struct page *, gfp_t ); void (*freepage)(struct page *); ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *); int (*migratepage)(struct address_space *, struct page *, struct page *, enum migrate_mode ); bool (*isolate_page)(struct page *, 
isolate_mode_t ); void (*putback_page)(struct page *); int (*launder_page)(struct page *); int (*is_partially_uptodate)(struct page *, unsigned long, unsigned long); void (*is_dirty_writeback)(struct page *, bool *, bool *); int (*error_remove_page)(struct address_space *, struct page *); int (*swap_activate)(struct swap_info_struct *, struct file *, sector_t *); void (*swap_deactivate)(struct file *); } ; 376 struct address_space { struct inode *host; struct radix_tree_root page_tree; spinlock_t tree_lock; atomic_t i_mmap_writable; struct rb_root i_mmap; struct rw_semaphore i_mmap_rwsem; unsigned long nrpages; unsigned long nrexceptional; unsigned long writeback_index; const struct address_space_operations *a_ops; unsigned long flags; spinlock_t private_lock; gfp_t gfp_mask; struct list_head private_list; void *private_data; } ; 398 struct request_queue ; 399 struct hd_struct ; 399 struct gendisk ; 399 struct block_device { dev_t bd_dev; int bd_openers; struct inode *bd_inode; struct super_block *bd_super; struct mutex bd_mutex; void *bd_claiming; void *bd_holder; int bd_holders; bool bd_write_holder; struct list_head bd_holder_disks; struct block_device *bd_contains; unsigned int bd_block_size; struct hd_struct *bd_part; unsigned int bd_part_count; int bd_invalidated; struct gendisk *bd_disk; struct request_queue *bd_queue; struct list_head bd_list; unsigned long bd_private; int bd_fsfreeze_count; struct mutex bd_fsfreeze_mutex; } ; 514 struct posix_acl ; 541 struct inode_operations ; 541 union __anonunion____missing_field_name_284 { const unsigned int i_nlink; unsigned int __i_nlink; } ; 541 union __anonunion____missing_field_name_285 { struct hlist_head i_dentry; struct callback_head i_rcu; } ; 541 struct file_lock_context ; 541 struct cdev ; 541 union __anonunion____missing_field_name_286 { struct pipe_inode_info *i_pipe; struct block_device *i_bdev; struct cdev *i_cdev; char *i_link; unsigned int i_dir_seq; } ; 541 struct inode { umode_t i_mode; unsigned 
short i_opflags; kuid_t i_uid; kgid_t i_gid; unsigned int i_flags; struct posix_acl *i_acl; struct posix_acl *i_default_acl; const struct inode_operations *i_op; struct super_block *i_sb; struct address_space *i_mapping; void *i_security; unsigned long i_ino; union __anonunion____missing_field_name_284 __annonCompField61; dev_t i_rdev; loff_t i_size; struct timespec i_atime; struct timespec i_mtime; struct timespec i_ctime; spinlock_t i_lock; unsigned short i_bytes; unsigned int i_blkbits; blkcnt_t i_blocks; unsigned long i_state; struct rw_semaphore i_rwsem; unsigned long dirtied_when; unsigned long dirtied_time_when; struct hlist_node i_hash; struct list_head i_io_list; struct bdi_writeback *i_wb; int i_wb_frn_winner; u16 i_wb_frn_avg_time; u16 i_wb_frn_history; struct list_head i_lru; struct list_head i_sb_list; struct list_head i_wb_list; union __anonunion____missing_field_name_285 __annonCompField62; u64 i_version; atomic_t i_count; atomic_t i_dio_count; atomic_t i_writecount; atomic_t i_readcount; const struct file_operations *i_fop; struct file_lock_context *i_flctx; struct address_space i_data; struct list_head i_devices; union __anonunion____missing_field_name_286 __annonCompField63; __u32 i_generation; __u32 i_fsnotify_mask; struct hlist_head i_fsnotify_marks; struct fscrypt_info *i_crypt_info; void *i_private; } ; 797 struct fown_struct { rwlock_t lock; struct pid *pid; enum pid_type pid_type; kuid_t uid; kuid_t euid; int signum; } ; 805 struct file_ra_state { unsigned long start; unsigned int size; unsigned int async_size; unsigned int ra_pages; unsigned int mmap_miss; loff_t prev_pos; } ; 828 union __anonunion_f_u_287 { struct llist_node fu_llist; struct callback_head fu_rcuhead; } ; 828 struct file { union __anonunion_f_u_287 f_u; struct path f_path; struct inode *f_inode; const struct file_operations *f_op; spinlock_t f_lock; atomic_long_t f_count; unsigned int f_flags; fmode_t f_mode; struct mutex f_pos_lock; loff_t f_pos; struct fown_struct 
f_owner; const struct cred *f_cred; struct file_ra_state f_ra; u64 f_version; void *f_security; void *private_data; struct list_head f_ep_links; struct list_head f_tfile_llink; struct address_space *f_mapping; } ; 913 typedef void *fl_owner_t; 914 struct file_lock ; 915 struct file_lock_operations { void (*fl_copy_lock)(struct file_lock *, struct file_lock *); void (*fl_release_private)(struct file_lock *); } ; 921 struct lock_manager_operations { int (*lm_compare_owner)(struct file_lock *, struct file_lock *); unsigned long int (*lm_owner_key)(struct file_lock *); fl_owner_t (*lm_get_owner)(fl_owner_t ); void (*lm_put_owner)(fl_owner_t ); void (*lm_notify)(struct file_lock *); int (*lm_grant)(struct file_lock *, int); bool (*lm_break)(struct file_lock *); int (*lm_change)(struct file_lock *, int, struct list_head *); void (*lm_setup)(struct file_lock *, void **); } ; 942 struct net ; 948 struct nlm_lockowner ; 949 struct nfs_lock_info { u32 state; struct nlm_lockowner *owner; struct list_head list; } ; 14 struct nfs4_lock_state ; 15 struct nfs4_lock_info { struct nfs4_lock_state *owner; } ; 19 struct fasync_struct ; 19 struct __anonstruct_afs_289 { struct list_head link; int state; } ; 19 union __anonunion_fl_u_288 { struct nfs_lock_info nfs_fl; struct nfs4_lock_info nfs4_fl; struct __anonstruct_afs_289 afs; } ; 19 struct file_lock { struct file_lock *fl_next; struct list_head fl_list; struct hlist_node fl_link; struct list_head fl_block; fl_owner_t fl_owner; unsigned int fl_flags; unsigned char fl_type; unsigned int fl_pid; int fl_link_cpu; struct pid *fl_nspid; wait_queue_head_t fl_wait; struct file *fl_file; loff_t fl_start; loff_t fl_end; struct fasync_struct *fl_fasync; unsigned long fl_break_time; unsigned long fl_downgrade_time; const struct file_lock_operations *fl_ops; const struct lock_manager_operations *fl_lmops; union __anonunion_fl_u_288 fl_u; } ; 1001 struct file_lock_context { spinlock_t flc_lock; struct list_head flc_flock; struct list_head 
flc_posix; struct list_head flc_lease; } ; 1068 struct files_struct ; 1221 struct fasync_struct { spinlock_t fa_lock; int magic; int fa_fd; struct fasync_struct *fa_next; struct file *fa_file; struct callback_head fa_rcu; } ; 1256 struct sb_writers { int frozen; wait_queue_head_t wait_unfrozen; struct percpu_rw_semaphore rw_sem[3U]; } ; 1286 struct super_operations ; 1286 struct xattr_handler ; 1286 struct mtd_info ; 1286 struct super_block { struct list_head s_list; dev_t s_dev; unsigned char s_blocksize_bits; unsigned long s_blocksize; loff_t s_maxbytes; struct file_system_type *s_type; const struct super_operations *s_op; const struct dquot_operations *dq_op; const struct quotactl_ops *s_qcop; const struct export_operations *s_export_op; unsigned long s_flags; unsigned long s_iflags; unsigned long s_magic; struct dentry *s_root; struct rw_semaphore s_umount; int s_count; atomic_t s_active; void *s_security; const struct xattr_handler **s_xattr; const struct fscrypt_operations *s_cop; struct hlist_bl_head s_anon; struct list_head s_mounts; struct block_device *s_bdev; struct backing_dev_info *s_bdi; struct mtd_info *s_mtd; struct hlist_node s_instances; unsigned int s_quota_types; struct quota_info s_dquot; struct sb_writers s_writers; char s_id[32U]; u8 s_uuid[16U]; void *s_fs_info; unsigned int s_max_links; fmode_t s_mode; u32 s_time_gran; struct mutex s_vfs_rename_mutex; char *s_subtype; char *s_options; const struct dentry_operations *s_d_op; int cleancache_poolid; struct shrinker s_shrink; atomic_long_t s_remove_count; int s_readonly_remount; struct workqueue_struct *s_dio_done_wq; struct hlist_head s_pins; struct user_namespace *s_user_ns; struct list_lru s_dentry_lru; struct list_lru s_inode_lru; struct callback_head rcu; struct work_struct destroy_work; struct mutex s_sync_lock; int s_stack_depth; spinlock_t s_inode_list_lock; struct list_head s_inodes; spinlock_t s_inode_wblist_lock; struct list_head s_inodes_wb; } ; 1570 struct fiemap_extent_info { 
unsigned int fi_flags; unsigned int fi_extents_mapped; unsigned int fi_extents_max; struct fiemap_extent *fi_extents_start; } ; 1583 struct dir_context ; 1608 struct dir_context { int (*actor)(struct dir_context *, const char *, int, loff_t , u64 , unsigned int); loff_t pos; } ; 1615 struct file_operations { struct module *owner; loff_t (*llseek)(struct file *, loff_t , int); ssize_t (*read)(struct file *, char *, size_t , loff_t *); ssize_t (*write)(struct file *, const char *, size_t , loff_t *); ssize_t (*read_iter)(struct kiocb *, struct iov_iter *); ssize_t (*write_iter)(struct kiocb *, struct iov_iter *); int (*iterate)(struct file *, struct dir_context *); int (*iterate_shared)(struct file *, struct dir_context *); unsigned int (*poll)(struct file *, struct poll_table_struct *); long int (*unlocked_ioctl)(struct file *, unsigned int, unsigned long); long int (*compat_ioctl)(struct file *, unsigned int, unsigned long); int (*mmap)(struct file *, struct vm_area_struct *); int (*open)(struct inode *, struct file *); int (*flush)(struct file *, fl_owner_t ); int (*release)(struct inode *, struct file *); int (*fsync)(struct file *, loff_t , loff_t , int); int (*fasync)(int, struct file *, int); int (*lock)(struct file *, int, struct file_lock *); ssize_t (*sendpage)(struct file *, struct page *, int, size_t , loff_t *, int); unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); int (*check_flags)(int); int (*flock)(struct file *, int, struct file_lock *); ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t , unsigned int); ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t , unsigned int); int (*setlease)(struct file *, long, struct file_lock **, void **); long int (*fallocate)(struct file *, int, loff_t , loff_t ); void (*show_fdinfo)(struct seq_file *, struct file *); ssize_t (*copy_file_range)(struct file *, loff_t , struct file *, loff_t , 
size_t , unsigned int); int (*clone_file_range)(struct file *, loff_t , struct file *, loff_t , u64 ); ssize_t (*dedupe_file_range)(struct file *, u64 , u64 , struct file *, u64 ); } ; 1683 struct inode_operations { struct dentry * (*lookup)(struct inode *, struct dentry *, unsigned int); const char * (*get_link)(struct dentry *, struct inode *, struct delayed_call *); int (*permission)(struct inode *, int); struct posix_acl * (*get_acl)(struct inode *, int); int (*readlink)(struct dentry *, char *, int); int (*create)(struct inode *, struct dentry *, umode_t , bool ); int (*link)(struct dentry *, struct inode *, struct dentry *); int (*unlink)(struct inode *, struct dentry *); int (*symlink)(struct inode *, struct dentry *, const char *); int (*mkdir)(struct inode *, struct dentry *, umode_t ); int (*rmdir)(struct inode *, struct dentry *); int (*mknod)(struct inode *, struct dentry *, umode_t , dev_t ); int (*rename)(struct inode *, struct dentry *, struct inode *, struct dentry *, unsigned int); int (*setattr)(struct dentry *, struct iattr *); int (*getattr)(struct vfsmount *, struct dentry *, struct kstat *); ssize_t (*listxattr)(struct dentry *, char *, size_t ); int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 , u64 ); int (*update_time)(struct inode *, struct timespec *, int); int (*atomic_open)(struct inode *, struct dentry *, struct file *, unsigned int, umode_t , int *); int (*tmpfile)(struct inode *, struct dentry *, umode_t ); int (*set_acl)(struct inode *, struct posix_acl *, int); } ; 1753 struct super_operations { struct inode * (*alloc_inode)(struct super_block *); void (*destroy_inode)(struct inode *); void (*dirty_inode)(struct inode *, int); int (*write_inode)(struct inode *, struct writeback_control *); int (*drop_inode)(struct inode *); void (*evict_inode)(struct inode *); void (*put_super)(struct super_block *); int (*sync_fs)(struct super_block *, int); int (*freeze_super)(struct super_block *); int (*freeze_fs)(struct 
super_block *); int (*thaw_super)(struct super_block *); int (*unfreeze_fs)(struct super_block *); int (*statfs)(struct dentry *, struct kstatfs *); int (*remount_fs)(struct super_block *, int *, char *); void (*umount_begin)(struct super_block *); int (*show_options)(struct seq_file *, struct dentry *); int (*show_devname)(struct seq_file *, struct dentry *); int (*show_path)(struct seq_file *, struct dentry *); int (*show_stats)(struct seq_file *, struct dentry *); ssize_t (*quota_read)(struct super_block *, int, char *, size_t , loff_t ); ssize_t (*quota_write)(struct super_block *, int, const char *, size_t , loff_t ); struct dquot ** (*get_dquots)(struct inode *); int (*bdev_try_to_free_page)(struct super_block *, struct page *, gfp_t ); long int (*nr_cached_objects)(struct super_block *, struct shrink_control *); long int (*free_cached_objects)(struct super_block *, struct shrink_control *); } ; 1995 struct file_system_type { const char *name; int fs_flags; struct dentry * (*mount)(struct file_system_type *, int, const char *, void *); void (*kill_sb)(struct super_block *); struct module *owner; struct file_system_type *next; struct hlist_head fs_supers; struct lock_class_key s_lock_key; struct lock_class_key s_umount_key; struct lock_class_key s_vfs_rename_key; struct lock_class_key s_writers_key[3U]; struct lock_class_key i_lock_key; struct lock_class_key i_mutex_key; struct lock_class_key i_mutex_dir_key; } ; 126 struct sk_buff ; 84 struct plist_node { int prio; struct list_head prio_list; struct list_head node_list; } ; 4 typedef unsigned long cputime_t; 26 struct sem_undo_list ; 26 struct sysv_sem { struct sem_undo_list *undo_list; } ; 78 struct user_struct ; 26 struct sysv_shm { struct list_head shm_clist; } ; 24 struct __anonstruct_sigset_t_290 { unsigned long sig[1U]; } ; 24 typedef struct __anonstruct_sigset_t_290 sigset_t; 25 struct siginfo ; 17 typedef void __signalfn_t(int); 18 typedef __signalfn_t *__sighandler_t; 20 typedef void __restorefn_t(); 
21 typedef __restorefn_t *__sigrestore_t; 38 union sigval { int sival_int; void *sival_ptr; } ; 10 typedef union sigval sigval_t; 11 struct __anonstruct__kill_292 { __kernel_pid_t _pid; __kernel_uid32_t _uid; } ; 11 struct __anonstruct__timer_293 { __kernel_timer_t _tid; int _overrun; char _pad[0U]; sigval_t _sigval; int _sys_private; } ; 11 struct __anonstruct__rt_294 { __kernel_pid_t _pid; __kernel_uid32_t _uid; sigval_t _sigval; } ; 11 struct __anonstruct__sigchld_295 { __kernel_pid_t _pid; __kernel_uid32_t _uid; int _status; __kernel_clock_t _utime; __kernel_clock_t _stime; } ; 11 struct __anonstruct__addr_bnd_298 { void *_lower; void *_upper; } ; 11 union __anonunion____missing_field_name_297 { struct __anonstruct__addr_bnd_298 _addr_bnd; __u32 _pkey; } ; 11 struct __anonstruct__sigfault_296 { void *_addr; short _addr_lsb; union __anonunion____missing_field_name_297 __annonCompField64; } ; 11 struct __anonstruct__sigpoll_299 { long _band; int _fd; } ; 11 struct __anonstruct__sigsys_300 { void *_call_addr; int _syscall; unsigned int _arch; } ; 11 union __anonunion__sifields_291 { int _pad[28U]; struct __anonstruct__kill_292 _kill; struct __anonstruct__timer_293 _timer; struct __anonstruct__rt_294 _rt; struct __anonstruct__sigchld_295 _sigchld; struct __anonstruct__sigfault_296 _sigfault; struct __anonstruct__sigpoll_299 _sigpoll; struct __anonstruct__sigsys_300 _sigsys; } ; 11 struct siginfo { int si_signo; int si_errno; int si_code; union __anonunion__sifields_291 _sifields; } ; 118 typedef struct siginfo siginfo_t; 22 struct sigpending { struct list_head list; sigset_t signal; } ; 274 struct sigaction { __sighandler_t sa_handler; unsigned long sa_flags; __sigrestore_t sa_restorer; sigset_t sa_mask; } ; 288 struct k_sigaction { struct sigaction sa; } ; 43 struct seccomp_filter ; 44 struct seccomp { int mode; struct seccomp_filter *filter; } ; 40 struct rt_mutex_waiter ; 41 struct rlimit { __kernel_ulong_t rlim_cur; __kernel_ulong_t rlim_max; } ; 11 struct 
timerqueue_node { struct rb_node node; ktime_t expires; } ; 12 struct timerqueue_head { struct rb_root head; struct timerqueue_node *next; } ; 50 struct hrtimer_clock_base ; 51 struct hrtimer_cpu_base ; 60 enum hrtimer_restart { HRTIMER_NORESTART = 0, HRTIMER_RESTART = 1 } ; 65 struct hrtimer { struct timerqueue_node node; ktime_t _softexpires; enum hrtimer_restart (*function)(struct hrtimer *); struct hrtimer_clock_base *base; u8 state; u8 is_rel; int start_pid; void *start_site; char start_comm[16U]; } ; 125 struct hrtimer_clock_base { struct hrtimer_cpu_base *cpu_base; int index; clockid_t clockid; struct timerqueue_head active; ktime_t (*get_time)(); ktime_t offset; } ; 158 struct hrtimer_cpu_base { raw_spinlock_t lock; seqcount_t seq; struct hrtimer *running; unsigned int cpu; unsigned int active_bases; unsigned int clock_was_set_seq; bool migration_enabled; bool nohz_active; unsigned char in_hrtirq; unsigned char hres_active; unsigned char hang_detected; ktime_t expires_next; struct hrtimer *next_timer; unsigned int nr_events; unsigned int nr_retries; unsigned int nr_hangs; unsigned int max_hang_time; struct hrtimer_clock_base clock_base[4U]; } ; 12 enum kcov_mode { KCOV_MODE_DISABLED = 0, KCOV_MODE_TRACE = 1 } ; 17 struct task_io_accounting { u64 rchar; u64 wchar; u64 syscr; u64 syscw; u64 read_bytes; u64 write_bytes; u64 cancelled_write_bytes; } ; 45 struct latency_record { unsigned long backtrace[12U]; unsigned int count; unsigned long time; unsigned long max; } ; 41 struct assoc_array_ptr ; 41 struct assoc_array { struct assoc_array_ptr *root; unsigned long nr_leaves_on_tree; } ; 31 typedef int32_t key_serial_t; 34 typedef uint32_t key_perm_t; 35 struct key ; 36 struct signal_struct ; 37 struct key_type ; 41 struct keyring_index_key { struct key_type *type; const char *description; size_t desc_len; } ; 91 union key_payload { void *rcu_data0; void *data[4U]; } ; 128 union __anonunion____missing_field_name_303 { struct list_head graveyard_link; struct 
rb_node serial_node; } ; 128 struct key_user ; 128 union __anonunion____missing_field_name_304 { time_t expiry; time_t revoked_at; } ; 128 struct __anonstruct____missing_field_name_306 { struct key_type *type; char *description; } ; 128 union __anonunion____missing_field_name_305 { struct keyring_index_key index_key; struct __anonstruct____missing_field_name_306 __annonCompField67; } ; 128 struct __anonstruct____missing_field_name_308 { struct list_head name_link; struct assoc_array keys; } ; 128 union __anonunion____missing_field_name_307 { union key_payload payload; struct __anonstruct____missing_field_name_308 __annonCompField69; int reject_error; } ; 128 struct key { atomic_t usage; key_serial_t serial; union __anonunion____missing_field_name_303 __annonCompField65; struct rw_semaphore sem; struct key_user *user; void *security; union __anonunion____missing_field_name_304 __annonCompField66; time_t last_used_at; kuid_t uid; kgid_t gid; key_perm_t perm; unsigned short quotalen; unsigned short datalen; unsigned long flags; union __anonunion____missing_field_name_305 __annonCompField68; union __anonunion____missing_field_name_307 __annonCompField70; int (*restrict_link)(struct key *, const struct key_type *, const union key_payload *); } ; 377 struct audit_context ; 27 struct group_info { atomic_t usage; int ngroups; kgid_t gid[0U]; } ; 85 struct cred { atomic_t usage; atomic_t subscribers; void *put_addr; unsigned int magic; kuid_t uid; kgid_t gid; kuid_t suid; kgid_t sgid; kuid_t euid; kgid_t egid; kuid_t fsuid; kgid_t fsgid; unsigned int securebits; kernel_cap_t cap_inheritable; kernel_cap_t cap_permitted; kernel_cap_t cap_effective; kernel_cap_t cap_bset; kernel_cap_t cap_ambient; unsigned char jit_keyring; struct key *session_keyring; struct key *process_keyring; struct key *thread_keyring; struct key *request_key_auth; void *security; struct user_struct *user; struct user_namespace *user_ns; struct group_info *group_info; struct callback_head rcu; } ; 368 
struct percpu_ref ; 55 typedef void percpu_ref_func_t(struct percpu_ref *); 68 struct percpu_ref { atomic_long_t count; unsigned long percpu_count_ptr; percpu_ref_func_t *release; percpu_ref_func_t *confirm_switch; bool force_atomic; struct callback_head rcu; } ; 607 struct cgroup ; 14 struct bpf_prog ; 14 struct cgroup_bpf { struct bpf_prog *prog[3U]; struct bpf_prog *effective[3U]; } ; 44 struct cgroup_root ; 45 struct cgroup_subsys ; 46 struct cgroup_taskset ; 90 struct cgroup_file { struct kernfs_node *kn; } ; 91 struct cgroup_subsys_state { struct cgroup *cgroup; struct cgroup_subsys *ss; struct percpu_ref refcnt; struct cgroup_subsys_state *parent; struct list_head sibling; struct list_head children; int id; unsigned int flags; u64 serial_nr; atomic_t online_cnt; struct callback_head callback_head; struct work_struct destroy_work; } ; 142 struct css_set { atomic_t refcount; struct hlist_node hlist; struct list_head tasks; struct list_head mg_tasks; struct list_head cgrp_links; struct cgroup *dfl_cgrp; struct cgroup_subsys_state *subsys[13U]; struct list_head mg_preload_node; struct list_head mg_node; struct cgroup *mg_src_cgrp; struct cgroup *mg_dst_cgrp; struct css_set *mg_dst_cset; struct list_head e_cset_node[13U]; struct list_head task_iters; bool dead; struct callback_head callback_head; } ; 222 struct cgroup { struct cgroup_subsys_state self; unsigned long flags; int id; int level; int populated_cnt; struct kernfs_node *kn; struct cgroup_file procs_file; struct cgroup_file events_file; u16 subtree_control; u16 subtree_ss_mask; u16 old_subtree_control; u16 old_subtree_ss_mask; struct cgroup_subsys_state *subsys[13U]; struct cgroup_root *root; struct list_head cset_links; struct list_head e_csets[13U]; struct list_head pidlists; struct mutex pidlist_mutex; wait_queue_head_t offline_waitq; struct work_struct release_agent_work; struct cgroup_bpf bpf; int ancestor_ids[]; } ; 310 struct cgroup_root { struct kernfs_root *kf_root; unsigned int subsys_mask; int 
hierarchy_id; struct cgroup cgrp; int cgrp_ancestor_id_storage; atomic_t nr_cgrps; struct list_head root_list; unsigned int flags; struct idr cgroup_idr; char release_agent_path[4096U]; char name[64U]; } ; 349 struct cftype { char name[64U]; unsigned long private; size_t max_write_len; unsigned int flags; unsigned int file_offset; struct cgroup_subsys *ss; struct list_head node; struct kernfs_ops *kf_ops; u64 (*read_u64)(struct cgroup_subsys_state *, struct cftype *); s64 (*read_s64)(struct cgroup_subsys_state *, struct cftype *); int (*seq_show)(struct seq_file *, void *); void * (*seq_start)(struct seq_file *, loff_t *); void * (*seq_next)(struct seq_file *, void *, loff_t *); void (*seq_stop)(struct seq_file *, void *); int (*write_u64)(struct cgroup_subsys_state *, struct cftype *, u64 ); int (*write_s64)(struct cgroup_subsys_state *, struct cftype *, s64 ); ssize_t (*write)(struct kernfs_open_file *, char *, size_t , loff_t ); struct lock_class_key lockdep_key; } ; 434 struct cgroup_subsys { struct cgroup_subsys_state * (*css_alloc)(struct cgroup_subsys_state *); int (*css_online)(struct cgroup_subsys_state *); void (*css_offline)(struct cgroup_subsys_state *); void (*css_released)(struct cgroup_subsys_state *); void (*css_free)(struct cgroup_subsys_state *); void (*css_reset)(struct cgroup_subsys_state *); int (*can_attach)(struct cgroup_taskset *); void (*cancel_attach)(struct cgroup_taskset *); void (*attach)(struct cgroup_taskset *); void (*post_attach)(); int (*can_fork)(struct task_struct *); void (*cancel_fork)(struct task_struct *); void (*fork)(struct task_struct *); void (*exit)(struct task_struct *); void (*free)(struct task_struct *); void (*bind)(struct cgroup_subsys_state *); bool early_init; bool implicit_on_dfl; bool broken_hierarchy; bool warned_broken_hierarchy; int id; const char *name; const char *legacy_name; struct cgroup_root *root; struct idr css_idr; struct list_head cfts; struct cftype *dfl_cftypes; struct cftype *legacy_cftypes; 
unsigned int depends_on; } ; 128 struct futex_pi_state ; 129 struct robust_list_head ; 130 struct bio_list ; 131 struct fs_struct ; 132 struct perf_event_context ; 133 struct blk_plug ; 134 struct nameidata ; 188 struct cfs_rq ; 189 struct task_group ; 515 struct sighand_struct { atomic_t count; struct k_sigaction action[64U]; spinlock_t siglock; wait_queue_head_t signalfd_wqh; } ; 563 struct pacct_struct { int ac_flag; long ac_exitcode; unsigned long ac_mem; cputime_t ac_utime; cputime_t ac_stime; unsigned long ac_minflt; unsigned long ac_majflt; } ; 571 struct cpu_itimer { cputime_t expires; cputime_t incr; u32 error; u32 incr_error; } ; 578 struct prev_cputime { cputime_t utime; cputime_t stime; raw_spinlock_t lock; } ; 603 struct task_cputime { cputime_t utime; cputime_t stime; unsigned long long sum_exec_runtime; } ; 619 struct task_cputime_atomic { atomic64_t utime; atomic64_t stime; atomic64_t sum_exec_runtime; } ; 641 struct thread_group_cputimer { struct task_cputime_atomic cputime_atomic; bool running; bool checking_timer; } ; 686 struct autogroup ; 687 struct tty_struct ; 687 struct taskstats ; 687 struct tty_audit_buf ; 687 struct signal_struct { atomic_t sigcnt; atomic_t live; int nr_threads; struct list_head thread_head; wait_queue_head_t wait_chldexit; struct task_struct *curr_target; struct sigpending shared_pending; int group_exit_code; int notify_count; struct task_struct *group_exit_task; int group_stop_count; unsigned int flags; unsigned char is_child_subreaper; unsigned char has_child_subreaper; int posix_timer_id; struct list_head posix_timers; struct hrtimer real_timer; struct pid *leader_pid; ktime_t it_real_incr; struct cpu_itimer it[2U]; struct thread_group_cputimer cputimer; struct task_cputime cputime_expires; struct list_head cpu_timers[3U]; struct pid *tty_old_pgrp; int leader; struct tty_struct *tty; struct autogroup *autogroup; seqlock_t stats_lock; cputime_t utime; cputime_t stime; cputime_t cutime; cputime_t cstime; cputime_t 
gtime; cputime_t cgtime; struct prev_cputime prev_cputime; unsigned long nvcsw; unsigned long nivcsw; unsigned long cnvcsw; unsigned long cnivcsw; unsigned long min_flt; unsigned long maj_flt; unsigned long cmin_flt; unsigned long cmaj_flt; unsigned long inblock; unsigned long oublock; unsigned long cinblock; unsigned long coublock; unsigned long maxrss; unsigned long cmaxrss; struct task_io_accounting ioac; unsigned long long sum_sched_runtime; struct rlimit rlim[16U]; struct pacct_struct pacct; struct taskstats *stats; unsigned int audit_tty; struct tty_audit_buf *tty_audit_buf; bool oom_flag_origin; short oom_score_adj; short oom_score_adj_min; struct mm_struct *oom_mm; struct mutex cred_guard_mutex; } ; 863 struct user_struct { atomic_t __count; atomic_t processes; atomic_t sigpending; atomic_t inotify_watches; atomic_t inotify_devs; atomic_t fanotify_listeners; atomic_long_t epoll_watches; unsigned long mq_bytes; unsigned long locked_shm; unsigned long unix_inflight; atomic_long_t pipe_bufs; struct key *uid_keyring; struct key *session_keyring; struct hlist_node uidhash_node; kuid_t uid; atomic_long_t locked_vm; } ; 908 struct reclaim_state ; 909 struct sched_info { unsigned long pcount; unsigned long long run_delay; unsigned long long last_arrival; unsigned long long last_queued; } ; 924 struct task_delay_info { spinlock_t lock; unsigned int flags; u64 blkio_start; u64 blkio_delay; u64 swapin_delay; u32 blkio_count; u32 swapin_count; u64 freepages_start; u64 freepages_delay; u32 freepages_count; } ; 981 struct wake_q_node { struct wake_q_node *next; } ; 1226 struct io_context ; 1260 struct uts_namespace ; 1261 struct load_weight { unsigned long weight; u32 inv_weight; } ; 1269 struct sched_avg { u64 last_update_time; u64 load_sum; u32 util_sum; u32 period_contrib; unsigned long load_avg; unsigned long util_avg; } ; 1327 struct sched_statistics { u64 wait_start; u64 wait_max; u64 wait_count; u64 wait_sum; u64 iowait_count; u64 iowait_sum; u64 sleep_start; u64 
sleep_max; s64 sum_sleep_runtime; u64 block_start; u64 block_max; u64 exec_max; u64 slice_max; u64 nr_migrations_cold; u64 nr_failed_migrations_affine; u64 nr_failed_migrations_running; u64 nr_failed_migrations_hot; u64 nr_forced_migrations; u64 nr_wakeups; u64 nr_wakeups_sync; u64 nr_wakeups_migrate; u64 nr_wakeups_local; u64 nr_wakeups_remote; u64 nr_wakeups_affine; u64 nr_wakeups_affine_attempts; u64 nr_wakeups_passive; u64 nr_wakeups_idle; } ; 1362 struct sched_entity { struct load_weight load; struct rb_node run_node; struct list_head group_node; unsigned int on_rq; u64 exec_start; u64 sum_exec_runtime; u64 vruntime; u64 prev_sum_exec_runtime; u64 nr_migrations; struct sched_statistics statistics; int depth; struct sched_entity *parent; struct cfs_rq *cfs_rq; struct cfs_rq *my_q; struct sched_avg avg; } ; 1399 struct rt_rq ; 1399 struct sched_rt_entity { struct list_head run_list; unsigned long timeout; unsigned long watchdog_stamp; unsigned int time_slice; unsigned short on_rq; unsigned short on_list; struct sched_rt_entity *back; struct sched_rt_entity *parent; struct rt_rq *rt_rq; struct rt_rq *my_q; } ; 1417 struct sched_dl_entity { struct rb_node rb_node; u64 dl_runtime; u64 dl_deadline; u64 dl_period; u64 dl_bw; s64 runtime; u64 deadline; unsigned int flags; int dl_throttled; int dl_boosted; int dl_yielded; struct hrtimer dl_timer; } ; 1481 struct tlbflush_unmap_batch { struct cpumask cpumask; bool flush_required; bool writable; } ; 1500 struct sched_class ; 1500 struct compat_robust_list_head ; 1500 struct numa_group ; 1500 struct kcov ; 1500 struct task_struct { struct thread_info thread_info; volatile long state; void *stack; atomic_t usage; unsigned int flags; unsigned int ptrace; struct llist_node wake_entry; int on_cpu; unsigned int cpu; unsigned int wakee_flips; unsigned long wakee_flip_decay_ts; struct task_struct *last_wakee; int wake_cpu; int on_rq; int prio; int static_prio; int normal_prio; unsigned int rt_priority; const struct sched_class 
*sched_class; struct sched_entity se; struct sched_rt_entity rt; struct task_group *sched_task_group; struct sched_dl_entity dl; struct hlist_head preempt_notifiers; unsigned int policy; int nr_cpus_allowed; cpumask_t cpus_allowed; unsigned long rcu_tasks_nvcsw; bool rcu_tasks_holdout; struct list_head rcu_tasks_holdout_list; int rcu_tasks_idle_cpu; struct sched_info sched_info; struct list_head tasks; struct plist_node pushable_tasks; struct rb_node pushable_dl_tasks; struct mm_struct *mm; struct mm_struct *active_mm; u32 vmacache_seqnum; struct vm_area_struct *vmacache[4U]; struct task_rss_stat rss_stat; int exit_state; int exit_code; int exit_signal; int pdeath_signal; unsigned long jobctl; unsigned int personality; unsigned char sched_reset_on_fork; unsigned char sched_contributes_to_load; unsigned char sched_migrated; unsigned char sched_remote_wakeup; unsigned char; unsigned char in_execve; unsigned char in_iowait; unsigned char restore_sigmask; unsigned char memcg_may_oom; unsigned char memcg_kmem_skip_account; unsigned char brk_randomized; unsigned long atomic_flags; struct restart_block restart_block; pid_t pid; pid_t tgid; struct task_struct *real_parent; struct task_struct *parent; struct list_head children; struct list_head sibling; struct task_struct *group_leader; struct list_head ptraced; struct list_head ptrace_entry; struct pid_link pids[3U]; struct list_head thread_group; struct list_head thread_node; struct completion *vfork_done; int *set_child_tid; int *clear_child_tid; cputime_t utime; cputime_t stime; cputime_t gtime; struct prev_cputime prev_cputime; unsigned long nvcsw; unsigned long nivcsw; u64 start_time; u64 real_start_time; unsigned long min_flt; unsigned long maj_flt; struct task_cputime cputime_expires; struct list_head cpu_timers[3U]; const struct cred *ptracer_cred; const struct cred *real_cred; const struct cred *cred; char comm[16U]; struct nameidata *nameidata; struct sysv_sem sysvsem; struct sysv_shm sysvshm; unsigned long 
last_switch_count; struct fs_struct *fs; struct files_struct *files; struct nsproxy *nsproxy; struct signal_struct *signal; struct sighand_struct *sighand; sigset_t blocked; sigset_t real_blocked; sigset_t saved_sigmask; struct sigpending pending; unsigned long sas_ss_sp; size_t sas_ss_size; unsigned int sas_ss_flags; struct callback_head *task_works; struct audit_context *audit_context; kuid_t loginuid; unsigned int sessionid; struct seccomp seccomp; u32 parent_exec_id; u32 self_exec_id; spinlock_t alloc_lock; raw_spinlock_t pi_lock; struct wake_q_node wake_q; struct rb_root pi_waiters; struct rb_node *pi_waiters_leftmost; struct rt_mutex_waiter *pi_blocked_on; struct mutex_waiter *blocked_on; unsigned int irq_events; unsigned long hardirq_enable_ip; unsigned long hardirq_disable_ip; unsigned int hardirq_enable_event; unsigned int hardirq_disable_event; int hardirqs_enabled; int hardirq_context; unsigned long softirq_disable_ip; unsigned long softirq_enable_ip; unsigned int softirq_disable_event; unsigned int softirq_enable_event; int softirqs_enabled; int softirq_context; u64 curr_chain_key; int lockdep_depth; unsigned int lockdep_recursion; struct held_lock held_locks[48U]; gfp_t lockdep_reclaim_gfp; unsigned int in_ubsan; void *journal_info; struct bio_list *bio_list; struct blk_plug *plug; struct reclaim_state *reclaim_state; struct backing_dev_info *backing_dev_info; struct io_context *io_context; unsigned long ptrace_message; siginfo_t *last_siginfo; struct task_io_accounting ioac; u64 acct_rss_mem1; u64 acct_vm_mem1; cputime_t acct_timexpd; nodemask_t mems_allowed; seqcount_t mems_allowed_seq; int cpuset_mem_spread_rotor; int cpuset_slab_spread_rotor; struct css_set *cgroups; struct list_head cg_list; int closid; struct robust_list_head *robust_list; struct compat_robust_list_head *compat_robust_list; struct list_head pi_state_list; struct futex_pi_state *pi_state_cache; struct perf_event_context *perf_event_ctxp[2U]; struct mutex perf_event_mutex; struct 
list_head perf_event_list; struct mempolicy *mempolicy; short il_next; short pref_node_fork; int numa_scan_seq; unsigned int numa_scan_period; unsigned int numa_scan_period_max; int numa_preferred_nid; unsigned long numa_migrate_retry; u64 node_stamp; u64 last_task_numa_placement; u64 last_sum_exec_runtime; struct callback_head numa_work; struct list_head numa_entry; struct numa_group *numa_group; unsigned long *numa_faults; unsigned long total_numa_faults; unsigned long numa_faults_locality[3U]; unsigned long numa_pages_migrated; struct tlbflush_unmap_batch tlb_ubc; struct callback_head rcu; struct pipe_inode_info *splice_pipe; struct page_frag task_frag; struct task_delay_info *delays; int make_it_fail; int nr_dirtied; int nr_dirtied_pause; unsigned long dirty_paused_when; int latency_record_count; struct latency_record latency_record[32U]; u64 timer_slack_ns; u64 default_timer_slack_ns; unsigned int kasan_depth; unsigned long trace; unsigned long trace_recursion; enum kcov_mode kcov_mode; unsigned int kcov_size; void *kcov_area; struct kcov *kcov; struct mem_cgroup *memcg_in_oom; gfp_t memcg_oom_gfp_mask; int memcg_oom_order; unsigned int memcg_nr_pages_over_high; struct uprobe_task *utask; unsigned int sequential_io; unsigned int sequential_io_avg; unsigned long task_state_change; int pagefault_disabled; struct task_struct *oom_reaper_list; atomic_t stack_refcount; struct thread_struct thread; } ; 60 struct exception_table_entry { int insn; int fixup; int handler; } ; 161 struct in6_addr ; 184 struct klist_node ; 37 struct klist_node { void *n_klist; struct list_head n_node; struct kref n_ref; } ; 68 struct seq_file { char *buf; size_t size; size_t from; size_t count; size_t pad_until; loff_t index; loff_t read_pos; u64 version; struct mutex lock; const struct seq_operations *op; int poll_event; const struct file *file; void *private; } ; 30 struct seq_operations { void * (*start)(struct seq_file *, loff_t *); void (*stop)(struct seq_file *, void *); void * 
(*next)(struct seq_file *, void *, loff_t *); int (*show)(struct seq_file *, void *); } ; 222 struct pinctrl ; 223 struct pinctrl_state ; 194 struct dev_pin_info { struct pinctrl *p; struct pinctrl_state *default_state; struct pinctrl_state *init_state; struct pinctrl_state *sleep_state; struct pinctrl_state *idle_state; } ; 76 struct dma_map_ops ; 76 struct dev_archdata { struct dma_map_ops *dma_ops; void *iommu; } ; 24 struct device_private ; 25 struct device_driver ; 26 struct driver_private ; 27 struct class ; 28 struct subsys_private ; 29 struct bus_type ; 30 struct device_node ; 31 struct fwnode_handle ; 32 struct iommu_ops ; 33 struct iommu_group ; 34 struct iommu_fwspec ; 62 struct device_attribute ; 62 struct bus_type { const char *name; const char *dev_name; struct device *dev_root; struct device_attribute *dev_attrs; const struct attribute_group **bus_groups; const struct attribute_group **dev_groups; const struct attribute_group **drv_groups; int (*match)(struct device *, struct device_driver *); int (*uevent)(struct device *, struct kobj_uevent_env *); int (*probe)(struct device *); int (*remove)(struct device *); void (*shutdown)(struct device *); int (*online)(struct device *); int (*offline)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct dev_pm_ops *pm; const struct iommu_ops *iommu_ops; struct subsys_private *p; struct lock_class_key lock_key; } ; 143 struct device_type ; 202 enum probe_type { PROBE_DEFAULT_STRATEGY = 0, PROBE_PREFER_ASYNCHRONOUS = 1, PROBE_FORCE_SYNCHRONOUS = 2 } ; 208 struct of_device_id ; 208 struct acpi_device_id ; 208 struct device_driver { const char *name; struct bus_type *bus; struct module *owner; const char *mod_name; bool suppress_bind_attrs; enum probe_type probe_type; const struct of_device_id *of_match_table; const struct acpi_device_id *acpi_match_table; int (*probe)(struct device *); int (*remove)(struct device *); void (*shutdown)(struct device *); int 
(*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct attribute_group **groups; const struct dev_pm_ops *pm; struct driver_private *p; } ; 358 struct class_attribute ; 358 struct class { const char *name; struct module *owner; struct class_attribute *class_attrs; const struct attribute_group **class_groups; const struct attribute_group **dev_groups; struct kobject *dev_kobj; int (*dev_uevent)(struct device *, struct kobj_uevent_env *); char * (*devnode)(struct device *, umode_t *); void (*class_release)(struct class *); void (*dev_release)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct kobj_ns_type_operations *ns_type; const void * (*namespace)(struct device *); const struct dev_pm_ops *pm; struct subsys_private *p; } ; 453 struct class_attribute { struct attribute attr; ssize_t (*show)(struct class *, struct class_attribute *, char *); ssize_t (*store)(struct class *, struct class_attribute *, const char *, size_t ); } ; 523 struct device_type { const char *name; const struct attribute_group **groups; int (*uevent)(struct device *, struct kobj_uevent_env *); char * (*devnode)(struct device *, umode_t *, kuid_t *, kgid_t *); void (*release)(struct device *); const struct dev_pm_ops *pm; } ; 551 struct device_attribute { struct attribute attr; ssize_t (*show)(struct device *, struct device_attribute *, char *); ssize_t (*store)(struct device *, struct device_attribute *, const char *, size_t ); } ; 723 struct device_dma_parameters { unsigned int max_segment_size; unsigned long segment_boundary_mask; } ; 786 enum dl_dev_state { DL_DEV_NO_DRIVER = 0, DL_DEV_PROBING = 1, DL_DEV_DRIVER_BOUND = 2, DL_DEV_UNBINDING = 3 } ; 793 struct dev_links_info { struct list_head suppliers; struct list_head consumers; enum dl_dev_state status; } ; 813 struct irq_domain ; 813 struct dma_coherent_mem ; 813 struct cma ; 813 struct device { struct device *parent; struct device_private *p; 
struct kobject kobj; const char *init_name; const struct device_type *type; struct mutex mutex; struct bus_type *bus; struct device_driver *driver; void *platform_data; void *driver_data; struct dev_links_info links; struct dev_pm_info power; struct dev_pm_domain *pm_domain; struct irq_domain *msi_domain; struct dev_pin_info *pins; struct list_head msi_list; int numa_node; u64 *dma_mask; u64 coherent_dma_mask; unsigned long dma_pfn_offset; struct device_dma_parameters *dma_parms; struct list_head dma_pools; struct dma_coherent_mem *dma_mem; struct cma *cma_area; struct dev_archdata archdata; struct device_node *of_node; struct fwnode_handle *fwnode; dev_t devt; u32 id; spinlock_t devres_lock; struct list_head devres_head; struct klist_node knode_class; struct class *class; const struct attribute_group **groups; void (*release)(struct device *); struct iommu_group *iommu_group; struct iommu_fwspec *iommu_fwspec; bool offline_disabled; bool offline; } ; 971 struct wakeup_source { const char *name; struct list_head entry; spinlock_t lock; struct wake_irq *wakeirq; struct timer_list timer; unsigned long timer_expires; ktime_t total_time; ktime_t max_time; ktime_t last_time; ktime_t start_prevent_time; ktime_t prevent_sleep_time; unsigned long event_count; unsigned long active_count; unsigned long relax_count; unsigned long expire_count; unsigned long wakeup_count; bool active; bool autosleep_enabled; } ; 1440 struct scatterlist ; 96 enum dma_data_direction { DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2, DMA_NONE = 3 } ; 273 struct vm_fault { struct vm_area_struct *vma; unsigned int flags; gfp_t gfp_mask; unsigned long pgoff; unsigned long address; pmd_t *pmd; pte_t orig_pte; struct page *cow_page; struct mem_cgroup *memcg; struct page *page; pte_t *pte; spinlock_t *ptl; pgtable_t prealloc_pte; } ; 322 struct vm_operations_struct { void (*open)(struct vm_area_struct *); void (*close)(struct vm_area_struct *); int (*mremap)(struct vm_area_struct *); int 
(*fault)(struct vm_area_struct *, struct vm_fault *); int (*pmd_fault)(struct vm_area_struct *, unsigned long, pmd_t *, unsigned int); void (*map_pages)(struct vm_fault *, unsigned long, unsigned long); int (*page_mkwrite)(struct vm_area_struct *, struct vm_fault *); int (*pfn_mkwrite)(struct vm_area_struct *, struct vm_fault *); int (*access)(struct vm_area_struct *, unsigned long, void *, int, int); const char * (*name)(struct vm_area_struct *); int (*set_policy)(struct vm_area_struct *, struct mempolicy *); struct mempolicy * (*get_policy)(struct vm_area_struct *, unsigned long); struct page * (*find_special_page)(struct vm_area_struct *, unsigned long); } ; 2439 struct scatterlist { unsigned long sg_magic; unsigned long page_link; unsigned int offset; unsigned int length; dma_addr_t dma_address; unsigned int dma_length; } ; 21 struct sg_table { struct scatterlist *sgl; unsigned int nents; unsigned int orig_nents; } ; 406 struct dma_map_ops { void * (*alloc)(struct device *, size_t , dma_addr_t *, gfp_t , unsigned long); void (*free)(struct device *, size_t , void *, dma_addr_t , unsigned long); int (*mmap)(struct device *, struct vm_area_struct *, void *, dma_addr_t , size_t , unsigned long); int (*get_sgtable)(struct device *, struct sg_table *, void *, dma_addr_t , size_t , unsigned long); dma_addr_t (*map_page)(struct device *, struct page *, unsigned long, size_t , enum dma_data_direction , unsigned long); void (*unmap_page)(struct device *, dma_addr_t , size_t , enum dma_data_direction , unsigned long); int (*map_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction , unsigned long); void (*unmap_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction , unsigned long); dma_addr_t (*map_resource)(struct device *, phys_addr_t , size_t , enum dma_data_direction , unsigned long); void (*unmap_resource)(struct device *, dma_addr_t , size_t , enum dma_data_direction , unsigned long); void (*sync_single_for_cpu)(struct device *, 
dma_addr_t , size_t , enum dma_data_direction ); void (*sync_single_for_device)(struct device *, dma_addr_t , size_t , enum dma_data_direction ); void (*sync_sg_for_cpu)(struct device *, struct scatterlist *, int, enum dma_data_direction ); void (*sync_sg_for_device)(struct device *, struct scatterlist *, int, enum dma_data_direction ); int (*mapping_error)(struct device *, dma_addr_t ); int (*dma_supported)(struct device *, u64 ); int (*set_dma_mask)(struct device *, u64 ); int is_phys; } ; 15 typedef u64 netdev_features_t; 70 union __anonunion_in6_u_356 { __u8 u6_addr8[16U]; __be16 u6_addr16[8U]; __be32 u6_addr32[4U]; } ; 70 struct in6_addr { union __anonunion_in6_u_356 in6_u; } ; 46 struct ethhdr { unsigned char h_dest[6U]; unsigned char h_source[6U]; __be16 h_proto; } ; 227 struct pipe_buf_operations ; 227 struct pipe_buffer { struct page *page; unsigned int offset; unsigned int len; const struct pipe_buf_operations *ops; unsigned int flags; unsigned long private; } ; 27 struct pipe_inode_info { struct mutex mutex; wait_queue_head_t wait; unsigned int nrbufs; unsigned int curbuf; unsigned int buffers; unsigned int readers; unsigned int writers; unsigned int files; unsigned int waiting_writers; unsigned int r_counter; unsigned int w_counter; struct page *tmp_page; struct fasync_struct *fasync_readers; struct fasync_struct *fasync_writers; struct pipe_buffer *bufs; struct user_struct *user; } ; 63 struct pipe_buf_operations { int can_merge; int (*confirm)(struct pipe_inode_info *, struct pipe_buffer *); void (*release)(struct pipe_inode_info *, struct pipe_buffer *); int (*steal)(struct pipe_inode_info *, struct pipe_buffer *); void (*get)(struct pipe_inode_info *, struct pipe_buffer *); } ; 272 struct napi_struct ; 273 struct nf_conntrack { atomic_t use; } ; 254 union __anonunion____missing_field_name_370 { __be32 ipv4_daddr; struct in6_addr ipv6_daddr; char neigh_header[8U]; } ; 254 struct nf_bridge_info { atomic_t use; unsigned char orig_proto; unsigned char 
pkt_otherhost; unsigned char in_prerouting; unsigned char bridged_dnat; __u16 frag_max_size; struct net_device *physindev; struct net_device *physoutdev; union __anonunion____missing_field_name_370 __annonCompField90; } ; 278 struct sk_buff_head { struct sk_buff *next; struct sk_buff *prev; __u32 qlen; spinlock_t lock; } ; 310 struct skb_frag_struct ; 310 typedef struct skb_frag_struct skb_frag_t; 311 struct __anonstruct_page_371 { struct page *p; } ; 311 struct skb_frag_struct { struct __anonstruct_page_371 page; __u32 page_offset; __u32 size; } ; 344 struct skb_shared_hwtstamps { ktime_t hwtstamp; } ; 410 struct skb_shared_info { unsigned char nr_frags; __u8 tx_flags; unsigned short gso_size; unsigned short gso_segs; unsigned short gso_type; struct sk_buff *frag_list; struct skb_shared_hwtstamps hwtstamps; u32 tskey; __be32 ip6_frag_id; atomic_t dataref; void *destructor_arg; skb_frag_t frags[17U]; } ; 500 typedef unsigned int sk_buff_data_t; 501 struct __anonstruct____missing_field_name_373 { u32 stamp_us; u32 stamp_jiffies; } ; 501 union __anonunion____missing_field_name_372 { u64 v64; struct __anonstruct____missing_field_name_373 __annonCompField91; } ; 501 struct skb_mstamp { union __anonunion____missing_field_name_372 __annonCompField92; } ; 564 union __anonunion____missing_field_name_376 { ktime_t tstamp; struct skb_mstamp skb_mstamp; } ; 564 struct __anonstruct____missing_field_name_375 { struct sk_buff *next; struct sk_buff *prev; union __anonunion____missing_field_name_376 __annonCompField93; } ; 564 union __anonunion____missing_field_name_374 { struct __anonstruct____missing_field_name_375 __annonCompField94; struct rb_node rbnode; } ; 564 union __anonunion____missing_field_name_377 { struct net_device *dev; unsigned long dev_scratch; } ; 564 struct sec_path ; 564 struct __anonstruct____missing_field_name_379 { __u16 csum_start; __u16 csum_offset; } ; 564 union __anonunion____missing_field_name_378 { __wsum csum; struct 
__anonstruct____missing_field_name_379 __annonCompField97; } ; 564 union __anonunion____missing_field_name_380 { unsigned int napi_id; unsigned int sender_cpu; } ; 564 union __anonunion____missing_field_name_381 { __u32 mark; __u32 reserved_tailroom; } ; 564 union __anonunion____missing_field_name_382 { __be16 inner_protocol; __u8 inner_ipproto; } ; 564 struct sk_buff { union __anonunion____missing_field_name_374 __annonCompField95; struct sock *sk; union __anonunion____missing_field_name_377 __annonCompField96; char cb[48U]; unsigned long _skb_refdst; void (*destructor)(struct sk_buff *); struct sec_path *sp; struct nf_conntrack *nfct; struct nf_bridge_info *nf_bridge; unsigned int len; unsigned int data_len; __u16 mac_len; __u16 hdr_len; __u16 queue_mapping; __u8 __cloned_offset[0U]; unsigned char cloned; unsigned char nohdr; unsigned char fclone; unsigned char peeked; unsigned char head_frag; unsigned char xmit_more; unsigned char __unused; __u32 headers_start[0U]; __u8 __pkt_type_offset[0U]; unsigned char pkt_type; unsigned char pfmemalloc; unsigned char ignore_df; unsigned char nfctinfo; unsigned char nf_trace; unsigned char ip_summed; unsigned char ooo_okay; unsigned char l4_hash; unsigned char sw_hash; unsigned char wifi_acked_valid; unsigned char wifi_acked; unsigned char no_fcs; unsigned char encapsulation; unsigned char encap_hdr_csum; unsigned char csum_valid; unsigned char csum_complete_sw; unsigned char csum_level; unsigned char csum_bad; unsigned char ndisc_nodetype; unsigned char ipvs_property; unsigned char inner_protocol_type; unsigned char remcsum_offload; unsigned char offload_fwd_mark; __u16 tc_index; __u16 tc_verd; union __anonunion____missing_field_name_378 __annonCompField98; __u32 priority; int skb_iif; __u32 hash; __be16 vlan_proto; __u16 vlan_tci; union __anonunion____missing_field_name_380 __annonCompField99; __u32 secmark; union __anonunion____missing_field_name_381 __annonCompField100; union __anonunion____missing_field_name_382 
__annonCompField101; __u16 inner_transport_header; __u16 inner_network_header; __u16 inner_mac_header; __be16 protocol; __u16 transport_header; __u16 network_header; __u16 mac_header; __u32 headers_end[0U]; sk_buff_data_t tail; sk_buff_data_t end; unsigned char *head; unsigned char *data; unsigned int truesize; atomic_t users; } ; 845 struct dst_entry ; 1426 struct dql { unsigned int num_queued; unsigned int adj_limit; unsigned int last_obj_cnt; unsigned int limit; unsigned int num_completed; unsigned int prev_ovlimit; unsigned int prev_num_queued; unsigned int prev_last_obj_cnt; unsigned int lowest_slack; unsigned long slack_start_time; unsigned int max_limit; unsigned int min_limit; unsigned int slack_hold_time; } ; 43 struct __anonstruct_sync_serial_settings_385 { unsigned int clock_rate; unsigned int clock_type; unsigned short loopback; } ; 43 typedef struct __anonstruct_sync_serial_settings_385 sync_serial_settings; 50 struct __anonstruct_te1_settings_386 { unsigned int clock_rate; unsigned int clock_type; unsigned short loopback; unsigned int slot_map; } ; 50 typedef struct __anonstruct_te1_settings_386 te1_settings; 55 struct __anonstruct_raw_hdlc_proto_387 { unsigned short encoding; unsigned short parity; } ; 55 typedef struct __anonstruct_raw_hdlc_proto_387 raw_hdlc_proto; 65 struct __anonstruct_fr_proto_388 { unsigned int t391; unsigned int t392; unsigned int n391; unsigned int n392; unsigned int n393; unsigned short lmi; unsigned short dce; } ; 65 typedef struct __anonstruct_fr_proto_388 fr_proto; 69 struct __anonstruct_fr_proto_pvc_389 { unsigned int dlci; } ; 69 typedef struct __anonstruct_fr_proto_pvc_389 fr_proto_pvc; 74 struct __anonstruct_fr_proto_pvc_info_390 { unsigned int dlci; char master[16U]; } ; 74 typedef struct __anonstruct_fr_proto_pvc_info_390 fr_proto_pvc_info; 79 struct __anonstruct_cisco_proto_391 { unsigned int interval; unsigned int timeout; } ; 79 typedef struct __anonstruct_cisco_proto_391 cisco_proto; 117 struct ifmap { unsigned 
long mem_start; unsigned long mem_end; unsigned short base_addr; unsigned char irq; unsigned char dma; unsigned char port; } ; 197 union __anonunion_ifs_ifsu_392 { raw_hdlc_proto *raw_hdlc; cisco_proto *cisco; fr_proto *fr; fr_proto_pvc *fr_pvc; fr_proto_pvc_info *fr_pvc_info; sync_serial_settings *sync; te1_settings *te1; } ; 197 struct if_settings { unsigned int type; unsigned int size; union __anonunion_ifs_ifsu_392 ifs_ifsu; } ; 216 union __anonunion_ifr_ifrn_393 { char ifrn_name[16U]; } ; 216 union __anonunion_ifr_ifru_394 { struct sockaddr ifru_addr; struct sockaddr ifru_dstaddr; struct sockaddr ifru_broadaddr; struct sockaddr ifru_netmask; struct sockaddr ifru_hwaddr; short ifru_flags; int ifru_ivalue; int ifru_mtu; struct ifmap ifru_map; char ifru_slave[16U]; char ifru_newname[16U]; void *ifru_data; struct if_settings ifru_settings; } ; 216 struct ifreq { union __anonunion_ifr_ifrn_393 ifr_ifrn; union __anonunion_ifr_ifru_394 ifr_ifru; } ; 18 typedef s32 compat_time_t; 39 typedef s32 compat_long_t; 45 typedef u32 compat_uptr_t; 46 struct compat_timespec { compat_time_t tv_sec; s32 tv_nsec; } ; 278 struct compat_robust_list { compat_uptr_t next; } ; 282 struct compat_robust_list_head { struct compat_robust_list list; compat_long_t futex_offset; compat_uptr_t list_op_pending; } ; 39 struct ethtool_cmd { __u32 cmd; __u32 supported; __u32 advertising; __u16 speed; __u8 duplex; __u8 port; __u8 phy_address; __u8 transceiver; __u8 autoneg; __u8 mdio_support; __u32 maxtxpkt; __u32 maxrxpkt; __u16 speed_hi; __u8 eth_tp_mdix; __u8 eth_tp_mdix_ctrl; __u32 lp_advertising; __u32 reserved[2U]; } ; 130 struct ethtool_drvinfo { __u32 cmd; char driver[32U]; char version[32U]; char fw_version[32U]; char bus_info[32U]; char erom_version[32U]; char reserved2[12U]; __u32 n_priv_flags; __u32 n_stats; __u32 testinfo_len; __u32 eedump_len; __u32 regdump_len; } ; 194 struct ethtool_wolinfo { __u32 cmd; __u32 supported; __u32 wolopts; __u8 sopass[6U]; } ; 238 struct ethtool_tunable 
{ __u32 cmd; __u32 id; __u32 type_id; __u32 len; void *data[0U]; } ; 256 struct ethtool_regs { __u32 cmd; __u32 version; __u32 len; __u8 data[0U]; } ; 285 struct ethtool_eeprom { __u32 cmd; __u32 magic; __u32 offset; __u32 len; __u8 data[0U]; } ; 311 struct ethtool_eee { __u32 cmd; __u32 supported; __u32 advertised; __u32 lp_advertised; __u32 eee_active; __u32 eee_enabled; __u32 tx_lpi_enabled; __u32 tx_lpi_timer; __u32 reserved[2U]; } ; 340 struct ethtool_modinfo { __u32 cmd; __u32 type; __u32 eeprom_len; __u32 reserved[8U]; } ; 357 struct ethtool_coalesce { __u32 cmd; __u32 rx_coalesce_usecs; __u32 rx_max_coalesced_frames; __u32 rx_coalesce_usecs_irq; __u32 rx_max_coalesced_frames_irq; __u32 tx_coalesce_usecs; __u32 tx_max_coalesced_frames; __u32 tx_coalesce_usecs_irq; __u32 tx_max_coalesced_frames_irq; __u32 stats_block_coalesce_usecs; __u32 use_adaptive_rx_coalesce; __u32 use_adaptive_tx_coalesce; __u32 pkt_rate_low; __u32 rx_coalesce_usecs_low; __u32 rx_max_coalesced_frames_low; __u32 tx_coalesce_usecs_low; __u32 tx_max_coalesced_frames_low; __u32 pkt_rate_high; __u32 rx_coalesce_usecs_high; __u32 rx_max_coalesced_frames_high; __u32 tx_coalesce_usecs_high; __u32 tx_max_coalesced_frames_high; __u32 rate_sample_interval; } ; 456 struct ethtool_ringparam { __u32 cmd; __u32 rx_max_pending; __u32 rx_mini_max_pending; __u32 rx_jumbo_max_pending; __u32 tx_max_pending; __u32 rx_pending; __u32 rx_mini_pending; __u32 rx_jumbo_pending; __u32 tx_pending; } ; 493 struct ethtool_channels { __u32 cmd; __u32 max_rx; __u32 max_tx; __u32 max_other; __u32 max_combined; __u32 rx_count; __u32 tx_count; __u32 other_count; __u32 combined_count; } ; 521 struct ethtool_pauseparam { __u32 cmd; __u32 autoneg; __u32 rx_pause; __u32 tx_pause; } ; 627 struct ethtool_test { __u32 cmd; __u32 flags; __u32 reserved; __u32 len; __u64 data[0U]; } ; 659 struct ethtool_stats { __u32 cmd; __u32 n_stats; __u64 data[0U]; } ; 701 struct ethtool_tcpip4_spec { __be32 ip4src; __be32 ip4dst; __be16 psrc; 
__be16 pdst; __u8 tos; } ; 734 struct ethtool_ah_espip4_spec { __be32 ip4src; __be32 ip4dst; __be32 spi; __u8 tos; } ; 750 struct ethtool_usrip4_spec { __be32 ip4src; __be32 ip4dst; __be32 l4_4_bytes; __u8 tos; __u8 ip_ver; __u8 proto; } ; 770 struct ethtool_tcpip6_spec { __be32 ip6src[4U]; __be32 ip6dst[4U]; __be16 psrc; __be16 pdst; __u8 tclass; } ; 788 struct ethtool_ah_espip6_spec { __be32 ip6src[4U]; __be32 ip6dst[4U]; __be32 spi; __u8 tclass; } ; 804 struct ethtool_usrip6_spec { __be32 ip6src[4U]; __be32 ip6dst[4U]; __be32 l4_4_bytes; __u8 tclass; __u8 l4_proto; } ; 820 union ethtool_flow_union { struct ethtool_tcpip4_spec tcp_ip4_spec; struct ethtool_tcpip4_spec udp_ip4_spec; struct ethtool_tcpip4_spec sctp_ip4_spec; struct ethtool_ah_espip4_spec ah_ip4_spec; struct ethtool_ah_espip4_spec esp_ip4_spec; struct ethtool_usrip4_spec usr_ip4_spec; struct ethtool_tcpip6_spec tcp_ip6_spec; struct ethtool_tcpip6_spec udp_ip6_spec; struct ethtool_tcpip6_spec sctp_ip6_spec; struct ethtool_ah_espip6_spec ah_ip6_spec; struct ethtool_ah_espip6_spec esp_ip6_spec; struct ethtool_usrip6_spec usr_ip6_spec; struct ethhdr ether_spec; __u8 hdata[52U]; } ; 837 struct ethtool_flow_ext { __u8 padding[2U]; unsigned char h_dest[6U]; __be16 vlan_etype; __be16 vlan_tci; __be32 data[2U]; } ; 856 struct ethtool_rx_flow_spec { __u32 flow_type; union ethtool_flow_union h_u; struct ethtool_flow_ext h_ext; union ethtool_flow_union m_u; struct ethtool_flow_ext m_ext; __u64 ring_cookie; __u32 location; } ; 906 struct ethtool_rxnfc { __u32 cmd; __u32 flow_type; __u64 data; struct ethtool_rx_flow_spec fs; __u32 rule_cnt; __u32 rule_locs[0U]; } ; 1077 struct ethtool_flash { __u32 cmd; __u32 region; char data[128U]; } ; 1085 struct ethtool_dump { __u32 cmd; __u32 version; __u32 flag; __u32 len; __u8 data[0U]; } ; 1161 struct ethtool_ts_info { __u32 cmd; __u32 so_timestamping; __s32 phc_index; __u32 tx_types; __u32 tx_reserved[3U]; __u32 rx_filters; __u32 rx_reserved[3U]; } ; 1537 struct 
ethtool_link_settings { __u32 cmd; __u32 speed; __u8 duplex; __u8 port; __u8 phy_address; __u8 autoneg; __u8 mdio_support; __u8 eth_tp_mdix; __u8 eth_tp_mdix_ctrl; __s8 link_mode_masks_nwords; __u32 reserved[8U]; __u32 link_mode_masks[0U]; } ; 39 enum ethtool_phys_id_state { ETHTOOL_ID_INACTIVE = 0, ETHTOOL_ID_ACTIVE = 1, ETHTOOL_ID_ON = 2, ETHTOOL_ID_OFF = 3 } ; 97 struct __anonstruct_link_modes_414 { unsigned long supported[1U]; unsigned long advertising[1U]; unsigned long lp_advertising[1U]; } ; 97 struct ethtool_link_ksettings { struct ethtool_link_settings base; struct __anonstruct_link_modes_414 link_modes; } ; 158 struct ethtool_ops { int (*get_settings)(struct net_device *, struct ethtool_cmd *); int (*set_settings)(struct net_device *, struct ethtool_cmd *); void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *); int (*get_regs_len)(struct net_device *); void (*get_regs)(struct net_device *, struct ethtool_regs *, void *); void (*get_wol)(struct net_device *, struct ethtool_wolinfo *); int (*set_wol)(struct net_device *, struct ethtool_wolinfo *); u32 (*get_msglevel)(struct net_device *); void (*set_msglevel)(struct net_device *, u32 ); int (*nway_reset)(struct net_device *); u32 (*get_link)(struct net_device *); int (*get_eeprom_len)(struct net_device *); int (*get_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); int (*set_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); int (*get_coalesce)(struct net_device *, struct ethtool_coalesce *); int (*set_coalesce)(struct net_device *, struct ethtool_coalesce *); void (*get_ringparam)(struct net_device *, struct ethtool_ringparam *); int (*set_ringparam)(struct net_device *, struct ethtool_ringparam *); void (*get_pauseparam)(struct net_device *, struct ethtool_pauseparam *); int (*set_pauseparam)(struct net_device *, struct ethtool_pauseparam *); void (*self_test)(struct net_device *, struct ethtool_test *, u64 *); void (*get_strings)(struct net_device *, u32 , u8 *); int 
(*set_phys_id)(struct net_device *, enum ethtool_phys_id_state ); void (*get_ethtool_stats)(struct net_device *, struct ethtool_stats *, u64 *); int (*begin)(struct net_device *); void (*complete)(struct net_device *); u32 (*get_priv_flags)(struct net_device *); int (*set_priv_flags)(struct net_device *, u32 ); int (*get_sset_count)(struct net_device *, int); int (*get_rxnfc)(struct net_device *, struct ethtool_rxnfc *, u32 *); int (*set_rxnfc)(struct net_device *, struct ethtool_rxnfc *); int (*flash_device)(struct net_device *, struct ethtool_flash *); int (*reset)(struct net_device *, u32 *); u32 (*get_rxfh_key_size)(struct net_device *); u32 (*get_rxfh_indir_size)(struct net_device *); int (*get_rxfh)(struct net_device *, u32 *, u8 *, u8 *); int (*set_rxfh)(struct net_device *, const u32 *, const u8 *, const u8 ); void (*get_channels)(struct net_device *, struct ethtool_channels *); int (*set_channels)(struct net_device *, struct ethtool_channels *); int (*get_dump_flag)(struct net_device *, struct ethtool_dump *); int (*get_dump_data)(struct net_device *, struct ethtool_dump *, void *); int (*set_dump)(struct net_device *, struct ethtool_dump *); int (*get_ts_info)(struct net_device *, struct ethtool_ts_info *); int (*get_module_info)(struct net_device *, struct ethtool_modinfo *); int (*get_module_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); int (*get_eee)(struct net_device *, struct ethtool_eee *); int (*set_eee)(struct net_device *, struct ethtool_eee *); int (*get_tunable)(struct net_device *, const struct ethtool_tunable *, void *); int (*set_tunable)(struct net_device *, const struct ethtool_tunable *, const void *); int (*get_per_queue_coalesce)(struct net_device *, u32 , struct ethtool_coalesce *); int (*set_per_queue_coalesce)(struct net_device *, u32 , struct ethtool_coalesce *); int (*get_link_ksettings)(struct net_device *, struct ethtool_link_ksettings *); int (*set_link_ksettings)(struct net_device *, const struct 
ethtool_link_ksettings *); } ; 375 struct prot_inuse ; 376 struct netns_core { struct ctl_table_header *sysctl_hdr; int sysctl_somaxconn; struct prot_inuse *inuse; } ; 38 struct u64_stats_sync { } ; 164 struct ipstats_mib { u64 mibs[36U]; struct u64_stats_sync syncp; } ; 61 struct icmp_mib { unsigned long mibs[28U]; } ; 67 struct icmpmsg_mib { atomic_long_t mibs[512U]; } ; 72 struct icmpv6_mib { unsigned long mibs[6U]; } ; 83 struct icmpv6msg_mib { atomic_long_t mibs[512U]; } ; 93 struct tcp_mib { unsigned long mibs[16U]; } ; 100 struct udp_mib { unsigned long mibs[9U]; } ; 106 struct linux_mib { unsigned long mibs[118U]; } ; 112 struct linux_xfrm_mib { unsigned long mibs[29U]; } ; 118 struct proc_dir_entry ; 118 struct netns_mib { struct tcp_mib *tcp_statistics; struct ipstats_mib *ip_statistics; struct linux_mib *net_statistics; struct udp_mib *udp_statistics; struct udp_mib *udplite_statistics; struct icmp_mib *icmp_statistics; struct icmpmsg_mib *icmpmsg_statistics; struct proc_dir_entry *proc_net_devsnmp6; struct udp_mib *udp_stats_in6; struct udp_mib *udplite_stats_in6; struct ipstats_mib *ipv6_statistics; struct icmpv6_mib *icmpv6_statistics; struct icmpv6msg_mib *icmpv6msg_statistics; struct linux_xfrm_mib *xfrm_statistics; } ; 26 struct netns_unix { int sysctl_max_dgram_qlen; struct ctl_table_header *ctl; } ; 12 struct netns_packet { struct mutex sklist_lock; struct hlist_head sklist; } ; 14 struct netns_frags { struct percpu_counter mem; int timeout; int high_thresh; int low_thresh; int max_dist; } ; 187 struct ipv4_devconf ; 188 struct fib_rules_ops ; 189 struct fib_table ; 190 struct local_ports { seqlock_t lock; int range[2U]; bool warned; } ; 24 struct ping_group_range { seqlock_t lock; kgid_t range[2U]; } ; 29 struct inet_peer_base ; 29 struct xt_table ; 29 struct netns_ipv4 { struct ctl_table_header *forw_hdr; struct ctl_table_header *frags_hdr; struct ctl_table_header *ipv4_hdr; struct ctl_table_header *route_hdr; struct ctl_table_header 
*xfrm4_hdr; struct ipv4_devconf *devconf_all; struct ipv4_devconf *devconf_dflt; struct fib_rules_ops *rules_ops; bool fib_has_custom_rules; struct fib_table *fib_main; struct fib_table *fib_default; int fib_num_tclassid_users; struct hlist_head *fib_table_hash; bool fib_offload_disabled; struct sock *fibnl; struct sock **icmp_sk; struct sock *mc_autojoin_sk; struct inet_peer_base *peers; struct sock **tcp_sk; struct netns_frags frags; struct xt_table *iptable_filter; struct xt_table *iptable_mangle; struct xt_table *iptable_raw; struct xt_table *arptable_filter; struct xt_table *iptable_security; struct xt_table *nat_table; int sysctl_icmp_echo_ignore_all; int sysctl_icmp_echo_ignore_broadcasts; int sysctl_icmp_ignore_bogus_error_responses; int sysctl_icmp_ratelimit; int sysctl_icmp_ratemask; int sysctl_icmp_errors_use_inbound_ifaddr; struct local_ports ip_local_ports; int sysctl_tcp_ecn; int sysctl_tcp_ecn_fallback; int sysctl_ip_default_ttl; int sysctl_ip_no_pmtu_disc; int sysctl_ip_fwd_use_pmtu; int sysctl_ip_nonlocal_bind; int sysctl_ip_dynaddr; int sysctl_ip_early_demux; int sysctl_fwmark_reflect; int sysctl_tcp_fwmark_accept; int sysctl_tcp_l3mdev_accept; int sysctl_tcp_mtu_probing; int sysctl_tcp_base_mss; int sysctl_tcp_probe_threshold; u32 sysctl_tcp_probe_interval; int sysctl_tcp_keepalive_time; int sysctl_tcp_keepalive_probes; int sysctl_tcp_keepalive_intvl; int sysctl_tcp_syn_retries; int sysctl_tcp_synack_retries; int sysctl_tcp_syncookies; int sysctl_tcp_reordering; int sysctl_tcp_retries1; int sysctl_tcp_retries2; int sysctl_tcp_orphan_retries; int sysctl_tcp_fin_timeout; unsigned int sysctl_tcp_notsent_lowat; int sysctl_igmp_max_memberships; int sysctl_igmp_max_msf; int sysctl_igmp_llm_reports; int sysctl_igmp_qrv; struct ping_group_range ping_group_range; atomic_t dev_addr_genid; unsigned long *sysctl_local_reserved_ports; struct list_head mr_tables; struct fib_rules_ops *mr_rules_ops; int sysctl_fib_multipath_use_neigh; unsigned int fib_seq; 
atomic_t rt_genid; } ; 144 struct neighbour ; 144 struct dst_ops { unsigned short family; unsigned int gc_thresh; int (*gc)(struct dst_ops *); struct dst_entry * (*check)(struct dst_entry *, __u32 ); unsigned int (*default_advmss)(const struct dst_entry *); unsigned int (*mtu)(const struct dst_entry *); u32 * (*cow_metrics)(struct dst_entry *, unsigned long); void (*destroy)(struct dst_entry *); void (*ifdown)(struct dst_entry *, struct net_device *, int); struct dst_entry * (*negative_advice)(struct dst_entry *); void (*link_failure)(struct sk_buff *); void (*update_pmtu)(struct dst_entry *, struct sock *, struct sk_buff *, u32 ); void (*redirect)(struct dst_entry *, struct sock *, struct sk_buff *); int (*local_out)(struct net *, struct sock *, struct sk_buff *); struct neighbour * (*neigh_lookup)(const struct dst_entry *, struct sk_buff *, const void *); struct kmem_cache *kmem_cachep; struct percpu_counter pcpuc_entries; } ; 73 struct netns_sysctl_ipv6 { struct ctl_table_header *hdr; struct ctl_table_header *route_hdr; struct ctl_table_header *icmp_hdr; struct ctl_table_header *frags_hdr; struct ctl_table_header *xfrm6_hdr; int bindv6only; int flush_delay; int ip6_rt_max_size; int ip6_rt_gc_min_interval; int ip6_rt_gc_timeout; int ip6_rt_gc_interval; int ip6_rt_gc_elasticity; int ip6_rt_mtu_expires; int ip6_rt_min_advmss; int flowlabel_consistency; int auto_flowlabels; int icmpv6_time; int anycast_src_echo_reply; int ip_nonlocal_bind; int fwmark_reflect; int idgen_retries; int idgen_delay; int flowlabel_state_ranges; } ; 40 struct ipv6_devconf ; 40 struct rt6_info ; 40 struct rt6_statistics ; 40 struct fib6_table ; 40 struct seg6_pernet_data ; 40 struct netns_ipv6 { struct netns_sysctl_ipv6 sysctl; struct ipv6_devconf *devconf_all; struct ipv6_devconf *devconf_dflt; struct inet_peer_base *peers; struct netns_frags frags; struct xt_table *ip6table_filter; struct xt_table *ip6table_mangle; struct xt_table *ip6table_raw; struct xt_table *ip6table_security; struct 
xt_table *ip6table_nat; struct rt6_info *ip6_null_entry; struct rt6_statistics *rt6_stats; struct timer_list ip6_fib_timer; struct hlist_head *fib_table_hash; struct fib6_table *fib6_main_tbl; struct list_head fib6_walkers; struct dst_ops ip6_dst_ops; rwlock_t fib6_walker_lock; spinlock_t fib6_gc_lock; unsigned int ip6_rt_gc_expire; unsigned long ip6_rt_last_gc; struct rt6_info *ip6_prohibit_entry; struct rt6_info *ip6_blk_hole_entry; struct fib6_table *fib6_local_tbl; struct fib_rules_ops *fib6_rules_ops; struct sock **icmp_sk; struct sock *ndisc_sk; struct sock *tcp_sk; struct sock *igmp_sk; struct sock *mc_autojoin_sk; struct list_head mr6_tables; struct fib_rules_ops *mr6_rules_ops; atomic_t dev_addr_genid; atomic_t fib6_sernum; struct seg6_pernet_data *seg6_data; } ; 90 struct netns_nf_frag { struct netns_sysctl_ipv6 sysctl; struct netns_frags frags; } ; 96 struct netns_sysctl_lowpan { struct ctl_table_header *frags_hdr; } ; 14 struct netns_ieee802154_lowpan { struct netns_sysctl_lowpan sysctl; struct netns_frags frags; } ; 20 struct sctp_mib ; 21 struct netns_sctp { struct sctp_mib *sctp_statistics; struct proc_dir_entry *proc_net_sctp; struct ctl_table_header *sysctl_header; struct sock *ctl_sock; struct list_head local_addr_list; struct list_head addr_waitq; struct timer_list addr_wq_timer; struct list_head auto_asconf_splist; spinlock_t addr_wq_lock; spinlock_t local_addr_lock; unsigned int rto_initial; unsigned int rto_min; unsigned int rto_max; int rto_alpha; int rto_beta; int max_burst; int cookie_preserve_enable; char *sctp_hmac_alg; unsigned int valid_cookie_life; unsigned int sack_timeout; unsigned int hb_interval; int max_retrans_association; int max_retrans_path; int max_retrans_init; int pf_retrans; int pf_enable; int sndbuf_policy; int rcvbuf_policy; int default_auto_asconf; int addip_enable; int addip_noauth; int prsctp_enable; int auth_enable; int scope_policy; int rwnd_upd_shift; unsigned long max_autoclose; } ; 141 struct netns_dccp { struct 
sock *v4_ctl_sk; struct sock *v6_ctl_sk; } ; 79 struct nf_logger ; 80 struct nf_queue_handler ; 81 struct nf_hook_entry ; 81 struct netns_nf { struct proc_dir_entry *proc_netfilter; const struct nf_queue_handler *queue_handler; const struct nf_logger *nf_loggers[13U]; struct ctl_table_header *nf_log_dir_header; struct nf_hook_entry *hooks[13U][8U]; bool defrag_ipv4; bool defrag_ipv6; } ; 26 struct ebt_table ; 27 struct netns_xt { struct list_head tables[13U]; bool notrack_deprecated_warning; bool clusterip_deprecated_warning; struct ebt_table *broute_table; struct ebt_table *frame_filter; struct ebt_table *frame_nat; } ; 19 struct hlist_nulls_node ; 19 struct hlist_nulls_head { struct hlist_nulls_node *first; } ; 23 struct hlist_nulls_node { struct hlist_nulls_node *next; struct hlist_nulls_node **pprev; } ; 114 struct ip_conntrack_stat { unsigned int found; unsigned int invalid; unsigned int ignore; unsigned int insert; unsigned int insert_failed; unsigned int drop; unsigned int early_drop; unsigned int error; unsigned int expect_new; unsigned int expect_create; unsigned int expect_delete; unsigned int search_restart; } ; 13 struct nf_proto_net { struct ctl_table_header *ctl_table_header; struct ctl_table *ctl_table; unsigned int users; } ; 27 struct nf_generic_net { struct nf_proto_net pn; unsigned int timeout; } ; 32 struct nf_tcp_net { struct nf_proto_net pn; unsigned int timeouts[14U]; unsigned int tcp_loose; unsigned int tcp_be_liberal; unsigned int tcp_max_retrans; } ; 46 struct nf_udp_net { struct nf_proto_net pn; unsigned int timeouts[2U]; } ; 51 struct nf_icmp_net { struct nf_proto_net pn; unsigned int timeout; } ; 56 struct nf_dccp_net { struct nf_proto_net pn; int dccp_loose; unsigned int dccp_timeout[10U]; } ; 63 struct nf_sctp_net { struct nf_proto_net pn; unsigned int timeouts[10U]; } ; 76 struct nf_udplite_net { struct nf_proto_net pn; unsigned int timeouts[2U]; } ; 83 struct nf_ip_net { struct nf_generic_net generic; struct nf_tcp_net tcp; struct 
nf_udp_net udp; struct nf_icmp_net icmp; struct nf_icmp_net icmpv6; struct nf_dccp_net dccp; struct nf_sctp_net sctp; struct nf_udplite_net udplite; } ; 100 struct ct_pcpu { spinlock_t lock; struct hlist_nulls_head unconfirmed; struct hlist_nulls_head dying; } ; 107 struct nf_ct_event_notifier ; 107 struct nf_exp_event_notifier ; 107 struct netns_ct { atomic_t count; unsigned int expect_count; struct delayed_work ecache_dwork; bool ecache_dwork_pending; struct ctl_table_header *sysctl_header; struct ctl_table_header *acct_sysctl_header; struct ctl_table_header *tstamp_sysctl_header; struct ctl_table_header *event_sysctl_header; struct ctl_table_header *helper_sysctl_header; unsigned int sysctl_log_invalid; int sysctl_events; int sysctl_acct; int sysctl_auto_assign_helper; bool auto_assign_helper_warned; int sysctl_tstamp; int sysctl_checksum; struct ct_pcpu *pcpu_lists; struct ip_conntrack_stat *stat; struct nf_ct_event_notifier *nf_conntrack_event_cb; struct nf_exp_event_notifier *nf_expect_event_cb; struct nf_ip_net nf_ct_proto; unsigned int labels_used; } ; 138 struct nft_af_info ; 139 struct netns_nftables { struct list_head af_info; struct list_head commit_list; struct nft_af_info *ipv4; struct nft_af_info *ipv6; struct nft_af_info *inet; struct nft_af_info *arp; struct nft_af_info *bridge; struct nft_af_info *netdev; unsigned int base_seq; u8 gencursor; } ; 506 struct tasklet_struct { struct tasklet_struct *next; unsigned long state; atomic_t count; void (*func)(unsigned long); unsigned long data; } ; 728 struct flow_cache_percpu { struct hlist_head *hash_table; int hash_count; u32 hash_rnd; int hash_rnd_recalc; struct tasklet_struct flush_tasklet; } ; 16 struct flow_cache { u32 hash_shift; struct flow_cache_percpu *percpu; struct hlist_node node; int low_watermark; int high_watermark; struct timer_list rnd_timer; } ; 25 struct xfrm_policy_hash { struct hlist_head *table; unsigned int hmask; u8 dbits4; u8 sbits4; u8 dbits6; u8 sbits6; } ; 21 struct 
xfrm_policy_hthresh { struct work_struct work; seqlock_t lock; u8 lbits4; u8 rbits4; u8 lbits6; u8 rbits6; } ; 30 struct netns_xfrm { struct list_head state_all; struct hlist_head *state_bydst; struct hlist_head *state_bysrc; struct hlist_head *state_byspi; unsigned int state_hmask; unsigned int state_num; struct work_struct state_hash_work; struct list_head policy_all; struct hlist_head *policy_byidx; unsigned int policy_idx_hmask; struct hlist_head policy_inexact[3U]; struct xfrm_policy_hash policy_bydst[3U]; unsigned int policy_count[6U]; struct work_struct policy_hash_work; struct xfrm_policy_hthresh policy_hthresh; struct sock *nlsk; struct sock *nlsk_stash; u32 sysctl_aevent_etime; u32 sysctl_aevent_rseqth; int sysctl_larval_drop; u32 sysctl_acq_expires; struct ctl_table_header *sysctl_hdr; struct dst_ops xfrm4_dst_ops; struct dst_ops xfrm6_dst_ops; spinlock_t xfrm_state_lock; spinlock_t xfrm_policy_lock; struct mutex xfrm_cfg_mutex; struct flow_cache flow_cache_global; atomic_t flow_cache_genid; struct list_head flow_cache_gc_list; atomic_t flow_cache_gc_count; spinlock_t flow_cache_gc_lock; struct work_struct flow_cache_gc_work; struct work_struct flow_cache_flush_work; struct mutex flow_flush_sem; } ; 87 struct mpls_route ; 88 struct netns_mpls { size_t platform_labels; struct mpls_route **platform_label; struct ctl_table_header *ctl; } ; 16 struct proc_ns_operations ; 17 struct ns_common { atomic_long_t stashed; const struct proc_ns_operations *ops; unsigned int inum; } ; 11 struct net_generic ; 12 struct netns_ipvs ; 13 struct ucounts ; 13 struct net { atomic_t passive; atomic_t count; spinlock_t rules_mod_lock; atomic64_t cookie_gen; struct list_head list; struct list_head cleanup_list; struct list_head exit_list; struct user_namespace *user_ns; struct ucounts *ucounts; spinlock_t nsid_lock; struct idr netns_ids; struct ns_common ns; struct proc_dir_entry *proc_net; struct proc_dir_entry *proc_net_stat; struct ctl_table_set sysctls; struct sock *rtnl; 
struct sock *genl_sock; struct list_head dev_base_head; struct hlist_head *dev_name_head; struct hlist_head *dev_index_head; unsigned int dev_base_seq; int ifindex; unsigned int dev_unreg_count; struct list_head rules_ops; struct net_device *loopback_dev; struct netns_core core; struct netns_mib mib; struct netns_packet packet; struct netns_unix unx; struct netns_ipv4 ipv4; struct netns_ipv6 ipv6; struct netns_ieee802154_lowpan ieee802154_lowpan; struct netns_sctp sctp; struct netns_dccp dccp; struct netns_nf nf; struct netns_xt xt; struct netns_ct ct; struct netns_nftables nft; struct netns_nf_frag nf_frag; struct sock *nfnl; struct sock *nfnl_stash; struct list_head nfnl_acct_list; struct list_head nfct_timeout_list; struct sk_buff_head wext_nlevents; struct net_generic *gen; struct netns_xfrm xfrm; struct netns_ipvs *ipvs; struct netns_mpls mpls; struct sock *diag_nlsk; atomic_t fnhe_genid; } ; 248 struct __anonstruct_possible_net_t_435 { struct net *net; } ; 248 typedef struct __anonstruct_possible_net_t_435 possible_net_t; 288 struct pernet_operations { struct list_head list; int (*init)(struct net *); void (*exit)(struct net *); void (*exit_batch)(struct list_head *); unsigned int *id; size_t size; } ; 13 typedef unsigned long kernel_ulong_t; 187 struct acpi_device_id { __u8 id[9U]; kernel_ulong_t driver_data; __u32 cls; __u32 cls_msk; } ; 230 struct of_device_id { char name[32U]; char type[32U]; char compatible[128U]; const void *data; } ; 675 enum fwnode_type { FWNODE_INVALID = 0, FWNODE_OF = 1, FWNODE_ACPI = 2, FWNODE_ACPI_DATA = 3, FWNODE_ACPI_STATIC = 4, FWNODE_PDATA = 5, FWNODE_IRQCHIP = 6 } ; 685 struct fwnode_handle { enum fwnode_type type; struct fwnode_handle *secondary; } ; 32 typedef u32 phandle; 34 struct property { char *name; int length; void *value; struct property *next; unsigned long _flags; unsigned int unique_id; struct bin_attribute attr; } ; 44 struct device_node { const char *name; const char *type; phandle phandle; const char 
*full_name; struct fwnode_handle fwnode; struct property *properties; struct property *deadprops; struct device_node *parent; struct device_node *child; struct device_node *sibling; struct kobject kobj; unsigned long _flags; void *data; } ; 296 struct mii_bus ; 303 struct mdio_device { struct device dev; const struct dev_pm_ops *pm_ops; struct mii_bus *bus; int (*bus_match)(struct device *, struct device_driver *); void (*device_free)(struct mdio_device *); void (*device_remove)(struct mdio_device *); int addr; int flags; } ; 41 struct mdio_driver_common { struct device_driver driver; int flags; } ; 244 struct phy_device ; 245 enum led_brightness { LED_OFF = 0, LED_HALF = 127, LED_FULL = 255 } ; 251 struct led_trigger ; 251 struct led_classdev { const char *name; enum led_brightness brightness; enum led_brightness max_brightness; int flags; unsigned long work_flags; void (*brightness_set)(struct led_classdev *, enum led_brightness ); int (*brightness_set_blocking)(struct led_classdev *, enum led_brightness ); enum led_brightness (*brightness_get)(struct led_classdev *); int (*blink_set)(struct led_classdev *, unsigned long *, unsigned long *); struct device *dev; const struct attribute_group **groups; struct list_head node; const char *default_trigger; unsigned long blink_delay_on; unsigned long blink_delay_off; struct timer_list blink_timer; int blink_brightness; int new_blink_brightness; void (*flash_resume)(struct led_classdev *); struct work_struct set_brightness_work; int delayed_set_value; struct rw_semaphore trigger_lock; struct led_trigger *trigger; struct list_head trig_list; void *trigger_data; bool activated; struct mutex led_access; } ; 226 struct led_trigger { const char *name; void (*activate)(struct led_classdev *); void (*deactivate)(struct led_classdev *); rwlock_t leddev_list_lock; struct list_head led_cdevs; struct list_head next_trig; } ; 418 struct phy_led_trigger { struct led_trigger trigger; char name[31U]; unsigned int speed; } ; 39 enum 
ldv_31739 { PHY_INTERFACE_MODE_NA = 0, PHY_INTERFACE_MODE_MII = 1, PHY_INTERFACE_MODE_GMII = 2, PHY_INTERFACE_MODE_SGMII = 3, PHY_INTERFACE_MODE_TBI = 4, PHY_INTERFACE_MODE_REVMII = 5, PHY_INTERFACE_MODE_RMII = 6, PHY_INTERFACE_MODE_RGMII = 7, PHY_INTERFACE_MODE_RGMII_ID = 8, PHY_INTERFACE_MODE_RGMII_RXID = 9, PHY_INTERFACE_MODE_RGMII_TXID = 10, PHY_INTERFACE_MODE_RTBI = 11, PHY_INTERFACE_MODE_SMII = 12, PHY_INTERFACE_MODE_XGMII = 13, PHY_INTERFACE_MODE_MOCA = 14, PHY_INTERFACE_MODE_QSGMII = 15, PHY_INTERFACE_MODE_TRGMII = 16, PHY_INTERFACE_MODE_MAX = 17 } ; 86 typedef enum ldv_31739 phy_interface_t; 149 enum ldv_31792 { MDIOBUS_ALLOCATED = 1, MDIOBUS_REGISTERED = 2, MDIOBUS_UNREGISTERED = 3, MDIOBUS_RELEASED = 4 } ; 156 struct mii_bus { struct module *owner; const char *name; char id[17U]; void *priv; int (*read)(struct mii_bus *, int, int); int (*write)(struct mii_bus *, int, int, u16 ); int (*reset)(struct mii_bus *); struct mutex mdio_lock; struct device *parent; enum ldv_31792 state; struct device dev; struct mdio_device *mdio_map[32U]; u32 phy_mask; u32 phy_ignore_ta_mask; int irq[32U]; } ; 237 enum phy_state { PHY_DOWN = 0, PHY_STARTING = 1, PHY_READY = 2, PHY_PENDING = 3, PHY_UP = 4, PHY_AN = 5, PHY_RUNNING = 6, PHY_NOLINK = 7, PHY_FORCING = 8, PHY_CHANGELINK = 9, PHY_HALTED = 10, PHY_RESUMING = 11 } ; 252 struct phy_c45_device_ids { u32 devices_in_package; u32 device_ids[8U]; } ; 345 struct phy_driver ; 345 struct phy_device { struct mdio_device mdio; struct phy_driver *drv; u32 phy_id; struct phy_c45_device_ids c45_ids; bool is_c45; bool is_internal; bool is_pseudo_fixed_link; bool has_fixups; bool suspended; enum phy_state state; u32 dev_flags; phy_interface_t interface; int speed; int duplex; int pause; int asym_pause; int link; u32 interrupts; u32 supported; u32 advertising; u32 lp_advertising; u32 eee_broken_modes; int autoneg; int link_timeout; struct phy_led_trigger *phy_led_triggers; unsigned int phy_num_led_triggers; struct phy_led_trigger 
*last_triggered; int irq; void *priv; struct work_struct phy_queue; struct delayed_work state_queue; atomic_t irq_disable; struct mutex lock; struct net_device *attached_dev; u8 mdix; u8 mdix_ctrl; void (*adjust_link)(struct net_device *); } ; 457 struct phy_driver { struct mdio_driver_common mdiodrv; u32 phy_id; char *name; unsigned int phy_id_mask; u32 features; u32 flags; const void *driver_data; int (*soft_reset)(struct phy_device *); int (*config_init)(struct phy_device *); int (*probe)(struct phy_device *); int (*suspend)(struct phy_device *); int (*resume)(struct phy_device *); int (*config_aneg)(struct phy_device *); int (*aneg_done)(struct phy_device *); int (*read_status)(struct phy_device *); int (*ack_interrupt)(struct phy_device *); int (*config_intr)(struct phy_device *); int (*did_interrupt)(struct phy_device *); void (*remove)(struct phy_device *); int (*match_phy_device)(struct phy_device *); int (*ts_info)(struct phy_device *, struct ethtool_ts_info *); int (*hwtstamp)(struct phy_device *, struct ifreq *); bool (*rxtstamp)(struct phy_device *, struct sk_buff *, int); void (*txtstamp)(struct phy_device *, struct sk_buff *, int); int (*set_wol)(struct phy_device *, struct ethtool_wolinfo *); void (*get_wol)(struct phy_device *, struct ethtool_wolinfo *); void (*link_change_notify)(struct phy_device *); int (*read_mmd_indirect)(struct phy_device *, int, int, int); void (*write_mmd_indirect)(struct phy_device *, int, int, int, u32 ); int (*module_info)(struct phy_device *, struct ethtool_modinfo *); int (*module_eeprom)(struct phy_device *, struct ethtool_eeprom *, u8 *); int (*get_sset_count)(struct phy_device *); void (*get_strings)(struct phy_device *, u8 *); void (*get_stats)(struct phy_device *, struct ethtool_stats *, u64 *); int (*get_tunable)(struct phy_device *, struct ethtool_tunable *, void *); int (*set_tunable)(struct phy_device *, struct ethtool_tunable *, const void *); } ; 884 struct fixed_phy_status { int link; int speed; int duplex; 
int pause; int asym_pause; } ; 27 enum dsa_tag_protocol { DSA_TAG_PROTO_NONE = 0, DSA_TAG_PROTO_DSA = 1, DSA_TAG_PROTO_TRAILER = 2, DSA_TAG_PROTO_EDSA = 3, DSA_TAG_PROTO_BRCM = 4, DSA_TAG_PROTO_QCA = 5, DSA_TAG_LAST = 6 } ; 37 struct dsa_chip_data { struct device *host_dev; int sw_addr; int eeprom_len; struct device_node *of_node; char *port_names[12U]; struct device_node *port_dn[12U]; s8 rtable[4U]; } ; 71 struct dsa_platform_data { struct device *netdev; struct net_device *of_netdev; int nr_chips; struct dsa_chip_data *chip; } ; 87 struct packet_type ; 88 struct dsa_switch ; 88 struct dsa_device_ops ; 88 struct dsa_switch_tree { struct list_head list; u32 tree; struct kref refcount; bool applied; struct dsa_platform_data *pd; struct net_device *master_netdev; int (*rcv)(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *); struct ethtool_ops master_ethtool_ops; const struct ethtool_ops *master_orig_ethtool_ops; s8 cpu_switch; s8 cpu_port; struct dsa_switch *ds[4U]; const struct dsa_device_ops *tag_ops; } ; 141 struct dsa_port { struct net_device *netdev; struct device_node *dn; unsigned int ageing_time; u8 stp_state; } ; 148 struct dsa_switch_ops ; 148 struct dsa_switch { struct device *dev; struct dsa_switch_tree *dst; int index; void *priv; struct dsa_chip_data *cd; struct dsa_switch_ops *ops; s8 rtable[4U]; char hwmon_name[24U]; struct device *hwmon_dev; struct net_device *master_netdev; u32 dsa_port_mask; u32 cpu_port_mask; u32 enabled_port_mask; u32 phys_mii_mask; struct dsa_port ports[12U]; struct mii_bus *slave_mii_bus; } ; 235 struct switchdev_trans ; 236 struct switchdev_obj ; 237 struct switchdev_obj_port_fdb ; 238 struct switchdev_obj_port_mdb ; 239 struct switchdev_obj_port_vlan ; 240 struct dsa_switch_ops { struct list_head list; const char * (*probe)(struct device *, struct device *, int, void **); enum dsa_tag_protocol (*get_tag_protocol)(struct dsa_switch *); int (*setup)(struct dsa_switch *); int (*set_addr)(struct 
dsa_switch *, u8 *); u32 (*get_phy_flags)(struct dsa_switch *, int); int (*phy_read)(struct dsa_switch *, int, int); int (*phy_write)(struct dsa_switch *, int, int, u16 ); void (*adjust_link)(struct dsa_switch *, int, struct phy_device *); void (*fixed_link_update)(struct dsa_switch *, int, struct fixed_phy_status *); void (*get_strings)(struct dsa_switch *, int, uint8_t *); void (*get_ethtool_stats)(struct dsa_switch *, int, uint64_t *); int (*get_sset_count)(struct dsa_switch *); void (*get_wol)(struct dsa_switch *, int, struct ethtool_wolinfo *); int (*set_wol)(struct dsa_switch *, int, struct ethtool_wolinfo *); int (*suspend)(struct dsa_switch *); int (*resume)(struct dsa_switch *); int (*port_enable)(struct dsa_switch *, int, struct phy_device *); void (*port_disable)(struct dsa_switch *, int, struct phy_device *); int (*set_eee)(struct dsa_switch *, int, struct phy_device *, struct ethtool_eee *); int (*get_eee)(struct dsa_switch *, int, struct ethtool_eee *); int (*get_temp)(struct dsa_switch *, int *); int (*get_temp_limit)(struct dsa_switch *, int *); int (*set_temp_limit)(struct dsa_switch *, int); int (*get_temp_alarm)(struct dsa_switch *, bool *); int (*get_eeprom_len)(struct dsa_switch *); int (*get_eeprom)(struct dsa_switch *, struct ethtool_eeprom *, u8 *); int (*set_eeprom)(struct dsa_switch *, struct ethtool_eeprom *, u8 *); int (*get_regs_len)(struct dsa_switch *, int); void (*get_regs)(struct dsa_switch *, int, struct ethtool_regs *, void *); int (*set_ageing_time)(struct dsa_switch *, unsigned int); int (*port_bridge_join)(struct dsa_switch *, int, struct net_device *); void (*port_bridge_leave)(struct dsa_switch *, int); void (*port_stp_state_set)(struct dsa_switch *, int, u8 ); void (*port_fast_age)(struct dsa_switch *, int); int (*port_vlan_filtering)(struct dsa_switch *, int, bool ); int (*port_vlan_prepare)(struct dsa_switch *, int, const struct switchdev_obj_port_vlan *, struct switchdev_trans *); void (*port_vlan_add)(struct dsa_switch 
*, int, const struct switchdev_obj_port_vlan *, struct switchdev_trans *); int (*port_vlan_del)(struct dsa_switch *, int, const struct switchdev_obj_port_vlan *); int (*port_vlan_dump)(struct dsa_switch *, int, struct switchdev_obj_port_vlan *, int (*)(struct switchdev_obj *)); int (*port_fdb_prepare)(struct dsa_switch *, int, const struct switchdev_obj_port_fdb *, struct switchdev_trans *); void (*port_fdb_add)(struct dsa_switch *, int, const struct switchdev_obj_port_fdb *, struct switchdev_trans *); int (*port_fdb_del)(struct dsa_switch *, int, const struct switchdev_obj_port_fdb *); int (*port_fdb_dump)(struct dsa_switch *, int, struct switchdev_obj_port_fdb *, int (*)(struct switchdev_obj *)); int (*port_mdb_prepare)(struct dsa_switch *, int, const struct switchdev_obj_port_mdb *, struct switchdev_trans *); void (*port_mdb_add)(struct dsa_switch *, int, const struct switchdev_obj_port_mdb *, struct switchdev_trans *); int (*port_mdb_del)(struct dsa_switch *, int, const struct switchdev_obj_port_mdb *); int (*port_mdb_dump)(struct dsa_switch *, int, struct switchdev_obj_port_mdb *, int (*)(struct switchdev_obj *)); } ; 407 struct ieee_ets { __u8 willing; __u8 ets_cap; __u8 cbs; __u8 tc_tx_bw[8U]; __u8 tc_rx_bw[8U]; __u8 tc_tsa[8U]; __u8 prio_tc[8U]; __u8 tc_reco_bw[8U]; __u8 tc_reco_tsa[8U]; __u8 reco_prio_tc[8U]; } ; 69 struct ieee_maxrate { __u64 tc_maxrate[8U]; } ; 87 struct ieee_qcn { __u8 rpg_enable[8U]; __u32 rppp_max_rps[8U]; __u32 rpg_time_reset[8U]; __u32 rpg_byte_reset[8U]; __u32 rpg_threshold[8U]; __u32 rpg_max_rate[8U]; __u32 rpg_ai_rate[8U]; __u32 rpg_hai_rate[8U]; __u32 rpg_gd[8U]; __u32 rpg_min_dec_fac[8U]; __u32 rpg_min_rate[8U]; __u32 cndd_state_machine[8U]; } ; 132 struct ieee_qcn_stats { __u64 rppp_rp_centiseconds[8U]; __u32 rppp_created_rps[8U]; } ; 144 struct ieee_pfc { __u8 pfc_cap; __u8 pfc_en; __u8 mbc; __u16 delay; __u64 requests[8U]; __u64 indications[8U]; } ; 164 struct cee_pg { __u8 willing; __u8 error; __u8 pg_en; __u8 
tcs_supported; __u8 pg_bw[8U]; __u8 prio_pg[8U]; } ; 187 struct cee_pfc { __u8 willing; __u8 error; __u8 pfc_en; __u8 tcs_supported; } ; 202 struct dcb_app { __u8 selector; __u8 priority; __u16 protocol; } ; 236 struct dcb_peer_app_info { __u8 willing; __u8 error; } ; 40 struct dcbnl_rtnl_ops { int (*ieee_getets)(struct net_device *, struct ieee_ets *); int (*ieee_setets)(struct net_device *, struct ieee_ets *); int (*ieee_getmaxrate)(struct net_device *, struct ieee_maxrate *); int (*ieee_setmaxrate)(struct net_device *, struct ieee_maxrate *); int (*ieee_getqcn)(struct net_device *, struct ieee_qcn *); int (*ieee_setqcn)(struct net_device *, struct ieee_qcn *); int (*ieee_getqcnstats)(struct net_device *, struct ieee_qcn_stats *); int (*ieee_getpfc)(struct net_device *, struct ieee_pfc *); int (*ieee_setpfc)(struct net_device *, struct ieee_pfc *); int (*ieee_getapp)(struct net_device *, struct dcb_app *); int (*ieee_setapp)(struct net_device *, struct dcb_app *); int (*ieee_delapp)(struct net_device *, struct dcb_app *); int (*ieee_peer_getets)(struct net_device *, struct ieee_ets *); int (*ieee_peer_getpfc)(struct net_device *, struct ieee_pfc *); u8 (*getstate)(struct net_device *); u8 (*setstate)(struct net_device *, u8 ); void (*getpermhwaddr)(struct net_device *, u8 *); void (*setpgtccfgtx)(struct net_device *, int, u8 , u8 , u8 , u8 ); void (*setpgbwgcfgtx)(struct net_device *, int, u8 ); void (*setpgtccfgrx)(struct net_device *, int, u8 , u8 , u8 , u8 ); void (*setpgbwgcfgrx)(struct net_device *, int, u8 ); void (*getpgtccfgtx)(struct net_device *, int, u8 *, u8 *, u8 *, u8 *); void (*getpgbwgcfgtx)(struct net_device *, int, u8 *); void (*getpgtccfgrx)(struct net_device *, int, u8 *, u8 *, u8 *, u8 *); void (*getpgbwgcfgrx)(struct net_device *, int, u8 *); void (*setpfccfg)(struct net_device *, int, u8 ); void (*getpfccfg)(struct net_device *, int, u8 *); u8 (*setall)(struct net_device *); u8 (*getcap)(struct net_device *, int, u8 *); int 
(*getnumtcs)(struct net_device *, int, u8 *); int (*setnumtcs)(struct net_device *, int, u8 ); u8 (*getpfcstate)(struct net_device *); void (*setpfcstate)(struct net_device *, u8 ); void (*getbcncfg)(struct net_device *, int, u32 *); void (*setbcncfg)(struct net_device *, int, u32 ); void (*getbcnrp)(struct net_device *, int, u8 *); void (*setbcnrp)(struct net_device *, int, u8 ); int (*setapp)(struct net_device *, u8 , u16 , u8 ); int (*getapp)(struct net_device *, u8 , u16 ); u8 (*getfeatcfg)(struct net_device *, int, u8 *); u8 (*setfeatcfg)(struct net_device *, int, u8 ); u8 (*getdcbx)(struct net_device *); u8 (*setdcbx)(struct net_device *, u8 ); int (*peer_getappinfo)(struct net_device *, struct dcb_peer_app_info *, u16 *); int (*peer_getapptable)(struct net_device *, struct dcb_app *); int (*cee_peer_getpg)(struct net_device *, struct cee_pg *); int (*cee_peer_getpfc)(struct net_device *, struct cee_pfc *); } ; 105 struct taskstats { __u16 version; __u32 ac_exitcode; __u8 ac_flag; __u8 ac_nice; __u64 cpu_count; __u64 cpu_delay_total; __u64 blkio_count; __u64 blkio_delay_total; __u64 swapin_count; __u64 swapin_delay_total; __u64 cpu_run_real_total; __u64 cpu_run_virtual_total; char ac_comm[32U]; __u8 ac_sched; __u8 ac_pad[3U]; __u32 ac_uid; __u32 ac_gid; __u32 ac_pid; __u32 ac_ppid; __u32 ac_btime; __u64 ac_etime; __u64 ac_utime; __u64 ac_stime; __u64 ac_minflt; __u64 ac_majflt; __u64 coremem; __u64 virtmem; __u64 hiwater_rss; __u64 hiwater_vm; __u64 read_char; __u64 write_char; __u64 read_syscalls; __u64 write_syscalls; __u64 read_bytes; __u64 write_bytes; __u64 cancelled_write_bytes; __u64 nvcsw; __u64 nivcsw; __u64 ac_utimescaled; __u64 ac_stimescaled; __u64 cpu_scaled_run_real_total; __u64 freepages_count; __u64 freepages_delay_total; } ; 58 struct mnt_namespace ; 59 struct ipc_namespace ; 60 struct cgroup_namespace ; 61 struct nsproxy { atomic_t count; struct uts_namespace *uts_ns; struct ipc_namespace *ipc_ns; struct mnt_namespace *mnt_ns; struct 
pid_namespace *pid_ns_for_children; struct net *net_ns; struct cgroup_namespace *cgroup_ns; } ; 86 struct uid_gid_extent { u32 first; u32 lower_first; u32 count; } ; 19 struct uid_gid_map { u32 nr_extents; struct uid_gid_extent extent[5U]; } ; 31 struct user_namespace { struct uid_gid_map uid_map; struct uid_gid_map gid_map; struct uid_gid_map projid_map; atomic_t count; struct user_namespace *parent; int level; kuid_t owner; kgid_t group; struct ns_common ns; unsigned long flags; struct key *persistent_keyring_register; struct rw_semaphore persistent_keyring_register_sem; struct work_struct work; struct ctl_table_set set; struct ctl_table_header *sysctls; struct ucounts *ucounts; int ucount_max[7U]; } ; 63 struct ucounts { struct hlist_node node; struct user_namespace *ns; kuid_t uid; atomic_t count; atomic_t ucount[7U]; } ; 631 struct cgroup_namespace { atomic_t count; struct ns_common ns; struct user_namespace *user_ns; struct ucounts *ucounts; struct css_set *root_cset; } ; 686 struct netprio_map { struct callback_head rcu; u32 priomap_len; u32 priomap[]; } ; 41 struct nlmsghdr { __u32 nlmsg_len; __u16 nlmsg_type; __u16 nlmsg_flags; __u32 nlmsg_seq; __u32 nlmsg_pid; } ; 143 struct nlattr { __u16 nla_len; __u16 nla_type; } ; 105 struct netlink_callback { struct sk_buff *skb; const struct nlmsghdr *nlh; int (*start)(struct netlink_callback *); int (*dump)(struct sk_buff *, struct netlink_callback *); int (*done)(struct netlink_callback *); void *data; struct module *module; u16 family; u16 min_dump_alloc; unsigned int prev_seq; unsigned int seq; long args[6U]; } ; 183 struct ndmsg { __u8 ndm_family; __u8 ndm_pad1; __u16 ndm_pad2; __s32 ndm_ifindex; __u16 ndm_state; __u8 ndm_flags; __u8 ndm_type; } ; 41 struct rtnl_link_stats64 { __u64 rx_packets; __u64 tx_packets; __u64 rx_bytes; __u64 tx_bytes; __u64 rx_errors; __u64 tx_errors; __u64 rx_dropped; __u64 tx_dropped; __u64 multicast; __u64 collisions; __u64 rx_length_errors; __u64 rx_over_errors; __u64 
rx_crc_errors; __u64 rx_frame_errors; __u64 rx_fifo_errors; __u64 rx_missed_errors; __u64 tx_aborted_errors; __u64 tx_carrier_errors; __u64 tx_fifo_errors; __u64 tx_heartbeat_errors; __u64 tx_window_errors; __u64 rx_compressed; __u64 tx_compressed; __u64 rx_nohandler; } ; 869 struct ifla_vf_stats { __u64 rx_packets; __u64 tx_packets; __u64 rx_bytes; __u64 tx_bytes; __u64 broadcast; __u64 multicast; } ; 16 struct ifla_vf_info { __u32 vf; __u8 mac[32U]; __u32 vlan; __u32 qos; __u32 spoofchk; __u32 linkstate; __u32 min_tx_rate; __u32 max_tx_rate; __u32 rss_query_en; __u32 trusted; __be16 vlan_proto; } ; 118 struct tc_stats { __u64 bytes; __u32 packets; __u32 drops; __u32 overlimits; __u32 bps; __u32 pps; __u32 qlen; __u32 backlog; } ; 96 struct tc_sizespec { unsigned char cell_log; unsigned char size_log; short cell_align; int overhead; unsigned int linklayer; unsigned int mpu; unsigned int mtu; unsigned int tsize; } ; 122 struct tcf_t { __u64 install; __u64 lastuse; __u64 expires; __u64 firstuse; } ; 117 struct netpoll_info ; 118 struct wireless_dev ; 119 struct wpan_dev ; 120 struct mpls_dev ; 121 struct udp_tunnel_info ; 70 enum netdev_tx { __NETDEV_TX_MIN = -2147483648, NETDEV_TX_OK = 0, NETDEV_TX_BUSY = 16 } ; 113 typedef enum netdev_tx netdev_tx_t; 132 struct net_device_stats { unsigned long rx_packets; unsigned long tx_packets; unsigned long rx_bytes; unsigned long tx_bytes; unsigned long rx_errors; unsigned long tx_errors; unsigned long rx_dropped; unsigned long tx_dropped; unsigned long multicast; unsigned long collisions; unsigned long rx_length_errors; unsigned long rx_over_errors; unsigned long rx_crc_errors; unsigned long rx_frame_errors; unsigned long rx_fifo_errors; unsigned long rx_missed_errors; unsigned long tx_aborted_errors; unsigned long tx_carrier_errors; unsigned long tx_fifo_errors; unsigned long tx_heartbeat_errors; unsigned long tx_window_errors; unsigned long rx_compressed; unsigned long tx_compressed; } ; 196 struct neigh_parms ; 217 struct 
netdev_hw_addr_list { struct list_head list; int count; } ; 222 struct hh_cache { u16 hh_len; u16 __pad; seqlock_t hh_lock; unsigned long hh_data[16U]; } ; 251 struct header_ops { int (*create)(struct sk_buff *, struct net_device *, unsigned short, const void *, const void *, unsigned int); int (*parse)(const struct sk_buff *, unsigned char *); int (*cache)(const struct neighbour *, struct hh_cache *, __be16 ); void (*cache_update)(struct hh_cache *, const struct net_device *, const unsigned char *); bool (*validate)(const char *, unsigned int); } ; 302 struct napi_struct { struct list_head poll_list; unsigned long state; int weight; unsigned int gro_count; int (*poll)(struct napi_struct *, int); int poll_owner; struct net_device *dev; struct sk_buff *gro_list; struct sk_buff *skb; struct hrtimer timer; struct list_head dev_list; struct hlist_node napi_hash_node; unsigned int napi_id; } ; 357 enum rx_handler_result { RX_HANDLER_CONSUMED = 0, RX_HANDLER_ANOTHER = 1, RX_HANDLER_EXACT = 2, RX_HANDLER_PASS = 3 } ; 405 typedef enum rx_handler_result rx_handler_result_t; 406 typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **); 541 struct Qdisc ; 541 struct netdev_queue { struct net_device *dev; struct Qdisc *qdisc; struct Qdisc *qdisc_sleeping; struct kobject kobj; int numa_node; unsigned long tx_maxrate; unsigned long trans_timeout; spinlock_t _xmit_lock; int xmit_lock_owner; unsigned long trans_start; unsigned long state; struct dql dql; } ; 612 struct rps_map { unsigned int len; struct callback_head rcu; u16 cpus[0U]; } ; 624 struct rps_dev_flow { u16 cpu; u16 filter; unsigned int last_qtail; } ; 636 struct rps_dev_flow_table { unsigned int mask; struct callback_head rcu; struct rps_dev_flow flows[0U]; } ; 688 struct netdev_rx_queue { struct rps_map *rps_map; struct rps_dev_flow_table *rps_flow_table; struct kobject kobj; struct net_device *dev; } ; 711 struct xps_map { unsigned int len; unsigned int alloc_len; struct callback_head rcu; u16 queues[0U]; } 
; 724 struct xps_dev_maps { struct callback_head rcu; struct xps_map *cpu_map[0U]; } ; 735 struct netdev_tc_txq { u16 count; u16 offset; } ; 746 struct netdev_fcoe_hbainfo { char manufacturer[64U]; char serial_number[64U]; char hardware_version[64U]; char driver_version[64U]; char optionrom_version[64U]; char firmware_version[64U]; char model[256U]; char model_description[256U]; } ; 762 struct netdev_phys_item_id { unsigned char id[32U]; unsigned char id_len; } ; 790 struct tc_cls_u32_offload ; 791 struct tc_cls_flower_offload ; 791 struct tc_cls_matchall_offload ; 791 struct tc_cls_bpf_offload ; 791 union __anonunion____missing_field_name_451 { u8 tc; struct tc_cls_u32_offload *cls_u32; struct tc_cls_flower_offload *cls_flower; struct tc_cls_matchall_offload *cls_mall; struct tc_cls_bpf_offload *cls_bpf; } ; 791 struct tc_to_netdev { unsigned int type; union __anonunion____missing_field_name_451 __annonCompField114; bool egress_dev; } ; 808 enum xdp_netdev_command { XDP_SETUP_PROG = 0, XDP_QUERY_PROG = 1 } ; 813 union __anonunion____missing_field_name_452 { struct bpf_prog *prog; bool prog_attached; } ; 813 struct netdev_xdp { enum xdp_netdev_command command; union __anonunion____missing_field_name_452 __annonCompField115; } ; 836 struct net_device_ops { int (*ndo_init)(struct net_device *); void (*ndo_uninit)(struct net_device *); int (*ndo_open)(struct net_device *); int (*ndo_stop)(struct net_device *); netdev_tx_t (*ndo_start_xmit)(struct sk_buff *, struct net_device *); netdev_features_t (*ndo_features_check)(struct sk_buff *, struct net_device *, netdev_features_t ); u16 (*ndo_select_queue)(struct net_device *, struct sk_buff *, void *, u16 (*)(struct net_device *, struct sk_buff *)); void (*ndo_change_rx_flags)(struct net_device *, int); void (*ndo_set_rx_mode)(struct net_device *); int (*ndo_set_mac_address)(struct net_device *, void *); int (*ndo_validate_addr)(struct net_device *); int (*ndo_do_ioctl)(struct net_device *, struct ifreq *, int); int 
(*ndo_set_config)(struct net_device *, struct ifmap *); int (*ndo_change_mtu)(struct net_device *, int); int (*ndo_neigh_setup)(struct net_device *, struct neigh_parms *); void (*ndo_tx_timeout)(struct net_device *); struct rtnl_link_stats64 * (*ndo_get_stats64)(struct net_device *, struct rtnl_link_stats64 *); bool (*ndo_has_offload_stats)(const struct net_device *, int); int (*ndo_get_offload_stats)(int, const struct net_device *, void *); struct net_device_stats * (*ndo_get_stats)(struct net_device *); int (*ndo_vlan_rx_add_vid)(struct net_device *, __be16 , u16 ); int (*ndo_vlan_rx_kill_vid)(struct net_device *, __be16 , u16 ); void (*ndo_poll_controller)(struct net_device *); int (*ndo_netpoll_setup)(struct net_device *, struct netpoll_info *); void (*ndo_netpoll_cleanup)(struct net_device *); int (*ndo_busy_poll)(struct napi_struct *); int (*ndo_set_vf_mac)(struct net_device *, int, u8 *); int (*ndo_set_vf_vlan)(struct net_device *, int, u16 , u8 , __be16 ); int (*ndo_set_vf_rate)(struct net_device *, int, int, int); int (*ndo_set_vf_spoofchk)(struct net_device *, int, bool ); int (*ndo_set_vf_trust)(struct net_device *, int, bool ); int (*ndo_get_vf_config)(struct net_device *, int, struct ifla_vf_info *); int (*ndo_set_vf_link_state)(struct net_device *, int, int); int (*ndo_get_vf_stats)(struct net_device *, int, struct ifla_vf_stats *); int (*ndo_set_vf_port)(struct net_device *, int, struct nlattr **); int (*ndo_get_vf_port)(struct net_device *, int, struct sk_buff *); int (*ndo_set_vf_guid)(struct net_device *, int, u64 , int); int (*ndo_set_vf_rss_query_en)(struct net_device *, int, bool ); int (*ndo_setup_tc)(struct net_device *, u32 , __be16 , struct tc_to_netdev *); int (*ndo_fcoe_enable)(struct net_device *); int (*ndo_fcoe_disable)(struct net_device *); int (*ndo_fcoe_ddp_setup)(struct net_device *, u16 , struct scatterlist *, unsigned int); int (*ndo_fcoe_ddp_done)(struct net_device *, u16 ); int (*ndo_fcoe_ddp_target)(struct net_device *, u16 , 
struct scatterlist *, unsigned int); int (*ndo_fcoe_get_hbainfo)(struct net_device *, struct netdev_fcoe_hbainfo *); int (*ndo_fcoe_get_wwn)(struct net_device *, u64 *, int); int (*ndo_rx_flow_steer)(struct net_device *, const struct sk_buff *, u16 , u32 ); int (*ndo_add_slave)(struct net_device *, struct net_device *); int (*ndo_del_slave)(struct net_device *, struct net_device *); netdev_features_t (*ndo_fix_features)(struct net_device *, netdev_features_t ); int (*ndo_set_features)(struct net_device *, netdev_features_t ); int (*ndo_neigh_construct)(struct net_device *, struct neighbour *); void (*ndo_neigh_destroy)(struct net_device *, struct neighbour *); int (*ndo_fdb_add)(struct ndmsg *, struct nlattr **, struct net_device *, const unsigned char *, u16 , u16 ); int (*ndo_fdb_del)(struct ndmsg *, struct nlattr **, struct net_device *, const unsigned char *, u16 ); int (*ndo_fdb_dump)(struct sk_buff *, struct netlink_callback *, struct net_device *, struct net_device *, int *); int (*ndo_bridge_setlink)(struct net_device *, struct nlmsghdr *, u16 ); int (*ndo_bridge_getlink)(struct sk_buff *, u32 , u32 , struct net_device *, u32 , int); int (*ndo_bridge_dellink)(struct net_device *, struct nlmsghdr *, u16 ); int (*ndo_change_carrier)(struct net_device *, bool ); int (*ndo_get_phys_port_id)(struct net_device *, struct netdev_phys_item_id *); int (*ndo_get_phys_port_name)(struct net_device *, char *, size_t ); void (*ndo_udp_tunnel_add)(struct net_device *, struct udp_tunnel_info *); void (*ndo_udp_tunnel_del)(struct net_device *, struct udp_tunnel_info *); void * (*ndo_dfwd_add_station)(struct net_device *, struct net_device *); void (*ndo_dfwd_del_station)(struct net_device *, void *); netdev_tx_t (*ndo_dfwd_start_xmit)(struct sk_buff *, struct net_device *, void *); int (*ndo_get_lock_subclass)(struct net_device *); int (*ndo_set_tx_maxrate)(struct net_device *, int, u32 ); int (*ndo_get_iflink)(const struct net_device *); int (*ndo_change_proto_down)(struct 
net_device *, bool ); int (*ndo_fill_metadata_dst)(struct net_device *, struct sk_buff *); void (*ndo_set_rx_headroom)(struct net_device *, int); int (*ndo_xdp)(struct net_device *, struct netdev_xdp *); } ; 1372 struct __anonstruct_adj_list_453 { struct list_head upper; struct list_head lower; } ; 1372 struct iw_handler_def ; 1372 struct iw_public_data ; 1372 struct switchdev_ops ; 1372 struct l3mdev_ops ; 1372 struct ndisc_ops ; 1372 struct vlan_info ; 1372 struct tipc_bearer ; 1372 struct in_device ; 1372 struct dn_dev ; 1372 struct inet6_dev ; 1372 struct tcf_proto ; 1372 struct cpu_rmap ; 1372 struct pcpu_lstats ; 1372 struct pcpu_sw_netstats ; 1372 struct pcpu_dstats ; 1372 struct pcpu_vstats ; 1372 union __anonunion____missing_field_name_454 { void *ml_priv; struct pcpu_lstats *lstats; struct pcpu_sw_netstats *tstats; struct pcpu_dstats *dstats; struct pcpu_vstats *vstats; } ; 1372 struct garp_port ; 1372 struct mrp_port ; 1372 struct rtnl_link_ops ; 1372 struct net_device { char name[16U]; struct hlist_node name_hlist; char *ifalias; unsigned long mem_end; unsigned long mem_start; unsigned long base_addr; int irq; atomic_t carrier_changes; unsigned long state; struct list_head dev_list; struct list_head napi_list; struct list_head unreg_list; struct list_head close_list; struct list_head ptype_all; struct list_head ptype_specific; struct __anonstruct_adj_list_453 adj_list; netdev_features_t features; netdev_features_t hw_features; netdev_features_t wanted_features; netdev_features_t vlan_features; netdev_features_t hw_enc_features; netdev_features_t mpls_features; netdev_features_t gso_partial_features; int ifindex; int group; struct net_device_stats stats; atomic_long_t rx_dropped; atomic_long_t tx_dropped; atomic_long_t rx_nohandler; const struct iw_handler_def *wireless_handlers; struct iw_public_data *wireless_data; const struct net_device_ops *netdev_ops; const struct ethtool_ops *ethtool_ops; const struct switchdev_ops *switchdev_ops; const struct 
l3mdev_ops *l3mdev_ops; const struct ndisc_ops *ndisc_ops; const struct header_ops *header_ops; unsigned int flags; unsigned int priv_flags; unsigned short gflags; unsigned short padded; unsigned char operstate; unsigned char link_mode; unsigned char if_port; unsigned char dma; unsigned int mtu; unsigned int min_mtu; unsigned int max_mtu; unsigned short type; unsigned short hard_header_len; unsigned short needed_headroom; unsigned short needed_tailroom; unsigned char perm_addr[32U]; unsigned char addr_assign_type; unsigned char addr_len; unsigned short neigh_priv_len; unsigned short dev_id; unsigned short dev_port; spinlock_t addr_list_lock; unsigned char name_assign_type; bool uc_promisc; struct netdev_hw_addr_list uc; struct netdev_hw_addr_list mc; struct netdev_hw_addr_list dev_addrs; struct kset *queues_kset; unsigned int promiscuity; unsigned int allmulti; struct vlan_info *vlan_info; struct dsa_switch_tree *dsa_ptr; struct tipc_bearer *tipc_ptr; void *atalk_ptr; struct in_device *ip_ptr; struct dn_dev *dn_ptr; struct inet6_dev *ip6_ptr; void *ax25_ptr; struct wireless_dev *ieee80211_ptr; struct wpan_dev *ieee802154_ptr; struct mpls_dev *mpls_ptr; unsigned long last_rx; unsigned char *dev_addr; struct netdev_rx_queue *_rx; unsigned int num_rx_queues; unsigned int real_num_rx_queues; unsigned long gro_flush_timeout; rx_handler_func_t *rx_handler; void *rx_handler_data; struct tcf_proto *ingress_cl_list; struct netdev_queue *ingress_queue; struct nf_hook_entry *nf_hooks_ingress; unsigned char broadcast[32U]; struct cpu_rmap *rx_cpu_rmap; struct hlist_node index_hlist; struct netdev_queue *_tx; unsigned int num_tx_queues; unsigned int real_num_tx_queues; struct Qdisc *qdisc; struct hlist_head qdisc_hash[16U]; unsigned long tx_queue_len; spinlock_t tx_global_lock; int watchdog_timeo; struct xps_dev_maps *xps_maps; struct tcf_proto *egress_cl_list; struct timer_list watchdog_timer; int *pcpu_refcnt; struct list_head todo_list; struct list_head link_watch_list; 
unsigned char reg_state; bool dismantle; unsigned short rtnl_link_state; void (*destructor)(struct net_device *); struct netpoll_info *npinfo; possible_net_t nd_net; union __anonunion____missing_field_name_454 __annonCompField116; struct garp_port *garp_port; struct mrp_port *mrp_port; struct device dev; const struct attribute_group *sysfs_groups[4U]; const struct attribute_group *sysfs_rx_queue_group; const struct rtnl_link_ops *rtnl_link_ops; unsigned int gso_max_size; u16 gso_max_segs; const struct dcbnl_rtnl_ops *dcbnl_ops; u8 num_tc; struct netdev_tc_txq tc_to_txq[16U]; u8 prio_tc_map[16U]; unsigned int fcoe_ddp_xid; struct netprio_map *priomap; struct phy_device *phydev; struct lock_class_key *qdisc_tx_busylock; struct lock_class_key *qdisc_running_key; bool proto_down; } ; 2194 struct packet_type { __be16 type; struct net_device *dev; int (*func)(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *); bool (*id_match)(struct packet_type *, struct sock *); void *af_packet_priv; struct list_head list; } ; 2222 struct pcpu_sw_netstats { u64 rx_packets; u64 rx_bytes; u64 tx_packets; u64 tx_bytes; struct u64_stats_sync syncp; } ; 520 struct tcmsg { unsigned char tcm_family; unsigned char tcm__pad1; unsigned short tcm__pad2; int tcm_ifindex; __u32 tcm_handle; __u32 tcm_parent; __u32 tcm_info; } ; 142 struct nla_policy { u16 type; u16 len; } ; 27 struct gnet_stats_basic_packed { __u64 bytes; __u32 packets; } ; 51 struct gnet_stats_queue { __u32 qlen; __u32 backlog; __u32 drops; __u32 requeues; __u32 overlimits; } ; 77 struct gnet_stats_basic_cpu { struct gnet_stats_basic_packed bstats; struct u64_stats_sync syncp; } ; 13 struct net_rate_estimator ; 14 struct gnet_dump { spinlock_t *lock; struct sk_buff *skb; struct nlattr *tail; int compat_tc_stats; int compat_xstats; int padattr; void *xstats; int xstats_len; struct tc_stats tc_stats; } ; 25 struct rtnl_link_ops { struct list_head list; const char *kind; size_t priv_size; void 
(*setup)(struct net_device *); int maxtype; const struct nla_policy *policy; int (*validate)(struct nlattr **, struct nlattr **); int (*newlink)(struct net *, struct net_device *, struct nlattr **, struct nlattr **); int (*changelink)(struct net_device *, struct nlattr **, struct nlattr **); void (*dellink)(struct net_device *, struct list_head *); size_t (*get_size)(const struct net_device *); int (*fill_info)(struct sk_buff *, const struct net_device *); size_t (*get_xstats_size)(const struct net_device *); int (*fill_xstats)(struct sk_buff *, const struct net_device *); unsigned int (*get_num_tx_queues)(); unsigned int (*get_num_rx_queues)(); int slave_maxtype; const struct nla_policy *slave_policy; int (*slave_validate)(struct nlattr **, struct nlattr **); int (*slave_changelink)(struct net_device *, struct net_device *, struct nlattr **, struct nlattr **); size_t (*get_slave_size)(const struct net_device *, const struct net_device *); int (*fill_slave_info)(struct sk_buff *, const struct net_device *, const struct net_device *); struct net * (*get_link_net)(const struct net_device *); size_t (*get_linkxstats_size)(const struct net_device *, int); int (*fill_linkxstats)(struct sk_buff *, const struct net_device *, int *, int); } ; 158 struct Qdisc_ops ; 159 struct qdisc_walker ; 160 struct tcf_walker ; 30 struct qdisc_size_table { struct callback_head rcu; struct list_head list; struct tc_sizespec szopts; int refcnt; u16 data[]; } ; 38 struct qdisc_skb_head { struct sk_buff *head; struct sk_buff *tail; __u32 qlen; spinlock_t lock; } ; 46 struct Qdisc { int (*enqueue)(struct sk_buff *, struct Qdisc *, struct sk_buff **); struct sk_buff * (*dequeue)(struct Qdisc *); unsigned int flags; u32 limit; const struct Qdisc_ops *ops; struct qdisc_size_table *stab; struct hlist_node hash; u32 handle; u32 parent; void *u32_node; struct netdev_queue *dev_queue; struct net_rate_estimator *rate_est; struct gnet_stats_basic_cpu *cpu_bstats; struct gnet_stats_queue *cpu_qstats; 
struct sk_buff *gso_skb; struct qdisc_skb_head q; struct gnet_stats_basic_packed bstats; seqcount_t running; struct gnet_stats_queue qstats; unsigned long state; struct Qdisc *next_sched; struct sk_buff *skb_bad_txq; struct callback_head callback_head; int padded; atomic_t refcnt; spinlock_t busylock; } ; 134 struct Qdisc_class_ops { struct netdev_queue * (*select_queue)(struct Qdisc *, struct tcmsg *); int (*graft)(struct Qdisc *, unsigned long, struct Qdisc *, struct Qdisc **); struct Qdisc * (*leaf)(struct Qdisc *, unsigned long); void (*qlen_notify)(struct Qdisc *, unsigned long); unsigned long int (*get)(struct Qdisc *, u32 ); void (*put)(struct Qdisc *, unsigned long); int (*change)(struct Qdisc *, u32 , u32 , struct nlattr **, unsigned long *); int (*delete)(struct Qdisc *, unsigned long); void (*walk)(struct Qdisc *, struct qdisc_walker *); struct tcf_proto ** (*tcf_chain)(struct Qdisc *, unsigned long); bool (*tcf_cl_offload)(u32 ); unsigned long int (*bind_tcf)(struct Qdisc *, unsigned long, u32 ); void (*unbind_tcf)(struct Qdisc *, unsigned long); int (*dump)(struct Qdisc *, unsigned long, struct sk_buff *, struct tcmsg *); int (*dump_stats)(struct Qdisc *, unsigned long, struct gnet_dump *); } ; 166 struct Qdisc_ops { struct Qdisc_ops *next; const struct Qdisc_class_ops *cl_ops; char id[16U]; int priv_size; int (*enqueue)(struct sk_buff *, struct Qdisc *, struct sk_buff **); struct sk_buff * (*dequeue)(struct Qdisc *); struct sk_buff * (*peek)(struct Qdisc *); int (*init)(struct Qdisc *, struct nlattr *); void (*reset)(struct Qdisc *); void (*destroy)(struct Qdisc *); int (*change)(struct Qdisc *, struct nlattr *); void (*attach)(struct Qdisc *); int (*dump)(struct Qdisc *, struct sk_buff *); int (*dump_stats)(struct Qdisc *, struct gnet_dump *); struct module *owner; } ; 191 struct tcf_result { unsigned long class; u32 classid; } ; 197 struct tcf_proto_ops { struct list_head head; char kind[16U]; int (*classify)(struct sk_buff *, const struct tcf_proto 
*, struct tcf_result *); int (*init)(struct tcf_proto *); bool (*destroy)(struct tcf_proto *, bool ); unsigned long int (*get)(struct tcf_proto *, u32 ); int (*change)(struct net *, struct sk_buff *, struct tcf_proto *, unsigned long, u32 , struct nlattr **, unsigned long *, bool ); int (*delete)(struct tcf_proto *, unsigned long); void (*walk)(struct tcf_proto *, struct tcf_walker *); int (*dump)(struct net *, struct tcf_proto *, unsigned long, struct sk_buff *, struct tcmsg *); struct module *owner; } ; 222 struct tcf_proto { struct tcf_proto *next; void *root; int (*classify)(struct sk_buff *, const struct tcf_proto *, struct tcf_result *); __be16 protocol; u32 prio; u32 classid; struct Qdisc *q; void *data; const struct tcf_proto_ops *ops; struct callback_head rcu; } ; 240 struct qdisc_skb_cb { unsigned int pkt_len; u16 slave_dev_queue_mapping; u16 tc_classid; unsigned char data[20U]; } ; 846 struct qdisc_walker { int stop; int skip; int count; int (*fn)(struct Qdisc *, unsigned long, struct qdisc_walker *); } ; 137 struct tc_skbmod { __u32 index; __u32 capab; int action; int refcnt; int bindcnt; __u64 flags; } ; 37 struct __anonstruct_s_478 { unsigned int len; struct callback_head rcu; } ; 37 union __anonunion____missing_field_name_477 { struct __anonstruct_s_478 s; void *ptr[0U]; } ; 37 struct net_generic { union __anonunion____missing_field_name_477 __annonCompField117; } ; 44 union __anonunion___u_480 { struct net_generic *__val; char __c[1U]; } ; 50 struct tcf_hashinfo { struct hlist_head *htab; unsigned int hmask; spinlock_t lock; u32 index; } ; 20 struct tc_action_ops ; 21 struct tc_action { const struct tc_action_ops *ops; __u32 type; __u32 order; struct list_head list; struct tcf_hashinfo *hinfo; struct hlist_node tcfa_head; u32 tcfa_index; int tcfa_refcnt; int tcfa_bindcnt; u32 tcfa_capab; int tcfa_action; struct tcf_t tcfa_tm; struct gnet_stats_basic_packed tcfa_bstats; struct gnet_stats_queue tcfa_qstats; struct net_rate_estimator *tcfa_rate_est; 
spinlock_t tcfa_lock; struct callback_head tcfa_rcu; struct gnet_stats_basic_cpu *cpu_bstats; struct gnet_stats_queue *cpu_qstats; } ; 99 struct tc_action_ops { struct list_head head; char kind[16U]; __u32 type; size_t size; struct module *owner; int (*act)(struct sk_buff *, const struct tc_action *, struct tcf_result *); int (*dump)(struct sk_buff *, struct tc_action *, int, int); void (*cleanup)(struct tc_action *, int); int (*lookup)(struct net *, struct tc_action **, u32 ); int (*init)(struct net *, struct nlattr *, struct nlattr *, struct tc_action **, int, int); int (*walk)(struct net *, struct sk_buff *, struct netlink_callback *, int, const struct tc_action_ops *); void (*stats_update)(struct tc_action *, u64 , u32 , u64 ); int (*get_dev)(const struct tc_action *, struct net *, struct net_device **); } ; 124 struct tc_action_net { struct tcf_hashinfo *hinfo; const struct tc_action_ops *ops; } ; 204 struct tcf_skbmod_params { struct callback_head rcu; u64 flags; u8 eth_dst[6U]; u16 eth_type; u8 eth_src[6U]; } ; 23 struct tcf_skbmod { struct tc_action common; struct tcf_skbmod_params *skbmod_p; } ; 53 union __anonunion___u_482 { int __val; char __c[1U]; } ; 60 union __anonunion___u_484 { struct tcf_skbmod_params *__val; char __c[1U]; } ; 1 void * __builtin_memcpy(void *, const void *, unsigned long); 1 long int __builtin_expect(long exp, long c); 26 void * ldv_undef_ptr(); 5 void ldv_rcu_inc(); 10 void ldv_rcu_dec(); 15 void ldv_check_for_read_section(); 241 void __read_once_size(const volatile void *p, void *res, int size); 34 extern struct module __this_module; 46 __u16 __fswab16(__u16 val); 178 int printk(const char *, ...); 273 void dump_stack(); 545 extern unsigned long this_cpu_off; 71 void warn_slowpath_null(const char *, const int); 592 void lockdep_rcu_suspicious(const char *, const int, const char *); 93 void __raw_spin_lock_init(raw_spinlock_t *, const char *, struct lock_class_key *); 30 void _raw_spin_lock_bh(raw_spinlock_t *); 42 void 
_raw_spin_unlock_bh(raw_spinlock_t *); 289 raw_spinlock_t * spinlock_check(spinlock_t *lock); 305 void spin_lock_bh(spinlock_t *lock); 350 void spin_unlock_bh(spinlock_t *lock); 78 extern volatile unsigned long jiffies; 439 clock_t jiffies_to_clock_t(unsigned long); 230 void ldv_synchronize_sched_1(); 51 void kfree_call_rcu(struct callback_head *, void (*)(struct callback_head *)); 501 int debug_lockdep_rcu_enabled(); 503 int rcu_read_lock_held(); 867 void rcu_read_lock(); 925 void rcu_read_unlock(); 154 void kfree(const void *); 330 void * __kmalloc(size_t , gfp_t ); 478 void * kmalloc(size_t size, gfp_t flags); 634 void * kzalloc(size_t size, gfp_t flags); 1198 unsigned char * skb_end_pointer(const struct sk_buff *skb); 1882 unsigned char * skb_tail_pointer(const struct sk_buff *skb); 2176 unsigned char * skb_mac_header(const struct sk_buff *skb); 2330 void skb_trim(struct sk_buff *, unsigned int); 3095 int skb_ensure_writable(struct sk_buff *, int); 3770 bool skb_is_gso(const struct sk_buff *skb); 26 struct ethhdr * eth_hdr(const struct sk_buff *skb); 78 void u64_stats_update_begin(struct u64_stats_sync *syncp); 30 int rtnl_is_locked(); 36 bool lockdep_rtnl_is_held(); 241 int nla_parse(struct nlattr **, int, const struct nlattr *, int, const struct nla_policy *); 262 int nla_put(struct sk_buff *, int, int, const void *); 263 int nla_put_64bit(struct sk_buff *, int, int, const void *, int); 531 void nlmsg_trim(struct sk_buff *skb, const void *mark); 680 void * nla_data(const struct nlattr *nla); 689 int nla_len(const struct nlattr *nla); 744 int nla_parse_nested(struct nlattr **tb, int maxtype, const struct nlattr *nla, const struct nla_policy *policy); 768 int nla_put_u16(struct sk_buff *skb, int attrtype, u16 value); 1043 u16 nla_get_u16(const struct nlattr *nla); 274 void ether_addr_copy(u8 *dst, const u8 *src); 262 struct qdisc_skb_cb * qdisc_skb_cb(const struct sk_buff *skb); 486 unsigned int qdisc_pkt_len(const struct sk_buff *skb); 526 void 
_bstats_update(struct gnet_stats_basic_packed *bstats, __u64 bytes, __u32 packets); 533 void bstats_update(struct gnet_stats_basic_packed *bstats, const struct sk_buff *skb); 549 void bstats_cpu_update(struct gnet_stats_basic_cpu *bstats, const struct sk_buff *skb); 591 void qstats_overlimit_inc(struct gnet_stats_queue *qstats); 38 void * net_generic(const struct net *net, unsigned int id); 63 int tcf_hashinfo_init(struct tcf_hashinfo *hf, unsigned int mask); 82 void tcf_lastuse_update(struct tcf_t *tm); 92 void tcf_tm_dump(struct tcf_t *dtm, const struct tcf_t *stm); 132 int tc_action_net_init(struct tc_action_net *tn, const struct tc_action_ops *ops, unsigned int mask); 147 void tcf_hashinfo_destroy(const struct tc_action_ops *, struct tcf_hashinfo *); 150 void tc_action_net_exit(struct tc_action_net *tn); 156 int tcf_generic_walker(struct tc_action_net *, struct sk_buff *, struct netlink_callback *, int, const struct tc_action_ops *); 159 int tcf_hash_search(struct tc_action_net *, struct tc_action **, u32 ); 161 bool tcf_hash_check(struct tc_action_net *, u32 , struct tc_action **, int); 163 int tcf_hash_create(struct tc_action_net *, u32 , struct nlattr *, struct tc_action **, const struct tc_action_ops *, int, bool ); 167 void tcf_hash_insert(struct tc_action_net *, struct tc_action *); 169 int __tcf_hash_release(struct tc_action *, bool , bool ); 171 int tcf_hash_release(struct tc_action *a, bool bind); 176 int tcf_register_action(struct tc_action_ops *, struct pernet_operations *); 177 int tcf_unregister_action(struct tc_action_ops *, struct pernet_operations *); 26 unsigned int skbmod_net_id = 0U; 27 struct tc_action_ops act_skbmod_ops; 30 int tcf_skbmod_run(struct sk_buff *skb, const struct tc_action *a, struct tcf_result *res); 81 const struct nla_policy skbmod_policy[7U] = { { (unsigned short)0, (unsigned short)0 }, { (unsigned short)0, (unsigned short)0 }, { (unsigned short)0, 32U }, { (unsigned short)0, 6U }, { (unsigned short)0, 6U }, { 2U, (unsigned 
short)0 } }; 88 int tcf_skbmod_init(struct net *net, struct nlattr *nla, struct nlattr *est, struct tc_action **a, int ovr, int bind); 190 void tcf_skbmod_cleanup(struct tc_action *a, int bind); 199 int tcf_skbmod_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref); 237 int tcf_skbmod_walker(struct net *net, struct sk_buff *skb, struct netlink_callback *cb, int type, const struct tc_action_ops *ops); 246 int tcf_skbmod_search(struct net *net, struct tc_action **a, u32 index); 253 struct tc_action_ops act_skbmod_ops = { { 0, 0 }, { 's', 'k', 'b', 'm', 'o', 'd', '\x0' }, 15U, 264UL, &__this_module, &tcf_skbmod_run, &tcf_skbmod_dump, &tcf_skbmod_cleanup, &tcf_skbmod_search, &tcf_skbmod_init, &tcf_skbmod_walker, 0, 0 }; 266 int skbmod_init_net(struct net *net); 273 void skbmod_exit_net(struct net *net); 280 struct pernet_operations skbmod_net_ops = { { 0, 0 }, &skbmod_init_net, &skbmod_exit_net, 0, &skbmod_net_id, 16UL }; 291 int skbmod_init_module(); 296 void skbmod_cleanup_module(); 320 void ldv_check_final_state(); 329 void ldv_initialize(); 332 void ldv_handler_precall(); 335 int nondet_int(); 338 int LDV_IN_INTERRUPT = 0; 341 void ldv_main0_sequence_infinite_withcheck_stateful(); 10 void ldv_error(); 5 int ldv_rcu_nested = 0; 7 int ldv_rcu_bh_nested = 0; 9 int ldv_rcu_sched_nested = 0; 11 int ldv_srcu_nested = 0; 28 void ldv_rcu_bh_inc(); 35 void ldv_rcu_bh_dec(); 43 void ldv_rcu_sched_inc(); 50 void ldv_rcu_sched_dec(); 57 void ldv_srcu_inc(); 64 void ldv_srcu_dec(); return ; } { 343 struct sk_buff *var_group1; 344 const struct tc_action *var_tcf_skbmod_run_0_p1; 345 struct tcf_result *var_tcf_skbmod_run_0_p2; 346 struct tc_action *var_group2; 347 int var_tcf_skbmod_dump_3_p2; 348 int var_tcf_skbmod_dump_3_p3; 349 struct net *var_group3; 350 struct nlattr *var_group4; 351 struct nlattr *var_tcf_skbmod_init_1_p2; 352 struct tc_action **var_tcf_skbmod_init_1_p3; 353 int var_tcf_skbmod_init_1_p4; 354 int var_tcf_skbmod_init_1_p5; 355 int 
var_tcf_skbmod_cleanup_2_p1; 356 struct netlink_callback *var_tcf_skbmod_walker_4_p2; 357 int var_tcf_skbmod_walker_4_p3; 358 const struct tc_action_ops *var_tcf_skbmod_walker_4_p4; 359 struct tc_action **var_group5; 360 unsigned int var_tcf_skbmod_search_5_p2; 361 int tmp; 362 int tmp___0; 363 int tmp___1; 433 LDV_IN_INTERRUPT = 1; 442 ldv_initialize() { /* Function call is skipped due to function is undefined */} 451 ldv_handler_precall() { /* Function call is skipped due to function is undefined */} { 293 int tmp; 293 tmp = tcf_register_action(&act_skbmod_ops, &skbmod_net_ops) { /* Function call is skipped due to function is undefined */} } 459 goto ldv_51834; 459 tmp___1 = nondet_int() { /* Function call is skipped due to function is undefined */} 461 goto ldv_51833; 460 ldv_51833:; 462 tmp___0 = nondet_int() { /* Function call is skipped due to function is undefined */} 462 switch (tmp___0); 495 ldv_handler_precall() { /* Function call is skipped due to function is undefined */} { 202 struct tcf_skbmod *d; 203 unsigned char *b; 204 unsigned char *tmp; 205 struct tcf_skbmod_params *p; 206 _Bool __warned; 207 int tmp___0; 208 _Bool tmp___1; 209 int tmp___2; 210 struct tc_skbmod opt; 211 struct tcf_t t; 212 int tmp___3; 213 int tmp___4; 214 int tmp___5; 215 unsigned short tmp___6; 216 int tmp___7; 217 int tmp___8; 202 d = (struct tcf_skbmod *)a; { 1884 unsigned char *__CPAchecker_TMP_0 = (unsigned char *)(skb->head); 1884 unsigned long __CPAchecker_TMP_1 = (unsigned long)(skb->tail); 1884 return __CPAchecker_TMP_0 + __CPAchecker_TMP_1;; } 203 b = tmp; 204 tmp___0 = debug_lockdep_rcu_enabled() { /* Function call is skipped due to function is undefined */} 204 p = d->skbmod_p; 205 opt.index = d->common.tcfa_index; 205 opt.capab = 0U; 205 opt.action = d->common.tcfa_action; 205 opt.refcnt = (d->common.tcfa_refcnt) - ref; 205 opt.bindcnt = (d->common.tcfa_bindcnt) - bind; 205 opt.flags = 0ULL; 213 opt.flags = p->flags; 214 tmp___3 = nla_put(skb, 2, 32, (const void 
*)(&opt)) { /* Function call is skipped due to function is undefined */} 216 int __CPAchecker_TMP_0 = (int)(p->flags); 216 tmp___4 = nla_put(skb, 3, 6, (const void *)(&(p->eth_dst))) { /* Function call is skipped due to function is undefined */} { 94 long tmp; 95 long tmp___0; 96 long tmp___1; 97 long tmp___2; 94 unsigned long long __CPAchecker_TMP_0 = (unsigned long long)(stm->install); 94 tmp = jiffies_to_clock_t((unsigned long)(((unsigned long long)jiffies) - __CPAchecker_TMP_0)) { /* Function call is skipped due to function is undefined */} 94 dtm->install = (__u64 )tmp; 95 unsigned long long __CPAchecker_TMP_1 = (unsigned long long)(stm->lastuse); 95 tmp___0 = jiffies_to_clock_t((unsigned long)(((unsigned long long)jiffies) - __CPAchecker_TMP_1)) { /* Function call is skipped due to function is undefined */} 95 dtm->lastuse = (__u64 )tmp___0; 96 unsigned long long __CPAchecker_TMP_2 = (unsigned long long)(stm->firstuse); 96 tmp___1 = jiffies_to_clock_t((unsigned long)(((unsigned long long)jiffies) - __CPAchecker_TMP_2)) { /* Function call is skipped due to function is undefined */} 96 dtm->firstuse = (__u64 )tmp___1; 97 unsigned long __CPAchecker_TMP_3 = (unsigned long)(stm->expires); 97 tmp___2 = jiffies_to_clock_t(__CPAchecker_TMP_3) { /* Function call is skipped due to function is undefined */} 97 dtm->expires = (__u64 )tmp___2; 98 return ;; } 227 tmp___8 = nla_put_64bit(skb, 1, 32, (const void *)(&t), 6) { /* Function call is skipped due to function is undefined */} 228 goto nla_put_failure; 231 nla_put_failure:; { } 533 int __ret_warn_on; 534 long tmp; 533 assume(!(((unsigned long)mark) != ((unsigned long)((const void *)0)))); 537 return ;; } 503 goto ldv_51824; 626 ldv_51824:; 627 ldv_51834:; 459 tmp___1 = nondet_int() { /* Function call is skipped due to function is undefined */} 461 goto ldv_51833; 460 ldv_51833:; 462 tmp___0 = nondet_int() { /* Function call is skipped due to function is undefined */} 462 switch (tmp___0); 515 ldv_handler_precall() { 
/* Function call is skipped due to function is undefined */} 516 -tcf_skbmod_init(var_group3, var_group4, var_tcf_skbmod_init_1_p2, var_tcf_skbmod_init_1_p3, var_tcf_skbmod_init_1_p4, var_tcf_skbmod_init_1_p5) { } 91 struct tc_action_net *tn; 92 void *tmp; 93 struct nlattr *tb[7U]; 94 struct tcf_skbmod_params *p; 95 struct tcf_skbmod_params *p_old; 96 struct tc_skbmod *parm; 97 struct tcf_skbmod *d; 98 _Bool exists; 99 u8 *daddr; 100 u8 *saddr; 101 unsigned short eth_type; 102 unsigned int lflags; 103 int ret; 104 int err; 105 void *tmp___0; 106 void *tmp___1; 107 void *tmp___2; 108 int tmp___3; 109 long tmp___4; 110 void *tmp___5; 111 long tmp___6; 112 _Bool __warned; 113 int tmp___7; 114 _Bool tmp___8; 115 int tmp___9; 116 unsigned short tmp___10; 117 void *tmp___11; { 40 struct net_generic *ng; 41 void *ptr; 42 struct net_generic *________p1; 43 struct net_generic *_________p1; 44 union __anonunion___u_480 __u; 45 _Bool __warned; 46 int tmp; 47 int tmp___0; { 243 switch (size); 244 assume(!(size == 1)); 244 assume(!(size == 2)); 244 assume(!(size == 4)); 244 assume(size == 8); 243 *((__u64 *)res) = *((volatile __u64 *)p); 243 goto ldv_879; 245 return ;; } 44 _________p1 = __u.__val; 44 ________p1 = _________p1; 44 tmp = debug_lockdep_rcu_enabled() { /* Function call is skipped due to function is undefined */} 44 assume(!(tmp != 0)); 44 ng = ________p1; 45 ptr = (ng->__annonCompField117.ptr)[id]; 48 return ptr;; } 92 tn = (struct tc_action_net *)tmp; 97 exists = 0; 98 daddr = (u8 *)0U; 99 saddr = (u8 *)0U; 100 eth_type = 0U; 101 lflags = 0U; 102 ret = 0; 107 -nla_parse_nested((struct nlattr **)(&tb), 6, (const struct nlattr *)nla, (const struct nla_policy *)(&skbmod_policy)) { 747 int tmp; 748 void *tmp___0; 749 int tmp___1; { 691 int __CPAchecker_TMP_0 = (int)(nla->nla_len); 691 return __CPAchecker_TMP_0 + -4;; } { 682 return ((void *)nla) + 4U;; } 748 tmp___1 = nla_parse(tb, maxtype, (const struct nlattr *)tmp___0, tmp, policy) { /* Function call is skipped due 
to function is undefined */} 748 return tmp___1;; } { 682 return ((void *)nla) + 4U;; } 115 daddr = (u8 *)tmp___0; 116 lflags = lflags | 1U; { 682 return ((void *)nla) + 4U;; } 129 parm = (struct tc_skbmod *)tmp___2; 133 exists = tcf_hash_check(tn, parm->index, a, bind) { /* Function call is skipped due to function is undefined */} 141 ret = tcf_hash_create(tn, parm->index, est, a, (const struct tc_action_ops *)(&act_skbmod_ops), bind, 1) { /* Function call is skipped due to function is undefined */} 146 ret = 1; 153 d = (struct tcf_skbmod *)(*a); 155 tmp___3 = rtnl_is_locked() { /* Function call is skipped due to function is undefined */} { 636 void *tmp; { 480 void *tmp___2; 495 tmp___2 = __kmalloc(size, flags) { /* Function call is skipped due to function is undefined */} 495 return tmp___2;; } 636 return tmp;; } 156 p = (struct tcf_skbmod_params *)tmp___5; 163 p->flags = (u64 )lflags; 164 d->common.tcfa_action = parm->action; 166 tmp___7 = debug_lockdep_rcu_enabled() { /* Function call is skipped due to function is undefined */} 166 p_old = d->skbmod_p; { } 277 *((u32 *)dst) = *((const u32 *)src); 278 *(((u16 *)dst) + 4U) = *(((const u16 *)src) + 4U); 279 return ;; } | Source code
1
2 /*
3 * net/sched/act_skbmod.c skb data modifier
4 *
5 * Copyright (c) 2016 Jamal Hadi Salim <jhs@mojatatu.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 */
12
13 #include <linux/module.h>
14 #include <linux/init.h>
15 #include <linux/kernel.h>
16 #include <linux/skbuff.h>
17 #include <linux/rtnetlink.h>
18 #include <net/netlink.h>
19 #include <net/pkt_sched.h>
20
21 #include <linux/tc_act/tc_skbmod.h>
22 #include <net/tc_act/tc_skbmod.h>
23
24 #define SKBMOD_TAB_MASK 15
25
26 static unsigned int skbmod_net_id;
27 static struct tc_action_ops act_skbmod_ops;
28
29 #define MAX_EDIT_LEN ETH_HLEN
/*
 * tcf_skbmod_run - datapath handler, invoked per packet classified to
 * this action.  Rewrites Ethernet header fields (dst MAC, src MAC,
 * ethertype) and/or swaps src/dst MACs according to the flags in the
 * RCU-protected parameter block.  Returns the configured tc verdict,
 * or TC_ACT_SHOT if the packet cannot be made writable.
 */
static int tcf_skbmod_run(struct sk_buff *skb, const struct tc_action *a,
			  struct tcf_result *res)
{
	struct tcf_skbmod *d = to_skbmod(a);
	int action;
	struct tcf_skbmod_params *p;
	u64 flags;
	int err;

	tcf_lastuse_update(&d->tcf_tm);
	bstats_cpu_update(this_cpu_ptr(d->common.cpu_bstats), skb);

	/* XXX: if you are going to edit more fields beyond ethernet header
	 * (example when you add IP header replacement or vlan swap)
	 * then MAX_EDIT_LEN needs to change appropriately
	 */
	err = skb_ensure_writable(skb, MAX_EDIT_LEN);
	if (unlikely(err)) { /* best policy is to drop on the floor */
		qstats_overlimit_inc(this_cpu_ptr(d->common.cpu_qstats));
		return TC_ACT_SHOT;
	}

	rcu_read_lock();
	action = READ_ONCE(d->tcf_action);
	if (unlikely(action == TC_ACT_SHOT)) {
		qstats_overlimit_inc(this_cpu_ptr(d->common.cpu_qstats));
		rcu_read_unlock();
		return action;
	}

	/* skbmod_p is published with rcu_assign_pointer() in
	 * tcf_skbmod_init(); it must only be dereferenced inside this
	 * RCU read-side critical section.
	 */
	p = rcu_dereference(d->skbmod_p);
	flags = p->flags;
	if (flags & SKBMOD_F_DMAC)
		ether_addr_copy(eth_hdr(skb)->h_dest, p->eth_dst);
	if (flags & SKBMOD_F_SMAC)
		ether_addr_copy(eth_hdr(skb)->h_source, p->eth_src);
	if (flags & SKBMOD_F_ETYPE)
		eth_hdr(skb)->h_proto = p->eth_type;
	rcu_read_unlock();

	/* The MAC swap below touches only skb data, not *p, and uses the
	 * local 'flags' snapshot, so it is done outside the RCU section.
	 */
	if (flags & SKBMOD_F_SWAPMAC) {
		u16 tmpaddr[ETH_ALEN / 2]; /* ether_addr_copy() requirement */
		/*XXX: I am sure we can come up with more efficient swapping*/
		ether_addr_copy((u8 *)tmpaddr, eth_hdr(skb)->h_dest);
		ether_addr_copy(eth_hdr(skb)->h_dest, eth_hdr(skb)->h_source);
		ether_addr_copy(eth_hdr(skb)->h_source, (u8 *)tmpaddr);
	}

	return action;
}
80
/* Netlink attribute policy for TCA_SKBMOD_*: PARMS carries a
 * struct tc_skbmod, DMAC/SMAC are fixed-length (ETH_ALEN) MAC
 * addresses, ETYPE is a u16.
 */
static const struct nla_policy skbmod_policy[TCA_SKBMOD_MAX + 1] = {
	[TCA_SKBMOD_PARMS] = { .len = sizeof(struct tc_skbmod) },
	[TCA_SKBMOD_DMAC] = { .len = ETH_ALEN },
	[TCA_SKBMOD_SMAC] = { .len = ETH_ALEN },
	[TCA_SKBMOD_ETYPE] = { .type = NLA_U16 },
};
87
/*
 * tcf_skbmod_init - parse netlink configuration and create a new skbmod
 * action, or (with @ovr) replace the parameters of an existing one.
 * The new parameter block is published to the datapath with
 * rcu_assign_pointer(); a replaced block is freed after a grace period.
 * Runs under RTNL (see ASSERT_RTNL below).
 *
 * Returns ACT_P_CREATED for a new action, 0 on bind/in-place update,
 * or a negative errno.
 */
static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
			   struct nlattr *est, struct tc_action **a,
			   int ovr, int bind)
{
	struct tc_action_net *tn = net_generic(net, skbmod_net_id);
	struct nlattr *tb[TCA_SKBMOD_MAX + 1];
	struct tcf_skbmod_params *p, *p_old;
	struct tc_skbmod *parm;
	struct tcf_skbmod *d;
	bool exists = false;
	u8 *daddr = NULL;
	u8 *saddr = NULL;
	u16 eth_type = 0;
	u32 lflags = 0;
	int ret = 0, err;

	if (!nla)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_SKBMOD_MAX, nla, skbmod_policy);
	if (err < 0)
		return err;

	if (!tb[TCA_SKBMOD_PARMS])
		return -EINVAL;

	/* Collect the requested edits; each optional attribute sets a flag. */
	if (tb[TCA_SKBMOD_DMAC]) {
		daddr = nla_data(tb[TCA_SKBMOD_DMAC]);
		lflags |= SKBMOD_F_DMAC;
	}

	if (tb[TCA_SKBMOD_SMAC]) {
		saddr = nla_data(tb[TCA_SKBMOD_SMAC]);
		lflags |= SKBMOD_F_SMAC;
	}

	if (tb[TCA_SKBMOD_ETYPE]) {
		eth_type = nla_get_u16(tb[TCA_SKBMOD_ETYPE]);
		lflags |= SKBMOD_F_ETYPE;
	}

	parm = nla_data(tb[TCA_SKBMOD_PARMS]);
	/* SWAPMAC overrides any other requested edit (note: '=' not '|='). */
	if (parm->flags & SKBMOD_F_SWAPMAC)
		lflags = SKBMOD_F_SWAPMAC;

	exists = tcf_hash_check(tn, parm->index, a, bind);
	if (exists && bind)
		return 0;

	/* At least one modification must have been requested. */
	if (!lflags)
		return -EINVAL;

	if (!exists) {
		ret = tcf_hash_create(tn, parm->index, est, a,
				      &act_skbmod_ops, bind, true);
		if (ret)
			return ret;

		ret = ACT_P_CREATED;
	} else {
		tcf_hash_release(*a, bind);
		if (!ovr)
			return -EEXIST;
	}

	d = to_skbmod(*a);

	ASSERT_RTNL();
	p = kzalloc(sizeof(struct tcf_skbmod_params), GFP_KERNEL);
	if (unlikely(!p)) {
		/* NOTE(review): on this path a freshly created action is
		 * left with skbmod_p == NULL; the cleanup callback must
		 * tolerate that.
		 */
		if (ovr)
			tcf_hash_release(*a, bind);
		return -ENOMEM;
	}

	p->flags = lflags;
	d->tcf_action = parm->action;

	p_old = rtnl_dereference(d->skbmod_p);

	/* When replacing, take tcf_lock so concurrent readers holding the
	 * lock observe a consistent old/new parameter switch.
	 */
	if (ovr)
		spin_lock_bh(&d->tcf_lock);

	if (lflags & SKBMOD_F_DMAC)
		ether_addr_copy(p->eth_dst, daddr);
	if (lflags & SKBMOD_F_SMAC)
		ether_addr_copy(p->eth_src, saddr);
	if (lflags & SKBMOD_F_ETYPE)
		p->eth_type = htons(eth_type);

	/* Publish the new parameter block to RCU readers in the datapath. */
	rcu_assign_pointer(d->skbmod_p, p);
	if (ovr)
		spin_unlock_bh(&d->tcf_lock);

	/* Old block (if replacing) is freed after an RCU grace period. */
	if (p_old)
		kfree_rcu(p_old, rcu);

	if (ret == ACT_P_CREATED)
		tcf_hash_insert(tn, *a);
	return ret;
}
189
190 static void tcf_skbmod_cleanup(struct tc_action *a, int bind)
191 {
192 struct tcf_skbmod *d = to_skbmod(a);
193 struct tcf_skbmod_params *p;
194
195 p = rcu_dereference_protected(d->skbmod_p, 1);
196 kfree_rcu(p, rcu);
197 }
198
199 static int tcf_skbmod_dump(struct sk_buff *skb, struct tc_action *a,
200 int bind, int ref)
201 {
202 struct tcf_skbmod *d = to_skbmod(a);
203 unsigned char *b = skb_tail_pointer(skb);
204 struct tcf_skbmod_params *p = rtnl_dereference(d->skbmod_p);
205 struct tc_skbmod opt = {
206 .index = d->tcf_index,
207 .refcnt = d->tcf_refcnt - ref,
208 .bindcnt = d->tcf_bindcnt - bind,
209 .action = d->tcf_action,
210 };
211 struct tcf_t t;
212
213 opt.flags = p->flags;
214 if (nla_put(skb, TCA_SKBMOD_PARMS, sizeof(opt), &opt))
215 goto nla_put_failure;
216 if ((p->flags & SKBMOD_F_DMAC) &&
217 nla_put(skb, TCA_SKBMOD_DMAC, ETH_ALEN, p->eth_dst))
218 goto nla_put_failure;
219 if ((p->flags & SKBMOD_F_SMAC) &&
220 nla_put(skb, TCA_SKBMOD_SMAC, ETH_ALEN, p->eth_src))
221 goto nla_put_failure;
222 if ((p->flags & SKBMOD_F_ETYPE) &&
223 nla_put_u16(skb, TCA_SKBMOD_ETYPE, ntohs(p->eth_type)))
224 goto nla_put_failure;
225
226 tcf_tm_dump(&t, &d->tcf_tm);
227 if (nla_put_64bit(skb, TCA_SKBMOD_TM, sizeof(t), &t, TCA_SKBMOD_PAD))
228 goto nla_put_failure;
229
230 return skb->len;
231 nla_put_failure:
232 rcu_read_unlock();
233 nlmsg_trim(skb, b);
234 return -1;
235 }
236
237 static int tcf_skbmod_walker(struct net *net, struct sk_buff *skb,
238 struct netlink_callback *cb, int type,
239 const struct tc_action_ops *ops)
240 {
241 struct tc_action_net *tn = net_generic(net, skbmod_net_id);
242
243 return tcf_generic_walker(tn, skb, cb, type, ops);
244 }
245
246 static int tcf_skbmod_search(struct net *net, struct tc_action **a, u32 index)
247 {
248 struct tc_action_net *tn = net_generic(net, skbmod_net_id);
249
250 return tcf_hash_search(tn, a, index);
251 }
252
253 static struct tc_action_ops act_skbmod_ops = {
254 .kind = "skbmod",
255 .type = TCA_ACT_SKBMOD,
256 .owner = THIS_MODULE,
257 .act = tcf_skbmod_run,
258 .dump = tcf_skbmod_dump,
259 .init = tcf_skbmod_init,
260 .cleanup = tcf_skbmod_cleanup,
261 .walk = tcf_skbmod_walker,
262 .lookup = tcf_skbmod_search,
263 .size = sizeof(struct tcf_skbmod),
264 };
265
266 static __net_init int skbmod_init_net(struct net *net)
267 {
268 struct tc_action_net *tn = net_generic(net, skbmod_net_id);
269
270 return tc_action_net_init(tn, &act_skbmod_ops, SKBMOD_TAB_MASK);
271 }
272
273 static void __net_exit skbmod_exit_net(struct net *net)
274 {
275 struct tc_action_net *tn = net_generic(net, skbmod_net_id);
276
277 tc_action_net_exit(tn);
278 }
279
280 static struct pernet_operations skbmod_net_ops = {
281 .init = skbmod_init_net,
282 .exit = skbmod_exit_net,
283 .id = &skbmod_net_id,
284 .size = sizeof(struct tc_action_net),
285 };
286
287 MODULE_AUTHOR("Jamal Hadi Salim, <jhs@mojatatu.com>");
288 MODULE_DESCRIPTION("SKB data mod-ing");
289 MODULE_LICENSE("GPL");
290
291 static int __init skbmod_init_module(void)
292 {
293 return tcf_register_action(&act_skbmod_ops, &skbmod_net_ops);
294 }
295
296 static void __exit skbmod_cleanup_module(void)
297 {
298 tcf_unregister_action(&act_skbmod_ops, &skbmod_net_ops);
299 }
300
301 module_init(skbmod_init_module);
302 module_exit(skbmod_cleanup_module);
303
304
305
306
307
308 /* LDV_COMMENT_BEGIN_MAIN */
309 #ifdef LDV_MAIN0_sequence_infinite_withcheck_stateful
310
311 /*###########################################################################*/
312
313 /*############## Driver Environment Generator 0.2 output ####################*/
314
315 /*###########################################################################*/
316
317
318
319 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test if all kernel resources are correctly released by driver before driver will be unloaded. */
320 void ldv_check_final_state(void);
321
322 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result. */
323 void ldv_check_return_value(int res);
324
325 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result of probe() function. */
326 void ldv_check_return_value_probe(int res);
327
328 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Initializes the model. */
329 void ldv_initialize(void);
330
331 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Reinitializes the model between distinct model function calls. */
332 void ldv_handler_precall(void);
333
335 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Returns arbitrary integer value. */
335 int nondet_int(void);
336
337 /* LDV_COMMENT_VAR_DECLARE_LDV Special variable for LDV verifier. */
338 int LDV_IN_INTERRUPT;
339
340 /* LDV_COMMENT_FUNCTION_MAIN Main function for LDV verifier. */
341 void ldv_main0_sequence_infinite_withcheck_stateful(void) {
342
343
344
345 /* LDV_COMMENT_BEGIN_VARIABLE_DECLARATION_PART */
346 /*============================= VARIABLE DECLARATION PART =============================*/
347 /** STRUCT: struct type: tc_action_ops, struct name: act_skbmod_ops **/
348 /* content: static int tcf_skbmod_run(struct sk_buff *skb, const struct tc_action *a, struct tcf_result *res)*/
349 /* LDV_COMMENT_BEGIN_PREP */
350 #define SKBMOD_TAB_MASK 15
351 #define MAX_EDIT_LEN ETH_HLEN
352 /* LDV_COMMENT_END_PREP */
353 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "tcf_skbmod_run" */
354 struct sk_buff * var_group1;
355 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "tcf_skbmod_run" */
356 const struct tc_action * var_tcf_skbmod_run_0_p1;
357 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "tcf_skbmod_run" */
358 struct tcf_result * var_tcf_skbmod_run_0_p2;
359 /* content: static int tcf_skbmod_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)*/
360 /* LDV_COMMENT_BEGIN_PREP */
361 #define SKBMOD_TAB_MASK 15
362 #define MAX_EDIT_LEN ETH_HLEN
363 /* LDV_COMMENT_END_PREP */
364 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "tcf_skbmod_dump" */
365 struct tc_action * var_group2;
366 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "tcf_skbmod_dump" */
367 int var_tcf_skbmod_dump_3_p2;
368 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "tcf_skbmod_dump" */
369 int var_tcf_skbmod_dump_3_p3;
370 /* content: static int tcf_skbmod_init(struct net *net, struct nlattr *nla, struct nlattr *est, struct tc_action **a, int ovr, int bind)*/
371 /* LDV_COMMENT_BEGIN_PREP */
372 #define SKBMOD_TAB_MASK 15
373 #define MAX_EDIT_LEN ETH_HLEN
374 /* LDV_COMMENT_END_PREP */
375 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "tcf_skbmod_init" */
376 struct net * var_group3;
377 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "tcf_skbmod_init" */
378 struct nlattr * var_group4;
379 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "tcf_skbmod_init" */
380 struct nlattr * var_tcf_skbmod_init_1_p2;
381 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "tcf_skbmod_init" */
382 struct tc_action ** var_tcf_skbmod_init_1_p3;
383 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "tcf_skbmod_init" */
384 int var_tcf_skbmod_init_1_p4;
385 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "tcf_skbmod_init" */
386 int var_tcf_skbmod_init_1_p5;
387 /* content: static void tcf_skbmod_cleanup(struct tc_action *a, int bind)*/
388 /* LDV_COMMENT_BEGIN_PREP */
389 #define SKBMOD_TAB_MASK 15
390 #define MAX_EDIT_LEN ETH_HLEN
391 /* LDV_COMMENT_END_PREP */
392 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "tcf_skbmod_cleanup" */
393 int var_tcf_skbmod_cleanup_2_p1;
394 /* content: static int tcf_skbmod_walker(struct net *net, struct sk_buff *skb, struct netlink_callback *cb, int type, const struct tc_action_ops *ops)*/
395 /* LDV_COMMENT_BEGIN_PREP */
396 #define SKBMOD_TAB_MASK 15
397 #define MAX_EDIT_LEN ETH_HLEN
398 /* LDV_COMMENT_END_PREP */
399 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "tcf_skbmod_walker" */
400 struct netlink_callback * var_tcf_skbmod_walker_4_p2;
401 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "tcf_skbmod_walker" */
402 int var_tcf_skbmod_walker_4_p3;
403 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "tcf_skbmod_walker" */
404 const struct tc_action_ops * var_tcf_skbmod_walker_4_p4;
405 /* content: static int tcf_skbmod_search(struct net *net, struct tc_action **a, u32 index)*/
406 /* LDV_COMMENT_BEGIN_PREP */
407 #define SKBMOD_TAB_MASK 15
408 #define MAX_EDIT_LEN ETH_HLEN
409 /* LDV_COMMENT_END_PREP */
410 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "tcf_skbmod_search" */
411 struct tc_action ** var_group5;
412 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "tcf_skbmod_search" */
413 u32 var_tcf_skbmod_search_5_p2;
414
415 /** STRUCT: struct type: pernet_operations, struct name: skbmod_net_ops **/
416 /* content: static __net_init int skbmod_init_net(struct net *net)*/
417 /* LDV_COMMENT_BEGIN_PREP */
418 #define SKBMOD_TAB_MASK 15
419 #define MAX_EDIT_LEN ETH_HLEN
420 /* LDV_COMMENT_END_PREP */
421 /* content: static void __net_exit skbmod_exit_net(struct net *net)*/
422 /* LDV_COMMENT_BEGIN_PREP */
423 #define SKBMOD_TAB_MASK 15
424 #define MAX_EDIT_LEN ETH_HLEN
425 /* LDV_COMMENT_END_PREP */
426
427
428
429
430 /* LDV_COMMENT_END_VARIABLE_DECLARATION_PART */
431 /* LDV_COMMENT_BEGIN_VARIABLE_INITIALIZING_PART */
432 /*============================= VARIABLE INITIALIZING PART =============================*/
433 LDV_IN_INTERRUPT=1;
434
435
436
437
438 /* LDV_COMMENT_END_VARIABLE_INITIALIZING_PART */
439 /* LDV_COMMENT_BEGIN_FUNCTION_CALL_SECTION */
440 /*============================= FUNCTION CALL SECTION =============================*/
441 /* LDV_COMMENT_FUNCTION_CALL Initialize LDV model. */
442 ldv_initialize();
443
444 /** INIT: init_type: ST_MODULE_INIT **/
445 /* content: static int __init skbmod_init_module(void)*/
446 /* LDV_COMMENT_BEGIN_PREP */
447 #define SKBMOD_TAB_MASK 15
448 #define MAX_EDIT_LEN ETH_HLEN
449 /* LDV_COMMENT_END_PREP */
450 /* LDV_COMMENT_FUNCTION_CALL Kernel calls driver init function after driver loading to kernel. This function declared as "MODULE_INIT(function name)". */
451 ldv_handler_precall();
452 if(skbmod_init_module())
453 goto ldv_final;
454
455
456
457
458
459 while( nondet_int()
460 ) {
461
462 switch(nondet_int()) {
463
464 case 0: {
465
466 /** STRUCT: struct type: tc_action_ops, struct name: act_skbmod_ops **/
467
468
469 /* content: static int tcf_skbmod_run(struct sk_buff *skb, const struct tc_action *a, struct tcf_result *res)*/
470 /* LDV_COMMENT_BEGIN_PREP */
471 #define SKBMOD_TAB_MASK 15
472 #define MAX_EDIT_LEN ETH_HLEN
473 /* LDV_COMMENT_END_PREP */
474 /* LDV_COMMENT_FUNCTION_CALL Function from field "act" from driver structure with callbacks "act_skbmod_ops" */
475 ldv_handler_precall();
476 tcf_skbmod_run( var_group1, var_tcf_skbmod_run_0_p1, var_tcf_skbmod_run_0_p2);
477
478
479
480
481 }
482
483 break;
484 case 1: {
485
486 /** STRUCT: struct type: tc_action_ops, struct name: act_skbmod_ops **/
487
488
489 /* content: static int tcf_skbmod_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)*/
490 /* LDV_COMMENT_BEGIN_PREP */
491 #define SKBMOD_TAB_MASK 15
492 #define MAX_EDIT_LEN ETH_HLEN
493 /* LDV_COMMENT_END_PREP */
494 /* LDV_COMMENT_FUNCTION_CALL Function from field "dump" from driver structure with callbacks "act_skbmod_ops" */
495 ldv_handler_precall();
496 tcf_skbmod_dump( var_group1, var_group2, var_tcf_skbmod_dump_3_p2, var_tcf_skbmod_dump_3_p3);
497
498
499
500
501 }
502
503 break;
504 case 2: {
505
506 /** STRUCT: struct type: tc_action_ops, struct name: act_skbmod_ops **/
507
508
509 /* content: static int tcf_skbmod_init(struct net *net, struct nlattr *nla, struct nlattr *est, struct tc_action **a, int ovr, int bind)*/
510 /* LDV_COMMENT_BEGIN_PREP */
511 #define SKBMOD_TAB_MASK 15
512 #define MAX_EDIT_LEN ETH_HLEN
513 /* LDV_COMMENT_END_PREP */
514 /* LDV_COMMENT_FUNCTION_CALL Function from field "init" from driver structure with callbacks "act_skbmod_ops" */
515 ldv_handler_precall();
516 tcf_skbmod_init( var_group3, var_group4, var_tcf_skbmod_init_1_p2, var_tcf_skbmod_init_1_p3, var_tcf_skbmod_init_1_p4, var_tcf_skbmod_init_1_p5);
517
518
519
520
521 }
522
523 break;
524 case 3: {
525
526 /** STRUCT: struct type: tc_action_ops, struct name: act_skbmod_ops **/
527
528
529 /* content: static void tcf_skbmod_cleanup(struct tc_action *a, int bind)*/
530 /* LDV_COMMENT_BEGIN_PREP */
531 #define SKBMOD_TAB_MASK 15
532 #define MAX_EDIT_LEN ETH_HLEN
533 /* LDV_COMMENT_END_PREP */
534 /* LDV_COMMENT_FUNCTION_CALL Function from field "cleanup" from driver structure with callbacks "act_skbmod_ops" */
535 ldv_handler_precall();
536 tcf_skbmod_cleanup( var_group2, var_tcf_skbmod_cleanup_2_p1);
537
538
539
540
541 }
542
543 break;
544 case 4: {
545
546 /** STRUCT: struct type: tc_action_ops, struct name: act_skbmod_ops **/
547
548
549 /* content: static int tcf_skbmod_walker(struct net *net, struct sk_buff *skb, struct netlink_callback *cb, int type, const struct tc_action_ops *ops)*/
550 /* LDV_COMMENT_BEGIN_PREP */
551 #define SKBMOD_TAB_MASK 15
552 #define MAX_EDIT_LEN ETH_HLEN
553 /* LDV_COMMENT_END_PREP */
554 /* LDV_COMMENT_FUNCTION_CALL Function from field "walk" from driver structure with callbacks "act_skbmod_ops" */
555 ldv_handler_precall();
556 tcf_skbmod_walker( var_group3, var_group1, var_tcf_skbmod_walker_4_p2, var_tcf_skbmod_walker_4_p3, var_tcf_skbmod_walker_4_p4);
557
558
559
560
561 }
562
563 break;
564 case 5: {
565
566 /** STRUCT: struct type: tc_action_ops, struct name: act_skbmod_ops **/
567
568
569 /* content: static int tcf_skbmod_search(struct net *net, struct tc_action **a, u32 index)*/
570 /* LDV_COMMENT_BEGIN_PREP */
571 #define SKBMOD_TAB_MASK 15
572 #define MAX_EDIT_LEN ETH_HLEN
573 /* LDV_COMMENT_END_PREP */
574 /* LDV_COMMENT_FUNCTION_CALL Function from field "lookup" from driver structure with callbacks "act_skbmod_ops" */
575 ldv_handler_precall();
576 tcf_skbmod_search( var_group3, var_group5, var_tcf_skbmod_search_5_p2);
577
578
579
580
581 }
582
583 break;
584 case 6: {
585
586 /** STRUCT: struct type: pernet_operations, struct name: skbmod_net_ops **/
587
588
589 /* content: static __net_init int skbmod_init_net(struct net *net)*/
590 /* LDV_COMMENT_BEGIN_PREP */
591 #define SKBMOD_TAB_MASK 15
592 #define MAX_EDIT_LEN ETH_HLEN
593 /* LDV_COMMENT_END_PREP */
594 /* LDV_COMMENT_FUNCTION_CALL Function from field "init" from driver structure with callbacks "skbmod_net_ops" */
595 ldv_handler_precall();
596 skbmod_init_net( var_group3);
597
598
599
600
601 }
602
603 break;
604 case 7: {
605
606 /** STRUCT: struct type: pernet_operations, struct name: skbmod_net_ops **/
607
608
609 /* content: static void __net_exit skbmod_exit_net(struct net *net)*/
610 /* LDV_COMMENT_BEGIN_PREP */
611 #define SKBMOD_TAB_MASK 15
612 #define MAX_EDIT_LEN ETH_HLEN
613 /* LDV_COMMENT_END_PREP */
614 /* LDV_COMMENT_FUNCTION_CALL Function from field "exit" from driver structure with callbacks "skbmod_net_ops" */
615 ldv_handler_precall();
616 skbmod_exit_net( var_group3);
617
618
619
620
621 }
622
623 break;
624 default: break;
625
626 }
627
628 }
629
630 ldv_module_exit:
631
632 /** INIT: init_type: ST_MODULE_EXIT **/
633 /* content: static void __exit skbmod_cleanup_module(void)*/
634 /* LDV_COMMENT_BEGIN_PREP */
635 #define SKBMOD_TAB_MASK 15
636 #define MAX_EDIT_LEN ETH_HLEN
637 /* LDV_COMMENT_END_PREP */
638 /* LDV_COMMENT_FUNCTION_CALL Kernel calls driver release function before driver will be uploaded from kernel. This function declared as "MODULE_EXIT(function name)". */
639 ldv_handler_precall();
640 skbmod_cleanup_module();
641
642 /* LDV_COMMENT_FUNCTION_CALL Checks that all resources and locks are correctly released before the driver will be unloaded. */
643 ldv_final: ldv_check_final_state();
644
645 /* LDV_COMMENT_END_FUNCTION_CALL_SECTION */
646 return;
647
648 }
649 #endif
650
651 /* LDV_COMMENT_END_MAIN */ 1
2
3 #include <verifier/rcv.h>
4
5 extern void ldv_rcu_inc( void );
6 extern void ldv_rcu_bh_inc( void );
7 extern void ldv_rcu_sched_inc( void );
8 extern void ldv_srcu_inc( void );
9
10 extern void ldv_rcu_dec( void );
11 extern void ldv_rcu_bh_dec( void );
12 extern void ldv_rcu_sched_dec( void );
13 extern void ldv_srcu_dec( void );
14
15 extern void ldv_check_for_read_section( void );
16
17 #line 1 "/home/vitaly/ldv-launches/work/current--X--net--X--defaultlinux-4.10-rc1.tar.xz--X--147_1a--X--cpachecker/linux-4.10-rc1.tar.xz/csd_deg_dscv/657/dscv_tempdir/dscv/ri/147_1a/net/sched/act_skbmod.c"
18
19 /*
20 * net/sched/act_skbmod.c skb data modifier
21 *
22 * Copyright (c) 2016 Jamal Hadi Salim <jhs@mojatatu.com>
23 *
24 * This program is free software; you can redistribute it and/or modify
25 * it under the terms of the GNU General Public License as published by
26 * the Free Software Foundation; either version 2 of the License, or
27 * (at your option) any later version.
28 */
29
30 #include <linux/module.h>
31 #include <linux/init.h>
32 #include <linux/kernel.h>
33 #include <linux/skbuff.h>
34 #include <linux/rtnetlink.h>
35 #include <net/netlink.h>
36 #include <net/pkt_sched.h>
37
38 #include <linux/tc_act/tc_skbmod.h>
39 #include <net/tc_act/tc_skbmod.h>
40
41 #define SKBMOD_TAB_MASK 15
42
43 static unsigned int skbmod_net_id;
44 static struct tc_action_ops act_skbmod_ops;
45
46 #define MAX_EDIT_LEN ETH_HLEN
/*
 * .act handler: apply the configured Ethernet-header edits to @skb.
 *
 * Updates the action's last-used timestamp and per-CPU byte/packet
 * stats, makes the first MAX_EDIT_LEN bytes of the skb writable, then
 * rewrites the destination MAC, source MAC and/or ethertype (or swaps
 * the two MACs) according to the flags published in d->skbmod_p.
 *
 * Returns the configured verdict (d->tcf_action), or TC_ACT_SHOT if
 * the header could not be made writable or the verdict itself is SHOT.
 */
static int tcf_skbmod_run(struct sk_buff *skb, const struct tc_action *a,
			  struct tcf_result *res)
{
	struct tcf_skbmod *d = to_skbmod(a);
	int action;
	struct tcf_skbmod_params *p;
	u64 flags;
	int err;

	tcf_lastuse_update(&d->tcf_tm);
	bstats_cpu_update(this_cpu_ptr(d->common.cpu_bstats), skb);

	/* XXX: if you are going to edit more fields beyond ethernet header
	 * (example when you add IP header replacement or vlan swap)
	 * then MAX_EDIT_LEN needs to change appropriately
	 */
	err = skb_ensure_writable(skb, MAX_EDIT_LEN);
	if (unlikely(err)) { /* best policy is to drop on the floor */
		qstats_overlimit_inc(this_cpu_ptr(d->common.cpu_qstats));
		return TC_ACT_SHOT;
	}

	rcu_read_lock();
	action = READ_ONCE(d->tcf_action);
	if (unlikely(action == TC_ACT_SHOT)) {
		qstats_overlimit_inc(this_cpu_ptr(d->common.cpu_qstats));
		rcu_read_unlock();
		return action;
	}

	/* Params are published with rcu_assign_pointer() in init; the
	 * flags word is copied out so it can be used after the unlock. */
	p = rcu_dereference(d->skbmod_p);
	flags = p->flags;
	if (flags & SKBMOD_F_DMAC)
		ether_addr_copy(eth_hdr(skb)->h_dest, p->eth_dst);
	if (flags & SKBMOD_F_SMAC)
		ether_addr_copy(eth_hdr(skb)->h_source, p->eth_src);
	if (flags & SKBMOD_F_ETYPE)
		eth_hdr(skb)->h_proto = p->eth_type;
	rcu_read_unlock();

	/* MAC swap touches only the skb (owned by us), not p, so it is
	 * safe outside the RCU read section. */
	if (flags & SKBMOD_F_SWAPMAC) {
		u16 tmpaddr[ETH_ALEN / 2]; /* ether_addr_copy() requirement */
		/*XXX: I am sure we can come up with more efficient swapping*/
		ether_addr_copy((u8 *)tmpaddr, eth_hdr(skb)->h_dest);
		ether_addr_copy(eth_hdr(skb)->h_dest, eth_hdr(skb)->h_source);
		ether_addr_copy(eth_hdr(skb)->h_source, (u8 *)tmpaddr);
	}

	return action;
}
97
/* Netlink attribute policy for TCA_SKBMOD_* attributes: fixed-size
 * parameter struct, 6-byte MAC addresses, and a u16 ethertype. */
static const struct nla_policy skbmod_policy[TCA_SKBMOD_MAX + 1] = {
	[TCA_SKBMOD_PARMS] = { .len = sizeof(struct tc_skbmod) },
	[TCA_SKBMOD_DMAC] = { .len = ETH_ALEN },
	[TCA_SKBMOD_SMAC] = { .len = ETH_ALEN },
	[TCA_SKBMOD_ETYPE] = { .type = NLA_U16 },
};
104
/*
 * .init handler: create a new skbmod action or update an existing one
 * from the netlink attributes in @nla.
 *
 * Parses TCA_SKBMOD_{PARMS,DMAC,SMAC,ETYPE}, allocates a fresh
 * tcf_skbmod_params block, fills it, publishes it with
 * rcu_assign_pointer(), and frees any previous block via kfree_rcu().
 *
 * Returns ACT_P_CREATED for a newly created action, 0 when an existing
 * bound action is reused or updated, or a negative errno on failure.
 */
static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
			   struct nlattr *est, struct tc_action **a,
			   int ovr, int bind)
{
	struct tc_action_net *tn = net_generic(net, skbmod_net_id);
	struct nlattr *tb[TCA_SKBMOD_MAX + 1];
	struct tcf_skbmod_params *p, *p_old;
	struct tc_skbmod *parm;
	struct tcf_skbmod *d;
	bool exists = false;
	u8 *daddr = NULL;
	u8 *saddr = NULL;
	u16 eth_type = 0;
	u32 lflags = 0;
	int ret = 0, err;

	if (!nla)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_SKBMOD_MAX, nla, skbmod_policy);
	if (err < 0)
		return err;

	if (!tb[TCA_SKBMOD_PARMS])
		return -EINVAL;

	/* Collect requested edits; pointers stay valid while nla does. */
	if (tb[TCA_SKBMOD_DMAC]) {
		daddr = nla_data(tb[TCA_SKBMOD_DMAC]);
		lflags |= SKBMOD_F_DMAC;
	}

	if (tb[TCA_SKBMOD_SMAC]) {
		saddr = nla_data(tb[TCA_SKBMOD_SMAC]);
		lflags |= SKBMOD_F_SMAC;
	}

	if (tb[TCA_SKBMOD_ETYPE]) {
		eth_type = nla_get_u16(tb[TCA_SKBMOD_ETYPE]);
		lflags |= SKBMOD_F_ETYPE;
	}

	parm = nla_data(tb[TCA_SKBMOD_PARMS]);
	/* SWAPMAC replaces (does not augment) any individual edit flags. */
	if (parm->flags & SKBMOD_F_SWAPMAC)
		lflags = SKBMOD_F_SWAPMAC;

	exists = tcf_hash_check(tn, parm->index, a, bind);
	if (exists && bind)
		return 0;

	/* At least one edit must have been requested. */
	if (!lflags)
		return -EINVAL;

	if (!exists) {
		ret = tcf_hash_create(tn, parm->index, est, a,
				      &act_skbmod_ops, bind, true);
		if (ret)
			return ret;

		ret = ACT_P_CREATED;
	} else {
		tcf_hash_release(*a, bind);
		if (!ovr)
			return -EEXIST;
	}

	d = to_skbmod(*a);

	ASSERT_RTNL();
	p = kzalloc(sizeof(struct tcf_skbmod_params), GFP_KERNEL);
	if (unlikely(!p)) {
		if (ovr)
			tcf_hash_release(*a, bind);
		/* NOTE(review): when the action was just created above
		 * (ret == ACT_P_CREATED) the new hash entry is not
		 * released on this path — looks like a leak; confirm
		 * against how other actions clean up here. */
		return -ENOMEM;
	}

	p->flags = lflags;
	d->tcf_action = parm->action;

	p_old = rtnl_dereference(d->skbmod_p);

	/* Only an overwrite of a live action can race with readers;
	 * a freshly created action is not yet inserted in the hash. */
	if (ovr)
		spin_lock_bh(&d->tcf_lock);

	if (lflags & SKBMOD_F_DMAC)
		ether_addr_copy(p->eth_dst, daddr);
	if (lflags & SKBMOD_F_SMAC)
		ether_addr_copy(p->eth_src, saddr);
	if (lflags & SKBMOD_F_ETYPE)
		p->eth_type = htons(eth_type);

	rcu_assign_pointer(d->skbmod_p, p);
	if (ovr)
		spin_unlock_bh(&d->tcf_lock);

	/* The displaced parameter block is freed after a grace period. */
	if (p_old)
		kfree_rcu(p_old, rcu);

	if (ret == ACT_P_CREATED)
		tcf_hash_insert(tn, *a);
	return ret;
}
206
207 static void tcf_skbmod_cleanup(struct tc_action *a, int bind)
208 {
209 struct tcf_skbmod *d = to_skbmod(a);
210 struct tcf_skbmod_params *p;
211
212 p = rcu_dereference_protected(d->skbmod_p, 1);
213 kfree_rcu(p, rcu);
214 }
215
216 static int tcf_skbmod_dump(struct sk_buff *skb, struct tc_action *a,
217 int bind, int ref)
218 {
219 struct tcf_skbmod *d = to_skbmod(a);
220 unsigned char *b = skb_tail_pointer(skb);
221 struct tcf_skbmod_params *p = rtnl_dereference(d->skbmod_p);
222 struct tc_skbmod opt = {
223 .index = d->tcf_index,
224 .refcnt = d->tcf_refcnt - ref,
225 .bindcnt = d->tcf_bindcnt - bind,
226 .action = d->tcf_action,
227 };
228 struct tcf_t t;
229
230 opt.flags = p->flags;
231 if (nla_put(skb, TCA_SKBMOD_PARMS, sizeof(opt), &opt))
232 goto nla_put_failure;
233 if ((p->flags & SKBMOD_F_DMAC) &&
234 nla_put(skb, TCA_SKBMOD_DMAC, ETH_ALEN, p->eth_dst))
235 goto nla_put_failure;
236 if ((p->flags & SKBMOD_F_SMAC) &&
237 nla_put(skb, TCA_SKBMOD_SMAC, ETH_ALEN, p->eth_src))
238 goto nla_put_failure;
239 if ((p->flags & SKBMOD_F_ETYPE) &&
240 nla_put_u16(skb, TCA_SKBMOD_ETYPE, ntohs(p->eth_type)))
241 goto nla_put_failure;
242
243 tcf_tm_dump(&t, &d->tcf_tm);
244 if (nla_put_64bit(skb, TCA_SKBMOD_TM, sizeof(t), &t, TCA_SKBMOD_PAD))
245 goto nla_put_failure;
246
247 return skb->len;
248 nla_put_failure:
249 rcu_read_unlock();
250 nlmsg_trim(skb, b);
251 return -1;
252 }
253
254 static int tcf_skbmod_walker(struct net *net, struct sk_buff *skb,
255 struct netlink_callback *cb, int type,
256 const struct tc_action_ops *ops)
257 {
258 struct tc_action_net *tn = net_generic(net, skbmod_net_id);
259
260 return tcf_generic_walker(tn, skb, cb, type, ops);
261 }
262
263 static int tcf_skbmod_search(struct net *net, struct tc_action **a, u32 index)
264 {
265 struct tc_action_net *tn = net_generic(net, skbmod_net_id);
266
267 return tcf_hash_search(tn, a, index);
268 }
269
/* Registration table handed to the TC action core: maps the generic
 * action operations onto this module's skbmod handlers. */
static struct tc_action_ops act_skbmod_ops = {
	.kind = "skbmod",
	.type = TCA_ACT_SKBMOD,
	.owner = THIS_MODULE,
	.act = tcf_skbmod_run,		/* datapath: edit the skb */
	.dump = tcf_skbmod_dump,	/* netlink dump */
	.init = tcf_skbmod_init,	/* create/update */
	.cleanup = tcf_skbmod_cleanup,	/* free private state */
	.walk = tcf_skbmod_walker,	/* iterate instances */
	.lookup = tcf_skbmod_search,	/* lookup by index */
	.size = sizeof(struct tcf_skbmod),
};
282
283 static __net_init int skbmod_init_net(struct net *net)
284 {
285 struct tc_action_net *tn = net_generic(net, skbmod_net_id);
286
287 return tc_action_net_init(tn, &act_skbmod_ops, SKBMOD_TAB_MASK);
288 }
289
290 static void __net_exit skbmod_exit_net(struct net *net)
291 {
292 struct tc_action_net *tn = net_generic(net, skbmod_net_id);
293
294 tc_action_net_exit(tn);
295 }
296
/* Per-netns hooks: a tc_action_net table is allocated per namespace
 * (size/.id) and initialized/destroyed by the two callbacks above. */
static struct pernet_operations skbmod_net_ops = {
	.init = skbmod_init_net,
	.exit = skbmod_exit_net,
	.id = &skbmod_net_id,
	.size = sizeof(struct tc_action_net),
};
303
304 MODULE_AUTHOR("Jamal Hadi Salim, <jhs@mojatatu.com>");
305 MODULE_DESCRIPTION("SKB data mod-ing");
306 MODULE_LICENSE("GPL");
307
/* Module load: register the skbmod action (and its per-netns ops)
 * with the TC action subsystem. */
static int __init skbmod_init_module(void)
{
	return tcf_register_action(&act_skbmod_ops, &skbmod_net_ops);
}
312
/* Module unload: unregister the skbmod action and its per-netns ops. */
static void __exit skbmod_cleanup_module(void)
{
	tcf_unregister_action(&act_skbmod_ops, &skbmod_net_ops);
}
317
318 module_init(skbmod_init_module);
319 module_exit(skbmod_cleanup_module);
320
321
322
323
324
325 /* LDV_COMMENT_BEGIN_MAIN */
326 #ifdef LDV_MAIN0_sequence_infinite_withcheck_stateful
327
328 /*###########################################################################*/
329
330 /*############## Driver Environment Generator 0.2 output ####################*/
331
332 /*###########################################################################*/
333
334
335
336 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test if all kernel resources are correctly released by driver before driver will be unloaded. */
337 void ldv_check_final_state(void);
338
339 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result. */
340 void ldv_check_return_value(int res);
341
342 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result of probe() function. */
343 void ldv_check_return_value_probe(int res);
344
345 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Initializes the model. */
346 void ldv_initialize(void);
347
348 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Reinitializes the model between distinct model function calls. */
349 void ldv_handler_precall(void);
350
351 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Returns arbitrary integer value. */
352 int nondet_int(void);
353
354 /* LDV_COMMENT_VAR_DECLARE_LDV Special variable for LDV verifier. */
355 int LDV_IN_INTERRUPT;
356
357 /* LDV_COMMENT_FUNCTION_MAIN Main function for LDV verifier. */
358 void ldv_main0_sequence_infinite_withcheck_stateful(void) {
359
360
361
362 /* LDV_COMMENT_BEGIN_VARIABLE_DECLARATION_PART */
363 /*============================= VARIABLE DECLARATION PART =============================*/
364 /** STRUCT: struct type: tc_action_ops, struct name: act_skbmod_ops **/
365 /* content: static int tcf_skbmod_run(struct sk_buff *skb, const struct tc_action *a, struct tcf_result *res)*/
366 /* LDV_COMMENT_BEGIN_PREP */
367 #define SKBMOD_TAB_MASK 15
368 #define MAX_EDIT_LEN ETH_HLEN
369 /* LDV_COMMENT_END_PREP */
370 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "tcf_skbmod_run" */
371 struct sk_buff * var_group1;
372 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "tcf_skbmod_run" */
373 const struct tc_action * var_tcf_skbmod_run_0_p1;
374 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "tcf_skbmod_run" */
375 struct tcf_result * var_tcf_skbmod_run_0_p2;
376 /* content: static int tcf_skbmod_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)*/
377 /* LDV_COMMENT_BEGIN_PREP */
378 #define SKBMOD_TAB_MASK 15
379 #define MAX_EDIT_LEN ETH_HLEN
380 /* LDV_COMMENT_END_PREP */
381 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "tcf_skbmod_dump" */
382 struct tc_action * var_group2;
383 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "tcf_skbmod_dump" */
384 int var_tcf_skbmod_dump_3_p2;
385 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "tcf_skbmod_dump" */
386 int var_tcf_skbmod_dump_3_p3;
387 /* content: static int tcf_skbmod_init(struct net *net, struct nlattr *nla, struct nlattr *est, struct tc_action **a, int ovr, int bind)*/
388 /* LDV_COMMENT_BEGIN_PREP */
389 #define SKBMOD_TAB_MASK 15
390 #define MAX_EDIT_LEN ETH_HLEN
391 /* LDV_COMMENT_END_PREP */
392 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "tcf_skbmod_init" */
393 struct net * var_group3;
394 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "tcf_skbmod_init" */
395 struct nlattr * var_group4;
396 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "tcf_skbmod_init" */
397 struct nlattr * var_tcf_skbmod_init_1_p2;
398 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "tcf_skbmod_init" */
399 struct tc_action ** var_tcf_skbmod_init_1_p3;
400 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "tcf_skbmod_init" */
401 int var_tcf_skbmod_init_1_p4;
402 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "tcf_skbmod_init" */
403 int var_tcf_skbmod_init_1_p5;
404 /* content: static void tcf_skbmod_cleanup(struct tc_action *a, int bind)*/
405 /* LDV_COMMENT_BEGIN_PREP */
406 #define SKBMOD_TAB_MASK 15
407 #define MAX_EDIT_LEN ETH_HLEN
408 /* LDV_COMMENT_END_PREP */
409 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "tcf_skbmod_cleanup" */
410 int var_tcf_skbmod_cleanup_2_p1;
411 /* content: static int tcf_skbmod_walker(struct net *net, struct sk_buff *skb, struct netlink_callback *cb, int type, const struct tc_action_ops *ops)*/
412 /* LDV_COMMENT_BEGIN_PREP */
413 #define SKBMOD_TAB_MASK 15
414 #define MAX_EDIT_LEN ETH_HLEN
415 /* LDV_COMMENT_END_PREP */
416 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "tcf_skbmod_walker" */
417 struct netlink_callback * var_tcf_skbmod_walker_4_p2;
418 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "tcf_skbmod_walker" */
419 int var_tcf_skbmod_walker_4_p3;
420 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "tcf_skbmod_walker" */
421 const struct tc_action_ops * var_tcf_skbmod_walker_4_p4;
422 /* content: static int tcf_skbmod_search(struct net *net, struct tc_action **a, u32 index)*/
423 /* LDV_COMMENT_BEGIN_PREP */
424 #define SKBMOD_TAB_MASK 15
425 #define MAX_EDIT_LEN ETH_HLEN
426 /* LDV_COMMENT_END_PREP */
427 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "tcf_skbmod_search" */
428 struct tc_action ** var_group5;
429 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "tcf_skbmod_search" */
430 u32 var_tcf_skbmod_search_5_p2;
431
432 /** STRUCT: struct type: pernet_operations, struct name: skbmod_net_ops **/
433 /* content: static __net_init int skbmod_init_net(struct net *net)*/
434 /* LDV_COMMENT_BEGIN_PREP */
435 #define SKBMOD_TAB_MASK 15
436 #define MAX_EDIT_LEN ETH_HLEN
437 /* LDV_COMMENT_END_PREP */
438 /* content: static void __net_exit skbmod_exit_net(struct net *net)*/
439 /* LDV_COMMENT_BEGIN_PREP */
440 #define SKBMOD_TAB_MASK 15
441 #define MAX_EDIT_LEN ETH_HLEN
442 /* LDV_COMMENT_END_PREP */
443
444
445
446
447 /* LDV_COMMENT_END_VARIABLE_DECLARATION_PART */
448 /* LDV_COMMENT_BEGIN_VARIABLE_INITIALIZING_PART */
449 /*============================= VARIABLE INITIALIZING PART =============================*/
450 LDV_IN_INTERRUPT=1;
451
452
453
454
455 /* LDV_COMMENT_END_VARIABLE_INITIALIZING_PART */
456 /* LDV_COMMENT_BEGIN_FUNCTION_CALL_SECTION */
457 /*============================= FUNCTION CALL SECTION =============================*/
458 /* LDV_COMMENT_FUNCTION_CALL Initialize LDV model. */
459 ldv_initialize();
460
461 /** INIT: init_type: ST_MODULE_INIT **/
462 /* content: static int __init skbmod_init_module(void)*/
463 /* LDV_COMMENT_BEGIN_PREP */
464 #define SKBMOD_TAB_MASK 15
465 #define MAX_EDIT_LEN ETH_HLEN
466 /* LDV_COMMENT_END_PREP */
467 /* LDV_COMMENT_FUNCTION_CALL Kernel calls driver init function after driver loading to kernel. This function declared as "MODULE_INIT(function name)". */
468 ldv_handler_precall();
469 if(skbmod_init_module())
470 goto ldv_final;
471
472
473
474
475
476 while( nondet_int()
477 ) {
478
479 switch(nondet_int()) {
480
481 case 0: {
482
483 /** STRUCT: struct type: tc_action_ops, struct name: act_skbmod_ops **/
484
485
486 /* content: static int tcf_skbmod_run(struct sk_buff *skb, const struct tc_action *a, struct tcf_result *res)*/
487 /* LDV_COMMENT_BEGIN_PREP */
488 #define SKBMOD_TAB_MASK 15
489 #define MAX_EDIT_LEN ETH_HLEN
490 /* LDV_COMMENT_END_PREP */
491 /* LDV_COMMENT_FUNCTION_CALL Function from field "act" from driver structure with callbacks "act_skbmod_ops" */
492 ldv_handler_precall();
493 tcf_skbmod_run( var_group1, var_tcf_skbmod_run_0_p1, var_tcf_skbmod_run_0_p2);
494
495
496
497
498 }
499
500 break;
501 case 1: {
502
503 /** STRUCT: struct type: tc_action_ops, struct name: act_skbmod_ops **/
504
505
506 /* content: static int tcf_skbmod_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)*/
507 /* LDV_COMMENT_BEGIN_PREP */
508 #define SKBMOD_TAB_MASK 15
509 #define MAX_EDIT_LEN ETH_HLEN
510 /* LDV_COMMENT_END_PREP */
511 /* LDV_COMMENT_FUNCTION_CALL Function from field "dump" from driver structure with callbacks "act_skbmod_ops" */
512 ldv_handler_precall();
513 tcf_skbmod_dump( var_group1, var_group2, var_tcf_skbmod_dump_3_p2, var_tcf_skbmod_dump_3_p3);
514
515
516
517
518 }
519
520 break;
521 case 2: {
522
523 /** STRUCT: struct type: tc_action_ops, struct name: act_skbmod_ops **/
524
525
526 /* content: static int tcf_skbmod_init(struct net *net, struct nlattr *nla, struct nlattr *est, struct tc_action **a, int ovr, int bind)*/
527 /* LDV_COMMENT_BEGIN_PREP */
528 #define SKBMOD_TAB_MASK 15
529 #define MAX_EDIT_LEN ETH_HLEN
530 /* LDV_COMMENT_END_PREP */
531 /* LDV_COMMENT_FUNCTION_CALL Function from field "init" from driver structure with callbacks "act_skbmod_ops" */
532 ldv_handler_precall();
533 tcf_skbmod_init( var_group3, var_group4, var_tcf_skbmod_init_1_p2, var_tcf_skbmod_init_1_p3, var_tcf_skbmod_init_1_p4, var_tcf_skbmod_init_1_p5);
534
535
536
537
538 }
539
540 break;
541 case 3: {
542
543 /** STRUCT: struct type: tc_action_ops, struct name: act_skbmod_ops **/
544
545
546 /* content: static void tcf_skbmod_cleanup(struct tc_action *a, int bind)*/
547 /* LDV_COMMENT_BEGIN_PREP */
548 #define SKBMOD_TAB_MASK 15
549 #define MAX_EDIT_LEN ETH_HLEN
550 /* LDV_COMMENT_END_PREP */
551 /* LDV_COMMENT_FUNCTION_CALL Function from field "cleanup" from driver structure with callbacks "act_skbmod_ops" */
552 ldv_handler_precall();
553 tcf_skbmod_cleanup( var_group2, var_tcf_skbmod_cleanup_2_p1);
554
555
556
557
558 }
559
560 break;
561 case 4: {
562
563 /** STRUCT: struct type: tc_action_ops, struct name: act_skbmod_ops **/
564
565
566 /* content: static int tcf_skbmod_walker(struct net *net, struct sk_buff *skb, struct netlink_callback *cb, int type, const struct tc_action_ops *ops)*/
567 /* LDV_COMMENT_BEGIN_PREP */
568 #define SKBMOD_TAB_MASK 15
569 #define MAX_EDIT_LEN ETH_HLEN
570 /* LDV_COMMENT_END_PREP */
571 /* LDV_COMMENT_FUNCTION_CALL Function from field "walk" from driver structure with callbacks "act_skbmod_ops" */
572 ldv_handler_precall();
573 tcf_skbmod_walker( var_group3, var_group1, var_tcf_skbmod_walker_4_p2, var_tcf_skbmod_walker_4_p3, var_tcf_skbmod_walker_4_p4);
574
575
576
577
578 }
579
580 break;
581 case 5: {
582
583 /** STRUCT: struct type: tc_action_ops, struct name: act_skbmod_ops **/
584
585
586 /* content: static int tcf_skbmod_search(struct net *net, struct tc_action **a, u32 index)*/
587 /* LDV_COMMENT_BEGIN_PREP */
588 #define SKBMOD_TAB_MASK 15
589 #define MAX_EDIT_LEN ETH_HLEN
590 /* LDV_COMMENT_END_PREP */
591 /* LDV_COMMENT_FUNCTION_CALL Function from field "lookup" from driver structure with callbacks "act_skbmod_ops" */
592 ldv_handler_precall();
593 tcf_skbmod_search( var_group3, var_group5, var_tcf_skbmod_search_5_p2);
594
595
596
597
598 }
599
600 break;
601 case 6: {
602
603 /** STRUCT: struct type: pernet_operations, struct name: skbmod_net_ops **/
604
605
606 /* content: static __net_init int skbmod_init_net(struct net *net)*/
607 /* LDV_COMMENT_BEGIN_PREP */
608 #define SKBMOD_TAB_MASK 15
609 #define MAX_EDIT_LEN ETH_HLEN
610 /* LDV_COMMENT_END_PREP */
611 /* LDV_COMMENT_FUNCTION_CALL Function from field "init" from driver structure with callbacks "skbmod_net_ops" */
612 ldv_handler_precall();
613 skbmod_init_net( var_group3);
614
615
616
617
618 }
619
620 break;
621 case 7: {
622
623 /** STRUCT: struct type: pernet_operations, struct name: skbmod_net_ops **/
624
625
626 /* content: static void __net_exit skbmod_exit_net(struct net *net)*/
627 /* LDV_COMMENT_BEGIN_PREP */
628 #define SKBMOD_TAB_MASK 15
629 #define MAX_EDIT_LEN ETH_HLEN
630 /* LDV_COMMENT_END_PREP */
631 /* LDV_COMMENT_FUNCTION_CALL Function from field "exit" from driver structure with callbacks "skbmod_net_ops" */
632 ldv_handler_precall();
633 skbmod_exit_net( var_group3);
634
635
636
637
638 }
639
640 break;
641 default: break;
642
643 }
644
645 }
646
647 ldv_module_exit:
648
649 /** INIT: init_type: ST_MODULE_EXIT **/
650 /* content: static void __exit skbmod_cleanup_module(void)*/
651 /* LDV_COMMENT_BEGIN_PREP */
652 #define SKBMOD_TAB_MASK 15
653 #define MAX_EDIT_LEN ETH_HLEN
654 /* LDV_COMMENT_END_PREP */
655 /* LDV_COMMENT_FUNCTION_CALL Kernel calls driver release function before driver will be uploaded from kernel. This function declared as "MODULE_EXIT(function name)". */
656 ldv_handler_precall();
657 skbmod_cleanup_module();
658
659 /* LDV_COMMENT_FUNCTION_CALL Checks that all resources and locks are correctly released before the driver will be unloaded. */
660 ldv_final: ldv_check_final_state();
661
662 /* LDV_COMMENT_END_FUNCTION_CALL_SECTION */
663 return;
664
665 }
666 #endif
667
668 /* LDV_COMMENT_END_MAIN */
669
670 #line 17 "/home/vitaly/ldv-launches/work/current--X--net--X--defaultlinux-4.10-rc1.tar.xz--X--147_1a--X--cpachecker/linux-4.10-rc1.tar.xz/csd_deg_dscv/657/dscv_tempdir/dscv/ri/147_1a/net/sched/act_skbmod.o.c.prepared" 1
2 #include <verifier/rcv.h>
3
4 /* LDV_COMMENT_MODEL_STATE Indicates the level of rcu_lock nesting.*/
5 int ldv_rcu_nested = 0;
6 /* LDV_COMMENT_MODEL_STATE Indicates the level of rcu_lock_bh nesting.*/
7 int ldv_rcu_bh_nested = 0;
8 /* LDV_COMMENT_MODEL_STATE Indicates the level of rcu_lock_sched nesting.*/
9 int ldv_rcu_sched_nested = 0;
10 /* LDV_COMMENT_MODEL_STATE Indicates the level of srcu_lock nesting.*/
11 int ldv_srcu_nested = 0;
12
13 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_rcu_inc') Entry in rcu_read_lock/unlock section.*/
14 void ldv_rcu_inc( void )
15 {
16 /* LDV_COMMENT_CHANGE_STATE Increments the level of rcu_read_lock nesting.*/
17 ++ldv_rcu_nested;
18 }
19
20 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_rcu_dec') Exit from rcu_read_lock/unlock section.*/
21 void ldv_rcu_dec( void )
22 {
23 /* LDV_COMMENT_CHANGE_STATE Decrements the level of rcu_read_lock nesting.*/
24 --ldv_rcu_nested;
25 }
26
27 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_rcu_bh_inc') Entry in rcu_read_lock_bh/unlock_bh section.*/
28 void ldv_rcu_bh_inc( void )
29 {
30 /* LDV_COMMENT_CHANGE_STATE Increments the level of rcu_read_lock_bh nesting.*/
31 ++ldv_rcu_bh_nested;
32 }
33
34 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_rcu_bh_dec') Exit from rcu_read_lock_bh/unlock_bh section.*/
35 void ldv_rcu_bh_dec( void )
36 {
37 /* LDV_COMMENT_CHANGE_STATE Decrements the level of rcu_read_lock_bh nesting.*/
38 --ldv_rcu_bh_nested;
39 }
40
41
42 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_rcu_sched_inc') Entry in rcu_read_lock_sched/unlock_sched section.*/
43 void ldv_rcu_sched_inc( void )
44 {
45 /* LDV_COMMENT_CHANGE_STATE Increments the level of rcu_read_lock_sched nesting.*/
46 ++ldv_rcu_sched_nested;
47 }
48
49 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_rcu_sched_dec') Exit from rcu_read_lock_sched/unlock_sched section.*/
50 void ldv_rcu_sched_dec( void )
51 {
52 /* LDV_COMMENT_CHANGE_STATE Decrements the level of rcu_read_lock_sched nesting.*/
53 --ldv_rcu_sched_nested;
54 }
55
56 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_srcu_inc') Entry in srcu_read_lock/unlock section.*/
57 void ldv_srcu_inc( void )
58 {
59 /* LDV_COMMENT_CHANGE_STATE Increments the level of srcu_read_lock nesting.*/
60 ++ldv_srcu_nested;
61 }
62
63 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_srcu_dec') Exit from srcu_read_lock/unlock section.*/
64 void ldv_srcu_dec( void )
65 {
66 /* LDV_COMMENT_CHANGE_STATE Decrements the level of srcu_read_lock nesting.*/
67 --ldv_srcu_nested;
68 }
69
70 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_check_for_read_section') Checks that no one opened rcu_lock/unlock section.*/
71 void ldv_check_for_read_section( void )
72 {
73 /* LDV_COMMENT_ASSERT checks the count of opened rcu_lock/unlock sections.*/
74 ldv_assert( ldv_rcu_nested == 0 );
75 /* LDV_COMMENT_ASSERT checks the count of opened rcu_lock_bh/unlock_bh sections.*/
76 ldv_assert( ldv_rcu_bh_nested == 0 );
77 /* LDV_COMMENT_ASSERT checks the count of opened rcu_lock_sched/unlock_sched sections.*/
78 ldv_assert( ldv_rcu_sched_nested == 0 );
79 /* LDV_COMMENT_ASSERT checks the count of opened srcu_lock/unlock sections.*/
80 ldv_assert( ldv_srcu_nested == 0 );
81 }
82
83 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_check_final_state') Checks that all rcu_lock/unlock sections closed.*/
84 void ldv_check_final_state( void )
85 {
/* At module unload no RCU/SRCU read-side section may still be open;
   delegate to the common balance check above. */
86 ldv_check_for_read_section();
87 } 1 #ifndef _LDV_RCV_H_
2 #define _LDV_RCV_H_
3
4 /* If expr evaluates to zero, ldv_assert() causes a program to reach the error
5 label like the standard assert(). */
6 #define ldv_assert(expr) ((expr) ? 0 : ldv_error())
7
8 /* The error label wrapper. It is used because of some static verifiers (like
9 BLAST) don't accept multiple error labels through a program. */
10 static inline void ldv_error(void)
11 {
/* Single error location: per the comment above, some static verifiers
   (e.g. BLAST) accept only one error label per program, so every
   failed ldv_assert() funnels here. NOTE(review): the label name is
   significant to the tooling - do not rename. */
12 LDV_ERROR: goto LDV_ERROR;
13 }
14
15 /* If expr evaluates to zero, ldv_assume() causes an infinite loop that is
16 avoided by verifiers. */
17 #define ldv_assume(expr) ((expr) ? 0 : ldv_stop())
18
19 /* Infinite loop, that causes verifiers to skip such paths. */
20 static inline void ldv_stop(void) {
/* Deliberate infinite self-loop: verifiers never get past it, so any
   path on which ldv_assume()'s condition is false is discarded. */
21 LDV_STOP: goto LDV_STOP;
22 }
23
24 /* Special nondeterministic functions. */
25 int ldv_undef_int(void);
26 void *ldv_undef_ptr(void);
27 unsigned long ldv_undef_ulong(void);
28 long ldv_undef_long(void);
29 /* Return nondeterministic negative integer number. */
/* Produce a nondeterministic integer constrained to be strictly negative. */
static inline int ldv_undef_int_negative(void)
{
	int value = ldv_undef_int();

	/* Prune all paths where the nondeterministic value is >= 0. */
	ldv_assume(value < 0);

	return value;
}
38 /* Return nondeterministic nonpositive integer number. */
/* Produce a nondeterministic integer constrained to be zero or negative. */
static inline int ldv_undef_int_nonpositive(void)
{
	int value = ldv_undef_int();

	/* Prune all paths where the nondeterministic value is > 0. */
	ldv_assume(value <= 0);

	return value;
}
47
48 /* Add explicit model for __builin_expect GCC function. Without the model a
49 return value will be treated as nondetermined by verifiers. */
50 static inline long __builtin_expect(long exp, long c)
51 {
/* The branch-probability hint 'c' has no semantic effect; return the
   expression value unchanged so verifiers see a determined result. */
52 return exp;
53 }
54
55 /* This function causes the program to exit abnormally. GCC implements this
56 function by using a target-dependent mechanism (such as intentionally executing
57 an illegal instruction) or by calling abort. The mechanism used may vary from
58 release to release so you should not rely on any particular implementation.
59 http://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html */
60 static inline void __builtin_trap(void)
61 {
/* Model a trap as an unconditional assertion failure, i.e. a jump to
   the single LDV error label. */
62 ldv_assert(0);
63 }
64
65 /* The constant is for simulating an error of ldv_undef_ptr() function. */
66 #define LDV_PTR_MAX 2012
67
68 #endif /* _LDV_RCV_H_ */ 1 #ifndef __LINUX_COMPILER_H
2 #define __LINUX_COMPILER_H
3
4 #ifndef __ASSEMBLY__
5
6 #ifdef __CHECKER__
7 # define __user __attribute__((noderef, address_space(1)))
8 # define __kernel __attribute__((address_space(0)))
9 # define __safe __attribute__((safe))
10 # define __force __attribute__((force))
11 # define __nocast __attribute__((nocast))
12 # define __iomem __attribute__((noderef, address_space(2)))
13 # define __must_hold(x) __attribute__((context(x,1,1)))
14 # define __acquires(x) __attribute__((context(x,0,1)))
15 # define __releases(x) __attribute__((context(x,1,0)))
16 # define __acquire(x) __context__(x,1)
17 # define __release(x) __context__(x,-1)
18 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
19 # define __percpu __attribute__((noderef, address_space(3)))
20 #ifdef CONFIG_SPARSE_RCU_POINTER
21 # define __rcu __attribute__((noderef, address_space(4)))
22 #else /* CONFIG_SPARSE_RCU_POINTER */
23 # define __rcu
24 #endif /* CONFIG_SPARSE_RCU_POINTER */
25 # define __private __attribute__((noderef))
26 extern void __chk_user_ptr(const volatile void __user *);
27 extern void __chk_io_ptr(const volatile void __iomem *);
28 # define ACCESS_PRIVATE(p, member) (*((typeof((p)->member) __force *) &(p)->member))
29 #else /* __CHECKER__ */
30 # define __user
31 # define __kernel
32 # define __safe
33 # define __force
34 # define __nocast
35 # define __iomem
36 # define __chk_user_ptr(x) (void)0
37 # define __chk_io_ptr(x) (void)0
38 # define __builtin_warning(x, y...) (1)
39 # define __must_hold(x)
40 # define __acquires(x)
41 # define __releases(x)
42 # define __acquire(x) (void)0
43 # define __release(x) (void)0
44 # define __cond_lock(x,c) (c)
45 # define __percpu
46 # define __rcu
47 # define __private
48 # define ACCESS_PRIVATE(p, member) ((p)->member)
49 #endif /* __CHECKER__ */
50
51 /* Indirect macros required for expanded argument pasting, eg. __LINE__. */
52 #define ___PASTE(a,b) a##b
53 #define __PASTE(a,b) ___PASTE(a,b)
54
55 #ifdef __KERNEL__
56
57 #ifdef __GNUC__
58 #include <linux/compiler-gcc.h>
59 #endif
60
61 #if defined(CC_USING_HOTPATCH) && !defined(__CHECKER__)
62 #define notrace __attribute__((hotpatch(0,0)))
63 #else
64 #define notrace __attribute__((no_instrument_function))
65 #endif
66
67 /* Intel compiler defines __GNUC__. So we will overwrite implementations
68 * coming from above header files here
69 */
70 #ifdef __INTEL_COMPILER
71 # include <linux/compiler-intel.h>
72 #endif
73
74 /* Clang compiler defines __GNUC__. So we will overwrite implementations
75 * coming from above header files here
76 */
77 #ifdef __clang__
78 #include <linux/compiler-clang.h>
79 #endif
80
81 /*
82 * Generic compiler-dependent macros required for kernel
83 * build go below this comment. Actual compiler/compiler version
84 * specific implementations come from the above header files
85 */
86
87 struct ftrace_branch_data {
88 const char *func;
89 const char *file;
90 unsigned line;
91 union {
92 struct {
93 unsigned long correct;
94 unsigned long incorrect;
95 };
96 struct {
97 unsigned long miss;
98 unsigned long hit;
99 };
100 unsigned long miss_hit[2];
101 };
102 };
103
104 /*
105 * Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code
106 * to disable branch tracing on a per file basis.
107 */
108 #if defined(CONFIG_TRACE_BRANCH_PROFILING) \
109 && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
110 void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
111
112 #define likely_notrace(x) __builtin_expect(!!(x), 1)
113 #define unlikely_notrace(x) __builtin_expect(!!(x), 0)
114
115 #define __branch_check__(x, expect) ({ \
116 int ______r; \
117 static struct ftrace_branch_data \
118 __attribute__((__aligned__(4))) \
119 __attribute__((section("_ftrace_annotated_branch"))) \
120 ______f = { \
121 .func = __func__, \
122 .file = __FILE__, \
123 .line = __LINE__, \
124 }; \
125 ______r = likely_notrace(x); \
126 ftrace_likely_update(&______f, ______r, expect); \
127 ______r; \
128 })
129
130 /*
131 * Using __builtin_constant_p(x) to ignore cases where the return
132 * value is always the same. This idea is taken from a similar patch
133 * written by Daniel Walker.
134 */
135 # ifndef likely
136 # define likely(x) (__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 1))
137 # endif
138 # ifndef unlikely
139 # define unlikely(x) (__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 0))
140 # endif
141
142 #ifdef CONFIG_PROFILE_ALL_BRANCHES
143 /*
144 * "Define 'is'", Bill Clinton
145 * "Define 'if'", Steven Rostedt
146 */
147 #define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
148 #define __trace_if(cond) \
149 if (__builtin_constant_p(!!(cond)) ? !!(cond) : \
150 ({ \
151 int ______r; \
152 static struct ftrace_branch_data \
153 __attribute__((__aligned__(4))) \
154 __attribute__((section("_ftrace_branch"))) \
155 ______f = { \
156 .func = __func__, \
157 .file = __FILE__, \
158 .line = __LINE__, \
159 }; \
160 ______r = !!(cond); \
161 ______f.miss_hit[______r]++; \
162 ______r; \
163 }))
164 #endif /* CONFIG_PROFILE_ALL_BRANCHES */
165
166 #else
167 # define likely(x) __builtin_expect(!!(x), 1)
168 # define unlikely(x) __builtin_expect(!!(x), 0)
169 #endif
170
171 /* Optimization barrier */
172 #ifndef barrier
173 # define barrier() __memory_barrier()
174 #endif
175
176 #ifndef barrier_data
177 # define barrier_data(ptr) barrier()
178 #endif
179
180 /* Unreachable code */
181 #ifndef unreachable
182 # define unreachable() do { } while (1)
183 #endif
184
185 /*
186 * KENTRY - kernel entry point
187 * This can be used to annotate symbols (functions or data) that are used
188 * without their linker symbol being referenced explicitly. For example,
189 * interrupt vector handlers, or functions in the kernel image that are found
190 * programatically.
191 *
192 * Not required for symbols exported with EXPORT_SYMBOL, or initcalls. Those
193 * are handled in their own way (with KEEP() in linker scripts).
194 *
195 * KENTRY can be avoided if the symbols in question are marked as KEEP() in the
196 * linker script. For example an architecture could KEEP() its entire
197 * boot/exception vector code rather than annotate each function and data.
198 */
199 #ifndef KENTRY
200 # define KENTRY(sym) \
201 extern typeof(sym) sym; \
202 static const unsigned long __kentry_##sym \
203 __used \
204 __attribute__((section("___kentry" "+" #sym ), used)) \
205 = (unsigned long)&sym;
206 #endif
207
208 #ifndef RELOC_HIDE
209 # define RELOC_HIDE(ptr, off) \
210 ({ unsigned long __ptr; \
211 __ptr = (unsigned long) (ptr); \
212 (typeof(ptr)) (__ptr + (off)); })
213 #endif
214
215 #ifndef OPTIMIZER_HIDE_VAR
216 #define OPTIMIZER_HIDE_VAR(var) barrier()
217 #endif
218
219 /* Not-quite-unique ID. */
220 #ifndef __UNIQUE_ID
221 # define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
222 #endif
223
224 #include <uapi/linux/types.h>
225
226 #define __READ_ONCE_SIZE \
227 ({ \
228 switch (size) { \
229 case 1: *(__u8 *)res = *(volatile __u8 *)p; break; \
230 case 2: *(__u16 *)res = *(volatile __u16 *)p; break; \
231 case 4: *(__u32 *)res = *(volatile __u32 *)p; break; \
232 case 8: *(__u64 *)res = *(volatile __u64 *)p; break; \
233 default: \
234 barrier(); \
235 __builtin_memcpy((void *)res, (const void *)p, size); \
236 barrier(); \
237 } \
238 })
239
240 static __always_inline
241 void __read_once_size(const volatile void *p, void *res, int size)
242 {
/* Copy 'size' bytes from the volatile location *p into *res; sizes
   1/2/4/8 are performed as single volatile loads, anything else via
   memcpy between compiler barriers (see __READ_ONCE_SIZE above). */
243 __READ_ONCE_SIZE;
244 }
245
246 #ifdef CONFIG_KASAN
247 /*
248 * This function is not 'inline' because __no_sanitize_address confilcts
249 * with inlining. Attempt to inline it may cause a build failure.
250 * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
251 * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
252 */
253 static __no_sanitize_address __maybe_unused
254 void __read_once_size_nocheck(const volatile void *p, void *res, int size)
255 {
/* KASAN build: same access pattern as __read_once_size() but exempt
   from address-sanitizer instrumentation; kept out-of-line because
   __no_sanitize_address conflicts with inlining (see comment above). */
256 __READ_ONCE_SIZE;
257 }
258 #else
259 static __always_inline
260 void __read_once_size_nocheck(const volatile void *p, void *res, int size)
261 {
/* Non-KASAN build: nothing to exempt, so this is identical to
   __read_once_size(). */
262 __READ_ONCE_SIZE;
263 }
264 #endif
265
266 static __always_inline void __write_once_size(volatile void *p, void *res, int size)
267 {
/* Mirror of __read_once_size(): store 'size' bytes from *res to the
   volatile location *p as a single volatile store for 1/2/4/8 bytes,
   otherwise via memcpy between compiler barriers. */
268 switch (size) {
269 case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
270 case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
271 case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
272 case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
273 default:
274 barrier();
275 __builtin_memcpy((void *)p, (const void *)res, size);
276 barrier();
277 }
278 }
279
280 /*
281 * Prevent the compiler from merging or refetching reads or writes. The
282 * compiler is also forbidden from reordering successive instances of
283 * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the
284 * compiler is aware of some particular ordering. One way to make the
285 * compiler aware of ordering is to put the two invocations of READ_ONCE,
286 * WRITE_ONCE or ACCESS_ONCE() in different C statements.
287 *
288 * In contrast to ACCESS_ONCE these two macros will also work on aggregate
289 * data types like structs or unions. If the size of the accessed data
290 * type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
291 * READ_ONCE() and WRITE_ONCE() will fall back to memcpy(). There's at
292 * least two memcpy()s: one for the __builtin_memcpy() and then one for
293 * the macro doing the copy of variable - '__u' allocated on the stack.
294 *
295 * Their two major use cases are: (1) Mediating communication between
296 * process-level code and irq/NMI handlers, all running on the same CPU,
297 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
298 * mutilate accesses that either do not require ordering or that interact
299 * with an explicit memory barrier or atomic instruction that provides the
300 * required ordering.
301 */
302
303 #define __READ_ONCE(x, check) \
304 ({ \
305 union { typeof(x) __val; char __c[1]; } __u; \
306 if (check) \
307 __read_once_size(&(x), __u.__c, sizeof(x)); \
308 else \
309 __read_once_size_nocheck(&(x), __u.__c, sizeof(x)); \
310 __u.__val; \
311 })
312 #define READ_ONCE(x) __READ_ONCE(x, 1)
313
314 /*
315 * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need
316 * to hide memory access from KASAN.
317 */
318 #define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)
319
320 #define WRITE_ONCE(x, val) \
321 ({ \
322 union { typeof(x) __val; char __c[1]; } __u = \
323 { .__val = (__force typeof(x)) (val) }; \
324 __write_once_size(&(x), __u.__c, sizeof(x)); \
325 __u.__val; \
326 })
327
328 #endif /* __KERNEL__ */
329
330 #endif /* __ASSEMBLY__ */
331
332 #ifdef __KERNEL__
333 /*
334 * Allow us to mark functions as 'deprecated' and have gcc emit a nice
335 * warning for each use, in hopes of speeding the functions removal.
336 * Usage is:
337 * int __deprecated foo(void)
338 */
339 #ifndef __deprecated
340 # define __deprecated /* unimplemented */
341 #endif
342
343 #ifdef MODULE
344 #define __deprecated_for_modules __deprecated
345 #else
346 #define __deprecated_for_modules
347 #endif
348
349 #ifndef __must_check
350 #define __must_check
351 #endif
352
353 #ifndef CONFIG_ENABLE_MUST_CHECK
354 #undef __must_check
355 #define __must_check
356 #endif
357 #ifndef CONFIG_ENABLE_WARN_DEPRECATED
358 #undef __deprecated
359 #undef __deprecated_for_modules
360 #define __deprecated
361 #define __deprecated_for_modules
362 #endif
363
364 #ifndef __malloc
365 #define __malloc
366 #endif
367
368 /*
369 * Allow us to avoid 'defined but not used' warnings on functions and data,
370 * as well as force them to be emitted to the assembly file.
371 *
372 * As of gcc 3.4, static functions that are not marked with attribute((used))
373 * may be elided from the assembly file. As of gcc 3.4, static data not so
374 * marked will not be elided, but this may change in a future gcc version.
375 *
376 * NOTE: Because distributions shipped with a backported unit-at-a-time
377 * compiler in gcc 3.3, we must define __used to be __attribute__((used))
378 * for gcc >=3.3 instead of 3.4.
379 *
380 * In prior versions of gcc, such functions and data would be emitted, but
381 * would be warned about except with attribute((unused)).
382 *
383 * Mark functions that are referenced only in inline assembly as __used so
384 * the code is emitted even though it appears to be unreferenced.
385 */
386 #ifndef __used
387 # define __used /* unimplemented */
388 #endif
389
390 #ifndef __maybe_unused
391 # define __maybe_unused /* unimplemented */
392 #endif
393
394 #ifndef __always_unused
395 # define __always_unused /* unimplemented */
396 #endif
397
398 #ifndef noinline
399 #define noinline
400 #endif
401
402 /*
403 * Rather then using noinline to prevent stack consumption, use
404 * noinline_for_stack instead. For documentation reasons.
405 */
406 #define noinline_for_stack noinline
407
408 #ifndef __always_inline
409 #define __always_inline inline
410 #endif
411
412 #endif /* __KERNEL__ */
413
414 /*
415 * From the GCC manual:
416 *
417 * Many functions do not examine any values except their arguments,
418 * and have no effects except the return value. Basically this is
419 * just slightly more strict class than the `pure' attribute above,
420 * since function is not allowed to read global memory.
421 *
422 * Note that a function that has pointer arguments and examines the
423 * data pointed to must _not_ be declared `const'. Likewise, a
424 * function that calls a non-`const' function usually must not be
425 * `const'. It does not make sense for a `const' function to return
426 * `void'.
427 */
428 #ifndef __attribute_const__
429 # define __attribute_const__ /* unimplemented */
430 #endif
431
432 #ifndef __latent_entropy
433 # define __latent_entropy
434 #endif
435
436 /*
437 * Tell gcc if a function is cold. The compiler will assume any path
438 * directly leading to the call is unlikely.
439 */
440
441 #ifndef __cold
442 #define __cold
443 #endif
444
445 /* Simple shorthand for a section definition */
446 #ifndef __section
447 # define __section(S) __attribute__ ((__section__(#S)))
448 #endif
449
450 #ifndef __visible
451 #define __visible
452 #endif
453
454 /*
455 * Assume alignment of return value.
456 */
457 #ifndef __assume_aligned
458 #define __assume_aligned(a, ...)
459 #endif
460
461
462 /* Are two types/vars the same type (ignoring qualifiers)? */
463 #ifndef __same_type
464 # define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
465 #endif
466
467 /* Is this type a native word size -- useful for atomic operations */
468 #ifndef __native_word
469 # define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
470 #endif
471
472 /* Compile time object size, -1 for unknown */
473 #ifndef __compiletime_object_size
474 # define __compiletime_object_size(obj) -1
475 #endif
476 #ifndef __compiletime_warning
477 # define __compiletime_warning(message)
478 #endif
479 #ifndef __compiletime_error
480 # define __compiletime_error(message)
481 /*
482 * Sparse complains of variable sized arrays due to the temporary variable in
483 * __compiletime_assert. Unfortunately we can't just expand it out to make
484 * sparse see a constant array size without breaking compiletime_assert on old
485 * versions of GCC (e.g. 4.2.4), so hide the array from sparse altogether.
486 */
487 # ifndef __CHECKER__
488 # define __compiletime_error_fallback(condition) \
489 do { } while (0)
490 # endif
491 #endif
492 #ifndef __compiletime_error_fallback
493 # define __compiletime_error_fallback(condition) do { } while (0)
494 #endif
495
496 #define __compiletime_assert(condition, msg, prefix, suffix) \
497 do { \
498 bool __cond = !(condition); \
499 extern void prefix ## suffix(void) __compiletime_error(msg); \
500 if (__cond) \
501 prefix ## suffix(); \
502 __compiletime_error_fallback(__cond); \
503 } while (0)
504
505 #define _compiletime_assert(condition, msg, prefix, suffix) \
506 __compiletime_assert(condition, msg, prefix, suffix)
507
508 /**
509 * compiletime_assert - break build and emit msg if condition is false
510 * @condition: a compile-time constant condition to check
511 * @msg: a message to emit if condition is false
512 *
513 * In tradition of POSIX assert, this macro will break the build if the
514 * supplied condition is *false*, emitting the supplied error message if the
515 * compiler has support to do so.
516 */
517 #define compiletime_assert(condition, msg) \
518 _compiletime_assert(condition, msg, __compiletime_assert_, __LINE__)
519
520 #define compiletime_assert_atomic_type(t) \
521 compiletime_assert(__native_word(t), \
522 "Need native word sized stores/loads for atomicity.")
523
524 /*
525 * Prevent the compiler from merging or refetching accesses. The compiler
526 * is also forbidden from reordering successive instances of ACCESS_ONCE(),
527 * but only when the compiler is aware of some particular ordering. One way
528 * to make the compiler aware of ordering is to put the two invocations of
529 * ACCESS_ONCE() in different C statements.
530 *
531 * ACCESS_ONCE will only work on scalar types. For union types, ACCESS_ONCE
532 * on a union member will work as long as the size of the member matches the
533 * size of the union and the size is smaller than word size.
534 *
535 * The major use cases of ACCESS_ONCE used to be (1) Mediating communication
536 * between process-level code and irq/NMI handlers, all running on the same CPU,
537 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
538 * mutilate accesses that either do not require ordering or that interact
539 * with an explicit memory barrier or atomic instruction that provides the
540 * required ordering.
541 *
542 * If possible use READ_ONCE()/WRITE_ONCE() instead.
543 */
544 #define __ACCESS_ONCE(x) ({ \
545 __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
546 (volatile typeof(x) *)&(x); })
547 #define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))
548
549 /**
550 * lockless_dereference() - safely load a pointer for later dereference
551 * @p: The pointer to load
552 *
553 * Similar to rcu_dereference(), but for situations where the pointed-to
554 * object's lifetime is managed by something other than RCU. That
555 * "something other" might be reference counting or simple immortality.
556 *
557 * The seemingly unused variable ___typecheck_p validates that @p is
558 * indeed a pointer type by using a pointer to typeof(*p) as the type.
559 * Taking a pointer to typeof(*p) again is needed in case p is void *.
560 */
561 #define lockless_dereference(p) \
562 ({ \
563 typeof(p) _________p1 = READ_ONCE(p); \
564 typeof(*(p)) *___typecheck_p __maybe_unused; \
565 smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
566 (_________p1); \
567 })
568
569 /* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
570 #ifdef CONFIG_KPROBES
571 # define __kprobes __attribute__((__section__(".kprobes.text")))
572 # define nokprobe_inline __always_inline
573 #else
574 # define __kprobes
575 # define nokprobe_inline inline
576 #endif
577 #endif /* __LINUX_COMPILER_H */ 1 /*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. NET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * Definitions for the Ethernet handlers.
7 *
8 * Version: @(#)eth.h 1.0.4 05/13/93
9 *
10 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 *
13 * Relocated to include/linux where it belongs by Alan Cox
14 * <gw4pts@gw4pts.ampr.org>
15 *
16 * This program is free software; you can redistribute it and/or
17 * modify it under the terms of the GNU General Public License
18 * as published by the Free Software Foundation; either version
19 * 2 of the License, or (at your option) any later version.
20 *
21 */
22 #ifndef _LINUX_ETHERDEVICE_H
23 #define _LINUX_ETHERDEVICE_H
24
25 #include <linux/if_ether.h>
26 #include <linux/netdevice.h>
27 #include <linux/random.h>
28 #include <asm/unaligned.h>
29 #include <asm/bitsperlong.h>
30
31 #ifdef __KERNEL__
32 struct device;
33 int eth_platform_get_mac_address(struct device *dev, u8 *mac_addr);
34 unsigned char *arch_get_platform_get_mac_address(void);
35 u32 eth_get_headlen(void *data, unsigned int max_len);
36 __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
37 extern const struct header_ops eth_header_ops;
38
39 int eth_header(struct sk_buff *skb, struct net_device *dev, unsigned short type,
40 const void *daddr, const void *saddr, unsigned len);
41 int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr);
42 int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh,
43 __be16 type);
44 void eth_header_cache_update(struct hh_cache *hh, const struct net_device *dev,
45 const unsigned char *haddr);
46 int eth_prepare_mac_addr_change(struct net_device *dev, void *p);
47 void eth_commit_mac_addr_change(struct net_device *dev, void *p);
48 int eth_mac_addr(struct net_device *dev, void *p);
49 int eth_change_mtu(struct net_device *dev, int new_mtu);
50 int eth_validate_addr(struct net_device *dev);
51
52 struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
53 unsigned int rxqs);
54 #define alloc_etherdev(sizeof_priv) alloc_etherdev_mq(sizeof_priv, 1)
55 #define alloc_etherdev_mq(sizeof_priv, count) alloc_etherdev_mqs(sizeof_priv, count, count)
56
57 struct sk_buff **eth_gro_receive(struct sk_buff **head,
58 struct sk_buff *skb);
59 int eth_gro_complete(struct sk_buff *skb, int nhoff);
60
61 /* Reserved Ethernet Addresses per IEEE 802.1Q */
62 static const u8 eth_reserved_addr_base[ETH_ALEN] __aligned(2) =
63 { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
64
65 /**
66 * is_link_local_ether_addr - Determine if given Ethernet address is link-local
67 * @addr: Pointer to a six-byte array containing the Ethernet address
68 *
69 * Return true if address is link local reserved addr (01:80:c2:00:00:0X) per
70 * IEEE 802.1Q 8.6.3 Frame filtering.
71 *
72 * Please note: addr must be aligned to u16.
73 */
74 static inline bool is_link_local_ether_addr(const u8 *addr)
75 {
76 __be16 *a = (__be16 *)addr;
77 static const __be16 *b = (const __be16 *)eth_reserved_addr_base;
78 static const __be16 m = cpu_to_be16(0xfff0);
79
/* Compare against 01:80:c2:00:00:0X: the 0xfff0 mask ignores the low
   4 bits of the final octet. The unaligned-access branch compares the
   first 4 bytes as one u32 instead of two u16 loads. */
80 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
81 return (((*(const u32 *)addr) ^ (*(const u32 *)b)) |
82 (__force int)((a[2] ^ b[2]) & m)) == 0;
83 #else
84 return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | ((a[2] ^ b[2]) & m)) == 0;
85 #endif
86 }
87
88 /**
89 * is_zero_ether_addr - Determine if give Ethernet address is all zeros.
90 * @addr: Pointer to a six-byte array containing the Ethernet address
91 *
92 * Return true if the address is all zeroes.
93 *
94 * Please note: addr must be aligned to u16.
95 */
96 static inline bool is_zero_ether_addr(const u8 *addr)
97 {
/* OR all six bytes together (as one u32 + one u16, or three u16s) and
   compare against zero; addr must be u16-aligned per the kernel-doc. */
98 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
99 return ((*(const u32 *)addr) | (*(const u16 *)(addr + 4))) == 0;
100 #else
101 return (*(const u16 *)(addr + 0) |
102 *(const u16 *)(addr + 2) |
103 *(const u16 *)(addr + 4)) == 0;
104 #endif
105 }
106
107 /**
108 * is_multicast_ether_addr - Determine if the Ethernet address is a multicast.
109 * @addr: Pointer to a six-byte array containing the Ethernet address
110 *
111 * Return true if the address is a multicast address.
112 * By definition the broadcast address is also a multicast address.
113 */
114 static inline bool is_multicast_ether_addr(const u8 *addr)
115 {
/* The multicast flag is bit 0 of the first octet. A wider (u32/u16)
   load is used for speed; on big-endian machines the first octet ends
   up in the most-significant byte, hence the shift below. */
116 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
117 u32 a = *(const u32 *)addr;
118 #else
119 u16 a = *(const u16 *)addr;
120 #endif
121 #ifdef __BIG_ENDIAN
122 return 0x01 & (a >> ((sizeof(a) * 8) - 8));
123 #else
124 return 0x01 & a;
125 #endif
126 }
127
128 static inline bool is_multicast_ether_addr_64bits(const u8 addr[6+2])
129 {
/* 64-bit fast path may read 8 bytes at once: the u8[6+2] parameter
   documents that the caller must provide two readable padding bytes
   after the 6-byte address. Falls back to the plain check otherwise. */
130 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
131 #ifdef __BIG_ENDIAN
132 return 0x01 & ((*(const u64 *)addr) >> 56);
133 #else
134 return 0x01 & (*(const u64 *)addr);
135 #endif
136 #else
137 return is_multicast_ether_addr(addr);
138 #endif
139 }
140
141 /**
142 * is_local_ether_addr - Determine if the Ethernet address is locally-assigned one (IEEE 802).
143 * @addr: Pointer to a six-byte array containing the Ethernet address
144 *
145 * Return true if the address is a local address.
146 */
147 static inline bool is_local_ether_addr(const u8 *addr)
148 {
149 return 0x02 & addr[0];
150 }
151
152 /**
153 * is_broadcast_ether_addr - Determine if the Ethernet address is broadcast
154 * @addr: Pointer to a six-byte array containing the Ethernet address
155 *
156 * Return true if the address is the broadcast address.
157 *
158 * Please note: addr must be aligned to u16.
159 */
static inline bool is_broadcast_ether_addr(const u8 *addr)
{
	/* The AND of the three u16 words is 0xffff iff every bit of all
	 * six octets is set; addr must be u16-aligned (see kerneldoc). */
	return (*(const u16 *)(addr + 0) &
		*(const u16 *)(addr + 2) &
		*(const u16 *)(addr + 4)) == 0xffff;
}
166
167 /**
168 * is_unicast_ether_addr - Determine if the Ethernet address is unicast
169 * @addr: Pointer to a six-byte array containing the Ethernet address
170 *
171 * Return true if the address is a unicast address.
172 */
173 static inline bool is_unicast_ether_addr(const u8 *addr)
174 {
175 return !is_multicast_ether_addr(addr);
176 }
177
178 /**
179 * is_valid_ether_addr - Determine if the given Ethernet address is valid
180 * @addr: Pointer to a six-byte array containing the Ethernet address
181 *
182 * Check that the Ethernet address (MAC) is not 00:00:00:00:00:00, is not
183 * a multicast address, and is not FF:FF:FF:FF:FF:FF.
184 *
185 * Return true if the address is valid.
186 *
187 * Please note: addr must be aligned to u16.
188 */
189 static inline bool is_valid_ether_addr(const u8 *addr)
190 {
191 /* FF:FF:FF:FF:FF:FF is a multicast address so we don't need to
192 * explicitly check for it here. */
193 return !is_multicast_ether_addr(addr) && !is_zero_ether_addr(addr);
194 }
195
196 /**
197 * eth_proto_is_802_3 - Determine if a given Ethertype/length is a protocol
198 * @proto: Ethertype/length value to be tested
199 *
200 * Check that the value from the Ethertype/length field is a valid Ethertype.
201 *
 202 * Return true if the value is an 802.3 supported Ethertype.
203 */
static inline bool eth_proto_is_802_3(__be16 proto)
{
	/* Values below ETH_P_802_3_MIN (0x0600) in this field are 802.3
	 * frame lengths, not Ethertypes, so only the magnitude matters. */
#ifndef __BIG_ENDIAN
	/* if CPU is little endian mask off bits representing LSB */
	proto &= htons(0xFF00);
#endif
	/* cast both to u16 and compare since LSB can be ignored */
	return (__force u16)proto >= (__force u16)htons(ETH_P_802_3_MIN);
}
213
214 /**
215 * eth_random_addr - Generate software assigned random Ethernet address
216 * @addr: Pointer to a six-byte array containing the Ethernet address
217 *
218 * Generate a random Ethernet address (MAC) that is not multicast
219 * and has the local assigned bit set.
220 */
221 static inline void eth_random_addr(u8 *addr)
222 {
223 get_random_bytes(addr, ETH_ALEN);
224 addr[0] &= 0xfe; /* clear multicast bit */
225 addr[0] |= 0x02; /* set local assignment bit (IEEE802) */
226 }
227
228 #define random_ether_addr(addr) eth_random_addr(addr)
229
230 /**
231 * eth_broadcast_addr - Assign broadcast address
232 * @addr: Pointer to a six-byte array containing the Ethernet address
233 *
234 * Assign the broadcast address to the given address array.
235 */
static inline void eth_broadcast_addr(u8 *addr)
{
	memset(addr, 0xff, ETH_ALEN);	/* FF:FF:FF:FF:FF:FF */
}
240
241 /**
242 * eth_zero_addr - Assign zero address
243 * @addr: Pointer to a six-byte array containing the Ethernet address
244 *
245 * Assign the zero address to the given address array.
246 */
static inline void eth_zero_addr(u8 *addr)
{
	memset(addr, 0x00, ETH_ALEN);	/* 00:00:00:00:00:00 */
}
251
252 /**
253 * eth_hw_addr_random - Generate software assigned random Ethernet and
254 * set device flag
255 * @dev: pointer to net_device structure
256 *
257 * Generate a random Ethernet address (MAC) to be used by a net device
258 * and set addr_assign_type so the state can be read by sysfs and be
259 * used by userspace.
260 */
static inline void eth_hw_addr_random(struct net_device *dev)
{
	/* Record that the address was randomly generated so sysfs and
	 * userspace can tell it apart from a permanent one. */
	dev->addr_assign_type = NET_ADDR_RANDOM;
	eth_random_addr(dev->dev_addr);
}
266
267 /**
268 * ether_addr_copy - Copy an Ethernet address
269 * @dst: Pointer to a six-byte array Ethernet address destination
270 * @src: Pointer to a six-byte array Ethernet address source
271 *
272 * Please note: dst & src must both be aligned to u16.
273 */
static inline void ether_addr_copy(u8 *dst, const u8 *src)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	/* 4-byte + 2-byte copy; cheaper than memcpy for a fixed 6 bytes. */
	*(u32 *)dst = *(const u32 *)src;
	*(u16 *)(dst + 4) = *(const u16 *)(src + 4);
#else
	/* Three u16 copies; both pointers must be u16-aligned. */
	u16 *a = (u16 *)dst;
	const u16 *b = (const u16 *)src;

	a[0] = b[0];
	a[1] = b[1];
	a[2] = b[2];
#endif
}
288
289 /**
290 * eth_hw_addr_inherit - Copy dev_addr from another net_device
291 * @dst: pointer to net_device to copy dev_addr to
292 * @src: pointer to net_device to copy dev_addr from
293 *
294 * Copy the Ethernet address from one net_device to another along with
295 * the address attributes (addr_assign_type).
296 */
static inline void eth_hw_addr_inherit(struct net_device *dst,
				       struct net_device *src)
{
	/* Copy the assignment type along with the address so the copy is
	 * reported to userspace the same way as the original. */
	dst->addr_assign_type = src->addr_assign_type;
	ether_addr_copy(dst->dev_addr, src->dev_addr);
}
303
304 /**
305 * ether_addr_equal - Compare two Ethernet addresses
306 * @addr1: Pointer to a six-byte array containing the Ethernet address
307 * @addr2: Pointer other six-byte array containing the Ethernet address
308 *
309 * Compare two Ethernet addresses, returns true if equal
310 *
311 * Please note: addr1 & addr2 must both be aligned to u16.
312 */
static inline bool ether_addr_equal(const u8 *addr1, const u8 *addr2)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	/* XOR-and-fold: any differing bit makes fold non-zero, so a
	 * single compare against zero covers all six octets. */
	u32 fold = ((*(const u32 *)addr1) ^ (*(const u32 *)addr2)) |
		   ((*(const u16 *)(addr1 + 4)) ^ (*(const u16 *)(addr2 + 4)));

	return fold == 0;
#else
	/* Three u16 compares; both addresses must be u16-aligned. */
	const u16 *a = (const u16 *)addr1;
	const u16 *b = (const u16 *)addr2;

	return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) == 0;
#endif
}
327
328 /**
329 * ether_addr_equal_64bits - Compare two Ethernet addresses
330 * @addr1: Pointer to an array of 8 bytes
331 * @addr2: Pointer to an other array of 8 bytes
332 *
333 * Compare two Ethernet addresses, returns true if equal, false otherwise.
334 *
335 * The function doesn't need any conditional branches and possibly uses
336 * word memory accesses on CPU allowing cheap unaligned memory reads.
337 * arrays = { byte1, byte2, byte3, byte4, byte5, byte6, pad1, pad2 }
338 *
339 * Please note that alignment of addr1 & addr2 are only guaranteed to be 16 bits.
340 */
341
static inline bool ether_addr_equal_64bits(const u8 addr1[6+2],
					   const u8 addr2[6+2])
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
	u64 fold = (*(const u64 *)addr1) ^ (*(const u64 *)addr2);

	/* The two padding bytes may differ between the arrays; they sit
	 * in the low 16 bits of fold on big endian and the high 16 bits
	 * on little endian, so shift them out before testing. */
#ifdef __BIG_ENDIAN
	return (fold >> 16) == 0;
#else
	return (fold << 16) == 0;
#endif
#else
	/* No cheap 64-bit loads: fall back to the 16-bit comparison. */
	return ether_addr_equal(addr1, addr2);
#endif
}
357
358 /**
359 * ether_addr_equal_unaligned - Compare two not u16 aligned Ethernet addresses
360 * @addr1: Pointer to a six-byte array containing the Ethernet address
361 * @addr2: Pointer other six-byte array containing the Ethernet address
362 *
363 * Compare two Ethernet addresses, returns true if equal
364 *
365 * Please note: Use only when any Ethernet address may not be u16 aligned.
366 */
static inline bool ether_addr_equal_unaligned(const u8 *addr1, const u8 *addr2)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	/* Unaligned loads are cheap here, so the aligned helper is fine. */
	return ether_addr_equal(addr1, addr2);
#else
	/* Byte-wise compare: safe for any alignment. */
	return memcmp(addr1, addr2, ETH_ALEN) == 0;
#endif
}
375
376 /**
377 * ether_addr_equal_masked - Compare two Ethernet addresses with a mask
378 * @addr1: Pointer to a six-byte array containing the 1st Ethernet address
379 * @addr2: Pointer to a six-byte array containing the 2nd Ethernet address
380 * @mask: Pointer to a six-byte array containing the Ethernet address bitmask
381 *
382 * Compare two Ethernet addresses with a mask, returns true if for every bit
383 * set in the bitmask the equivalent bits in the ethernet addresses are equal.
384 * Using a mask with all bits set is a slower ether_addr_equal.
385 */
386 static inline bool ether_addr_equal_masked(const u8 *addr1, const u8 *addr2,
387 const u8 *mask)
388 {
389 int i;
390
391 for (i = 0; i < ETH_ALEN; i++) {
392 if ((addr1[i] ^ addr2[i]) & mask[i])
393 return false;
394 }
395
396 return true;
397 }
398
399 /**
400 * is_etherdev_addr - Tell if given Ethernet address belongs to the device.
401 * @dev: Pointer to a device structure
402 * @addr: Pointer to a six-byte array containing the Ethernet address
403 *
404 * Compare passed address with all addresses of the device. Return true if the
 405 * address is one of the device addresses.
406 *
407 * Note that this function calls ether_addr_equal_64bits() so take care of
408 * the right padding.
409 */
static inline bool is_etherdev_addr(const struct net_device *dev,
				    const u8 addr[6 + 2])
{
	struct netdev_hw_addr *ha;
	bool res = false;

	/* Walk the device's address list under RCU protection and stop
	 * at the first match.  addr needs two padding bytes because
	 * ether_addr_equal_64bits() may load eight bytes. */
	rcu_read_lock();
	for_each_dev_addr(dev, ha) {
		res = ether_addr_equal_64bits(addr, ha->addr);
		if (res)
			break;
	}
	rcu_read_unlock();
	return res;
}
425 #endif /* __KERNEL__ */
426
427 /**
428 * compare_ether_header - Compare two Ethernet headers
429 * @a: Pointer to Ethernet header
430 * @b: Pointer to Ethernet header
431 *
432 * Compare two Ethernet headers, returns 0 if equal.
433 * This assumes that the network header (i.e., IP header) is 4-byte
434 * aligned OR the platform can handle unaligned access. This is the
435 * case for all packets coming into netif_receive_skb or similar
436 * entry points.
437 */
438
439 static inline unsigned long compare_ether_header(const void *a, const void *b)
440 {
441 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
442 unsigned long fold;
443
444 /*
445 * We want to compare 14 bytes:
446 * [a0 ... a13] ^ [b0 ... b13]
447 * Use two long XOR, ORed together, with an overlap of two bytes.
448 * [a0 a1 a2 a3 a4 a5 a6 a7 ] ^ [b0 b1 b2 b3 b4 b5 b6 b7 ] |
449 * [a6 a7 a8 a9 a10 a11 a12 a13] ^ [b6 b7 b8 b9 b10 b11 b12 b13]
450 * This means the [a6 a7] ^ [b6 b7] part is done two times.
451 */
452 fold = *(unsigned long *)a ^ *(unsigned long *)b;
453 fold |= *(unsigned long *)(a + 6) ^ *(unsigned long *)(b + 6);
454 return fold;
455 #else
456 u32 *a32 = (u32 *)((u8 *)a + 2);
457 u32 *b32 = (u32 *)((u8 *)b + 2);
458
459 return (*(u16 *)a ^ *(u16 *)b) | (a32[0] ^ b32[0]) |
460 (a32[1] ^ b32[1]) | (a32[2] ^ b32[2]);
461 #endif
462 }
463
464 /**
 465 * eth_skb_pad - Pad buffer to minimum number of octets for Ethernet frame
466 * @skb: Buffer to pad
467 *
468 * An Ethernet frame should have a minimum size of 60 bytes. This function
469 * takes short frames and pads them with zeros up to the 60 byte limit.
470 */
static inline int eth_skb_pad(struct sk_buff *skb)
{
	/* Zero-pad the frame up to ETH_ZLEN (60 bytes) and propagate
	 * skb_put_padto()'s return value to the caller. */
	return skb_put_padto(skb, ETH_ZLEN);
}
475
476 #endif /* _LINUX_ETHERDEVICE_H */ 1 /*
2 * Definitions for the 'struct sk_buff' memory handlers.
3 *
4 * Authors:
5 * Alan Cox, <gw4pts@gw4pts.ampr.org>
6 * Florian La Roche, <rzsfl@rz.uni-sb.de>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 */
13
14 #ifndef _LINUX_SKBUFF_H
15 #define _LINUX_SKBUFF_H
16
17 #include <linux/kernel.h>
18 #include <linux/kmemcheck.h>
19 #include <linux/compiler.h>
20 #include <linux/time.h>
21 #include <linux/bug.h>
22 #include <linux/cache.h>
23 #include <linux/rbtree.h>
24 #include <linux/socket.h>
25
26 #include <linux/atomic.h>
27 #include <asm/types.h>
28 #include <linux/spinlock.h>
29 #include <linux/net.h>
30 #include <linux/textsearch.h>
31 #include <net/checksum.h>
32 #include <linux/rcupdate.h>
33 #include <linux/hrtimer.h>
34 #include <linux/dma-mapping.h>
35 #include <linux/netdev_features.h>
36 #include <linux/sched.h>
37 #include <net/flow_dissector.h>
38 #include <linux/splice.h>
39 #include <linux/in6.h>
40 #include <linux/if_packet.h>
41 #include <net/flow.h>
42
43 /* The interface for checksum offload between the stack and networking drivers
44 * is as follows...
45 *
46 * A. IP checksum related features
47 *
48 * Drivers advertise checksum offload capabilities in the features of a device.
49 * From the stack's point of view these are capabilities offered by the driver,
50 * a driver typically only advertises features that it is capable of offloading
51 * to its device.
52 *
53 * The checksum related features are:
54 *
55 * NETIF_F_HW_CSUM - The driver (or its device) is able to compute one
56 * IP (one's complement) checksum for any combination
57 * of protocols or protocol layering. The checksum is
58 * computed and set in a packet per the CHECKSUM_PARTIAL
59 * interface (see below).
60 *
61 * NETIF_F_IP_CSUM - Driver (device) is only able to checksum plain
62 * TCP or UDP packets over IPv4. These are specifically
63 * unencapsulated packets of the form IPv4|TCP or
64 * IPv4|UDP where the Protocol field in the IPv4 header
65 * is TCP or UDP. The IPv4 header may contain IP options
66 * This feature cannot be set in features for a device
67 * with NETIF_F_HW_CSUM also set. This feature is being
68 * DEPRECATED (see below).
69 *
70 * NETIF_F_IPV6_CSUM - Driver (device) is only able to checksum plain
71 * TCP or UDP packets over IPv6. These are specifically
72 * unencapsulated packets of the form IPv6|TCP or
 73 * IPv6|UDP where the Next Header field in the IPv6
74 * header is either TCP or UDP. IPv6 extension headers
75 * are not supported with this feature. This feature
76 * cannot be set in features for a device with
77 * NETIF_F_HW_CSUM also set. This feature is being
78 * DEPRECATED (see below).
79 *
80 * NETIF_F_RXCSUM - Driver (device) performs receive checksum offload.
 81 * This flag is used only to disable the RX checksum
82 * feature for a device. The stack will accept receive
83 * checksum indication in packets received on a device
84 * regardless of whether NETIF_F_RXCSUM is set.
85 *
86 * B. Checksumming of received packets by device. Indication of checksum
87 * verification is in set skb->ip_summed. Possible values are:
88 *
89 * CHECKSUM_NONE:
90 *
91 * Device did not checksum this packet e.g. due to lack of capabilities.
92 * The packet contains full (though not verified) checksum in packet but
93 * not in skb->csum. Thus, skb->csum is undefined in this case.
94 *
95 * CHECKSUM_UNNECESSARY:
96 *
97 * The hardware you're dealing with doesn't calculate the full checksum
98 * (as in CHECKSUM_COMPLETE), but it does parse headers and verify checksums
99 * for specific protocols. For such packets it will set CHECKSUM_UNNECESSARY
100 * if their checksums are okay. skb->csum is still undefined in this case
101 * though. A driver or device must never modify the checksum field in the
102 * packet even if checksum is verified.
103 *
104 * CHECKSUM_UNNECESSARY is applicable to following protocols:
105 * TCP: IPv6 and IPv4.
106 * UDP: IPv4 and IPv6. A device may apply CHECKSUM_UNNECESSARY to a
107 * zero UDP checksum for either IPv4 or IPv6, the networking stack
108 * may perform further validation in this case.
109 * GRE: only if the checksum is present in the header.
110 * SCTP: indicates the CRC in SCTP header has been validated.
111 *
112 * skb->csum_level indicates the number of consecutive checksums found in
113 * the packet minus one that have been verified as CHECKSUM_UNNECESSARY.
114 * For instance if a device receives an IPv6->UDP->GRE->IPv4->TCP packet
115 * and a device is able to verify the checksums for UDP (possibly zero),
116 * GRE (checksum flag is set), and TCP-- skb->csum_level would be set to
117 * two. If the device were only able to verify the UDP checksum and not
 118 * GRE, either because it doesn't support GRE checksum or because GRE
119 * checksum is bad, skb->csum_level would be set to zero (TCP checksum is
120 * not considered in this case).
121 *
122 * CHECKSUM_COMPLETE:
123 *
124 * This is the most generic way. The device supplied checksum of the _whole_
125 * packet as seen by netif_rx() and fills out in skb->csum. Meaning, the
126 * hardware doesn't need to parse L3/L4 headers to implement this.
127 *
128 * Note: Even if device supports only some protocols, but is able to produce
129 * skb->csum, it MUST use CHECKSUM_COMPLETE, not CHECKSUM_UNNECESSARY.
130 *
131 * CHECKSUM_PARTIAL:
132 *
133 * A checksum is set up to be offloaded to a device as described in the
134 * output description for CHECKSUM_PARTIAL. This may occur on a packet
135 * received directly from another Linux OS, e.g., a virtualized Linux kernel
136 * on the same host, or it may be set in the input path in GRO or remote
137 * checksum offload. For the purposes of checksum verification, the checksum
138 * referred to by skb->csum_start + skb->csum_offset and any preceding
139 * checksums in the packet are considered verified. Any checksums in the
140 * packet that are after the checksum being offloaded are not considered to
141 * be verified.
142 *
143 * C. Checksumming on transmit for non-GSO. The stack requests checksum offload
144 * in the skb->ip_summed for a packet. Values are:
145 *
146 * CHECKSUM_PARTIAL:
147 *
148 * The driver is required to checksum the packet as seen by hard_start_xmit()
149 * from skb->csum_start up to the end, and to record/write the checksum at
150 * offset skb->csum_start + skb->csum_offset. A driver may verify that the
151 * csum_start and csum_offset values are valid values given the length and
152 * offset of the packet, however they should not attempt to validate that the
153 * checksum refers to a legitimate transport layer checksum-- it is the
154 * purview of the stack to validate that csum_start and csum_offset are set
155 * correctly.
156 *
157 * When the stack requests checksum offload for a packet, the driver MUST
158 * ensure that the checksum is set correctly. A driver can either offload the
159 * checksum calculation to the device, or call skb_checksum_help (in the case
160 * that the device does not support offload for a particular checksum).
161 *
162 * NETIF_F_IP_CSUM and NETIF_F_IPV6_CSUM are being deprecated in favor of
163 * NETIF_F_HW_CSUM. New devices should use NETIF_F_HW_CSUM to indicate
164 * checksum offload capability. If a device has limited checksum capabilities
165 * (for instance can only perform NETIF_F_IP_CSUM or NETIF_F_IPV6_CSUM as
166 * described above) a helper function can be called to resolve
167 * CHECKSUM_PARTIAL. The helper functions are skb_csum_off_chk*. The helper
168 * function takes a spec argument that describes the protocol layer that is
169 * supported for checksum offload and can be called for each packet. If a
170 * packet does not match the specification for offload, skb_checksum_help
171 * is called to resolve the checksum.
172 *
173 * CHECKSUM_NONE:
174 *
175 * The skb was already checksummed by the protocol, or a checksum is not
176 * required.
177 *
178 * CHECKSUM_UNNECESSARY:
179 *
 180 * This has the same meaning as CHECKSUM_NONE for checksum offload on
181 * output.
182 *
183 * CHECKSUM_COMPLETE:
184 * Not used in checksum output. If a driver observes a packet with this value
185 * set in skbuff, if should treat as CHECKSUM_NONE being set.
186 *
187 * D. Non-IP checksum (CRC) offloads
188 *
189 * NETIF_F_SCTP_CRC - This feature indicates that a device is capable of
190 * offloading the SCTP CRC in a packet. To perform this offload the stack
191 * will set ip_summed to CHECKSUM_PARTIAL and set csum_start and csum_offset
 192 * accordingly. Note that there is no indication in the skbuff that the
193 * CHECKSUM_PARTIAL refers to an SCTP checksum, a driver that supports
194 * both IP checksum offload and SCTP CRC offload must verify which offload
195 * is configured for a packet presumably by inspecting packet headers.
196 *
197 * NETIF_F_FCOE_CRC - This feature indicates that a device is capable of
198 * offloading the FCOE CRC in a packet. To perform this offload the stack
199 * will set ip_summed to CHECKSUM_PARTIAL and set csum_start and csum_offset
 200 * accordingly. Note that there is no indication in the skbuff that the
201 * CHECKSUM_PARTIAL refers to an FCOE checksum, a driver that supports
202 * both IP checksum offload and FCOE CRC offload must verify which offload
203 * is configured for a packet presumably by inspecting packet headers.
204 *
205 * E. Checksumming on output with GSO.
206 *
207 * In the case of a GSO packet (skb_is_gso(skb) is true), checksum offload
208 * is implied by the SKB_GSO_* flags in gso_type. Most obviously, if the
209 * gso_type is SKB_GSO_TCPV4 or SKB_GSO_TCPV6, TCP checksum offload as
210 * part of the GSO operation is implied. If a checksum is being offloaded
211 * with GSO then ip_summed is CHECKSUM_PARTIAL, csum_start and csum_offset
212 * are set to refer to the outermost checksum being offload (two offloaded
213 * checksums are possible with UDP encapsulation).
214 */
215
216 /* Don't change this without changing skb_csum_unnecessary! */
217 #define CHECKSUM_NONE 0
218 #define CHECKSUM_UNNECESSARY 1
219 #define CHECKSUM_COMPLETE 2
220 #define CHECKSUM_PARTIAL 3
221
222 /* Maximum value in skb->csum_level */
223 #define SKB_MAX_CSUM_LEVEL 3
224
225 #define SKB_DATA_ALIGN(X) ALIGN(X, SMP_CACHE_BYTES)
226 #define SKB_WITH_OVERHEAD(X) \
227 ((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
228 #define SKB_MAX_ORDER(X, ORDER) \
229 SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
230 #define SKB_MAX_HEAD(X) (SKB_MAX_ORDER((X), 0))
231 #define SKB_MAX_ALLOC (SKB_MAX_ORDER(0, 2))
232
233 /* return minimum truesize of one skb containing X bytes of data */
234 #define SKB_TRUESIZE(X) ((X) + \
235 SKB_DATA_ALIGN(sizeof(struct sk_buff)) + \
236 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
237
238 struct net_device;
239 struct scatterlist;
240 struct pipe_inode_info;
241 struct iov_iter;
242 struct napi_struct;
243
244 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
/* Minimal conntrack handle embedded in an skb when connection tracking
 * is enabled; the full definition lives in the netfilter code. */
struct nf_conntrack {
	atomic_t use;	/* atomic usage counter for this entry */
};
248 #endif
249
250 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
/* Per-skb state kept by br_netfilter while a frame traverses a bridge. */
struct nf_bridge_info {
	atomic_t use;			/* atomic usage counter */
	enum {
		BRNF_PROTO_UNCHANGED,
		BRNF_PROTO_8021Q,
		BRNF_PROTO_PPPOE
	} orig_proto:8;			/* encapsulation seen on ingress */
	u8 pkt_otherhost:1;
	u8 in_prerouting:1;
	u8 bridged_dnat:1;
	__u16 frag_max_size;
	struct net_device *physindev;	/* physical ingress port */

	/* always valid & non-NULL from FORWARD on, for physdev match */
	struct net_device *physoutdev;
	union {
		/* prerouting: detect dnat in orig/reply direction */
		__be32 ipv4_daddr;
		struct in6_addr ipv6_daddr;

		/* after prerouting + nat detected: store original source
		 * mac since neigh resolution overwrites it, only used while
		 * skb is out in neigh layer.
		 */
		char neigh_header[8];
	};
};
278 #endif
279
/* Doubly-linked list head for queues of sk_buffs. */
struct sk_buff_head {
	/* These two members must be first. */
	struct sk_buff *next;
	struct sk_buff *prev;

	__u32 qlen;		/* number of buffers on the list */
	spinlock_t lock;	/* serializes queue operations */
};
288
289 struct sk_buff;
290
291 /* To allow 64K frame to be packed as single skb without frag_list we
292 * require 64K/PAGE_SIZE pages plus 1 additional page to allow for
293 * buffers which do not start on a page boundary.
294 *
295 * Since GRO uses frags we allocate at least 16 regardless of page
296 * size.
297 */
298 #if (65536/PAGE_SIZE + 1) < 16
299 #define MAX_SKB_FRAGS 16UL
300 #else
301 #define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
302 #endif
303 extern int sysctl_max_skb_frags;
304
305 /* Set skb_shinfo(skb)->gso_size to this in case you want skb_segment to
306 * segment using its current segmentation instead.
307 */
308 #define GSO_BY_FRAGS 0xFFFF
309
310 typedef struct skb_frag_struct skb_frag_t;
311
/* One paged fragment of an skb: a (page, offset, length) triple. */
struct skb_frag_struct {
	struct {
		struct page *p;		/* page holding the fragment data */
	} page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	/* Wide fields: a fragment may not fit in 16 bits here. */
	__u32 page_offset;
	__u32 size;
#else
	__u16 page_offset;
	__u16 size;
#endif
};
324
static inline unsigned int skb_frag_size(const skb_frag_t *frag)
{
	/* Length in bytes of the data held by @frag. */
	return frag->size;
}
329
static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
{
	/* Overwrite the fragment length with @size. */
	frag->size = size;
}
334
335 static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
336 {
337 frag->size += delta;
338 }
339
340 static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
341 {
342 frag->size -= delta;
343 }
344
345 #define HAVE_HW_TIME_STAMP
346
347 /**
348 * struct skb_shared_hwtstamps - hardware time stamps
349 * @hwtstamp: hardware time stamp transformed into duration
350 * since arbitrary point in time
351 *
352 * Software time stamps generated by ktime_get_real() are stored in
353 * skb->tstamp.
354 *
355 * hwtstamps can only be compared against other hwtstamps from
356 * the same device.
357 *
358 * This structure is attached to packets as part of the
359 * &skb_shared_info. Use skb_hwtstamps() to get a pointer.
360 */
struct skb_shared_hwtstamps {
	ktime_t hwtstamp;	/* see kerneldoc above for semantics */
};
364
365 /* Definitions for tx_flags in struct skb_shared_info */
366 enum {
367 /* generate hardware time stamp */
368 SKBTX_HW_TSTAMP = 1 << 0,
369
370 /* generate software time stamp when queueing packet to NIC */
371 SKBTX_SW_TSTAMP = 1 << 1,
372
373 /* device driver is going to provide hardware time stamp */
374 SKBTX_IN_PROGRESS = 1 << 2,
375
376 /* device driver supports TX zero-copy buffers */
377 SKBTX_DEV_ZEROCOPY = 1 << 3,
378
379 /* generate wifi status information (where possible) */
380 SKBTX_WIFI_STATUS = 1 << 4,
381
382 /* This indicates at least one fragment might be overwritten
383 * (as in vmsplice(), sendfile() ...)
384 * If we need to compute a TX checksum, we'll need to copy
385 * all frags to avoid possible bad checksum
386 */
387 SKBTX_SHARED_FRAG = 1 << 5,
388
389 /* generate software time stamp when entering packet scheduling */
390 SKBTX_SCHED_TSTAMP = 1 << 6,
391 };
392
393 #define SKBTX_ANY_SW_TSTAMP (SKBTX_SW_TSTAMP | \
394 SKBTX_SCHED_TSTAMP)
395 #define SKBTX_ANY_TSTAMP (SKBTX_HW_TSTAMP | SKBTX_ANY_SW_TSTAMP)
396
397 /*
398 * The callback notifies userspace to release buffers when skb DMA is done in
399 * lower device, the skb last reference should be 0 when calling this.
400 * The zerocopy_success argument is true if zero copy transmit occurred,
401 * false on data copy or out of memory error caused by data copy attempt.
402 * The ctx field is used to track device context.
403 * The desc field is used to track userspace buffer index.
404 */
/* Zero-copy completion descriptor; see the block comment above. */
struct ubuf_info {
	/* invoked when the lower device is done with the skb's data */
	void (*callback)(struct ubuf_info *, bool zerocopy_success);
	void *ctx;		/* device context */
	unsigned long desc;	/* userspace buffer index */
};
410
411 /* This data is invariant across clones and lives at
412 * the end of the header data, ie. at skb->end.
413 */
/* This data is shared by all clones of an skb and lives at the end of
 * the header data, ie. at skb->end. */
struct skb_shared_info {
	unsigned char nr_frags;		/* number of entries used in frags[] */
	__u8 tx_flags;			/* SKBTX_* flags (see enum above) */
	unsigned short gso_size;
	/* Warning: this field is not always filled in (UFO)! */
	unsigned short gso_segs;
	unsigned short gso_type;	/* SKB_GSO_* flags */
	struct sk_buff *frag_list;
	struct skb_shared_hwtstamps hwtstamps;
	u32 tskey;
	__be32 ip6_frag_id;

	/*
	 * Warning : all fields before dataref are cleared in __alloc_skb()
	 */
	atomic_t dataref;

	/* Intermediate layers must ensure that destructor_arg
	 * remains valid until skb destructor */
	void * destructor_arg;

	/* must be last field, see pskb_expand_head() */
	skb_frag_t frags[MAX_SKB_FRAGS];
};
438
439 /* We divide dataref into two halves. The higher 16 bits hold references
440 * to the payload part of skb->data. The lower 16 bits hold references to
441 * the entire skb->data. A clone of a headerless skb holds the length of
442 * the header in skb->hdr_len.
443 *
444 * All users must obey the rule that the skb->data reference count must be
445 * greater than or equal to the payload reference count.
446 *
447 * Holding a reference to the payload part means that the user does not
448 * care about modifications to the header part of skb->data.
449 */
450 #define SKB_DATAREF_SHIFT 16
451 #define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)
452
453
454 enum {
455 SKB_FCLONE_UNAVAILABLE, /* skb has no fclone (from head_cache) */
456 SKB_FCLONE_ORIG, /* orig skb (from fclone_cache) */
457 SKB_FCLONE_CLONE, /* companion fclone skb (from fclone_cache) */
458 };
459
460 enum {
461 SKB_GSO_TCPV4 = 1 << 0,
462 SKB_GSO_UDP = 1 << 1,
463
464 /* This indicates the skb is from an untrusted source. */
465 SKB_GSO_DODGY = 1 << 2,
466
467 /* This indicates the tcp segment has CWR set. */
468 SKB_GSO_TCP_ECN = 1 << 3,
469
470 SKB_GSO_TCP_FIXEDID = 1 << 4,
471
472 SKB_GSO_TCPV6 = 1 << 5,
473
474 SKB_GSO_FCOE = 1 << 6,
475
476 SKB_GSO_GRE = 1 << 7,
477
478 SKB_GSO_GRE_CSUM = 1 << 8,
479
480 SKB_GSO_IPXIP4 = 1 << 9,
481
482 SKB_GSO_IPXIP6 = 1 << 10,
483
484 SKB_GSO_UDP_TUNNEL = 1 << 11,
485
486 SKB_GSO_UDP_TUNNEL_CSUM = 1 << 12,
487
488 SKB_GSO_PARTIAL = 1 << 13,
489
490 SKB_GSO_TUNNEL_REMCSUM = 1 << 14,
491
492 SKB_GSO_SCTP = 1 << 15,
493 };
494
495 #if BITS_PER_LONG > 32
496 #define NET_SKBUFF_DATA_USES_OFFSET 1
497 #endif
498
499 #ifdef NET_SKBUFF_DATA_USES_OFFSET
500 typedef unsigned int sk_buff_data_t;
501 #else
502 typedef unsigned char *sk_buff_data_t;
503 #endif
504
505 /**
506 * struct skb_mstamp - multi resolution time stamps
507 * @stamp_us: timestamp in us resolution
508 * @stamp_jiffies: timestamp in jiffies
509 */
/* See kerneldoc above: a dual-resolution timestamp. */
struct skb_mstamp {
	union {
		u64 v64;		/* both stamps accessed as one word */
		struct {
			u32 stamp_us;		/* microsecond-resolution stamp */
			u32 stamp_jiffies;	/* jiffies-resolution stamp */
		};
	};
};
519
520 /**
521 * skb_mstamp_get - get current timestamp
522 * @cl: place to store timestamps
523 */
static inline void skb_mstamp_get(struct skb_mstamp *cl)
{
	u64 val = local_clock();	/* nanoseconds */

	/* do_div() divides in place: val becomes microseconds. */
	do_div(val, NSEC_PER_USEC);
	/* Truncation to 32 bits is fine: only deltas are consumed. */
	cl->stamp_us = (u32)val;
	cl->stamp_jiffies = (u32)jiffies;
}
532
533 /**
534 * skb_mstamp_delta - compute the difference in usec between two skb_mstamp
535 * @t1: pointer to newest sample
536 * @t0: pointer to oldest sample
537 */
static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1,
				      const struct skb_mstamp *t0)
{
	/* Unsigned subtraction wraps correctly; the result is then
	 * interpreted as signed to detect "negative" intervals. */
	s32 delta_us = t1->stamp_us - t0->stamp_us;
	u32 delta_jiffies = t1->stamp_jiffies - t0->stamp_jiffies;

	/* If delta_us is negative, this might be because interval is too big,
	 * or local_clock() drift is too big : fallback using jiffies.
	 */
	if (delta_us <= 0 ||
	    delta_jiffies >= (INT_MAX / (USEC_PER_SEC / HZ)))
		/* second clause: jiffies_to_usecs() would overflow s32 */

		delta_us = jiffies_to_usecs(delta_jiffies);

	return delta_us;
}
554
555 static inline bool skb_mstamp_after(const struct skb_mstamp *t1,
556 const struct skb_mstamp *t0)
557 {
558 s32 diff = t1->stamp_jiffies - t0->stamp_jiffies;
559
560 if (!diff)
561 diff = t1->stamp_us - t0->stamp_us;
562 return diff > 0;
563 }
564
565 /**
566 * struct sk_buff - socket buffer
567 * @next: Next buffer in list
568 * @prev: Previous buffer in list
569 * @tstamp: Time we arrived/left
570 * @rbnode: RB tree node, alternative to next/prev for netem/tcp
571 * @sk: Socket we are owned by
572 * @dev: Device we arrived on/are leaving by
573 * @cb: Control buffer. Free for use by every layer. Put private vars here
574 * @_skb_refdst: destination entry (with norefcount bit)
575 * @sp: the security path, used for xfrm
576 * @len: Length of actual data
577 * @data_len: Data length
578 * @mac_len: Length of link layer header
579 * @hdr_len: writable header length of cloned skb
580 * @csum: Checksum (must include start/offset pair)
581 * @csum_start: Offset from skb->head where checksumming should start
582 * @csum_offset: Offset from csum_start where checksum should be stored
583 * @priority: Packet queueing priority
584 * @ignore_df: allow local fragmentation
585 * @cloned: Head may be cloned (check refcnt to be sure)
586 * @ip_summed: Driver fed us an IP checksum
587 * @nohdr: Payload reference only, must not modify header
588 * @nfctinfo: Relationship of this skb to the connection
589 * @pkt_type: Packet class
590 * @fclone: skbuff clone status
591 * @ipvs_property: skbuff is owned by ipvs
592 * @peeked: this packet has been seen already, so stats have been
593 * done for it, don't do them again
594 * @nf_trace: netfilter packet trace flag
595 * @protocol: Packet protocol from driver
596 * @destructor: Destruct function
597 * @nfct: Associated connection, if any
598 * @nf_bridge: Saved data about a bridged frame - see br_netfilter.c
599 * @skb_iif: ifindex of device we arrived on
600 * @tc_index: Traffic control index
601 * @tc_verd: traffic control verdict
602 * @hash: the packet hash
603 * @queue_mapping: Queue mapping for multiqueue devices
604 * @xmit_more: More SKBs are pending for this queue
605 * @ndisc_nodetype: router type (from link layer)
606 * @ooo_okay: allow the mapping of a socket to a queue to be changed
607 * @l4_hash: indicate hash is a canonical 4-tuple hash over transport
608 * ports.
609 * @sw_hash: indicates hash was computed in software stack
610 * @wifi_acked_valid: wifi_acked was set
611 * @wifi_acked: whether frame was acked on wifi or not
612 * @no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS
613 * @napi_id: id of the NAPI struct this skb came from
614 * @secmark: security marking
615 * @mark: Generic packet mark
616 * @vlan_proto: vlan encapsulation protocol
617 * @vlan_tci: vlan tag control information
618 * @inner_protocol: Protocol (encapsulation)
619 * @inner_transport_header: Inner transport layer header (encapsulation)
620 * @inner_network_header: Network layer header (encapsulation)
621 * @inner_mac_header: Link layer header (encapsulation)
622 * @transport_header: Transport layer header
623 * @network_header: Network layer header
624 * @mac_header: Link layer header
625 * @tail: Tail pointer
626 * @end: End pointer
627 * @head: Head of buffer
628 * @data: Data head pointer
629 * @truesize: Buffer size
630 * @users: User count - see {datagram,tcp}.c
631 */
632
struct sk_buff {
	union {
		struct {
			/* These two members must be first. */
			struct sk_buff *next;
			struct sk_buff *prev;

			union {
				ktime_t tstamp;
				struct skb_mstamp skb_mstamp;
			};
		};
		struct rb_node rbnode; /* used in netem & tcp stack */
	};
	struct sock *sk;

	union {
		struct net_device *dev;
		/* Some protocols might use this space to store information,
		 * while device pointer would be NULL.
		 * UDP receive path is one user.
		 */
		unsigned long dev_scratch;
	};
	/*
	 * This is the control buffer. It is free to use for every
	 * layer. Please put your private variables there. If you
	 * want to keep them across layers you have to do a skb_clone()
	 * first. This is owned by whoever has the skb queued ATM.
	 */
	char cb[48] __aligned(8);

	unsigned long _skb_refdst;	/* dst pointer | SKB_DST_NOREF bit (see skb_dst()) */
	void (*destructor)(struct sk_buff *skb);
#ifdef CONFIG_XFRM
	struct sec_path *sp;
#endif
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	struct nf_conntrack *nfct;
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	struct nf_bridge_info *nf_bridge;
#endif
	unsigned int len,
		data_len;
	__u16 mac_len,
		hdr_len;

	/* Following fields are _not_ copied in __copy_skb_header()
	 * Note that queue_mapping is here mostly to fill a hole.
	 */
	kmemcheck_bitfield_begin(flags1);
	__u16 queue_mapping;

	/* if you move cloned around you also must adapt those constants */
#ifdef __BIG_ENDIAN_BITFIELD
#define CLONED_MASK (1 << 7)
#else
#define CLONED_MASK 1
#endif
#define CLONED_OFFSET() offsetof(struct sk_buff, __cloned_offset)

	/* zero-length marker: byte offset of the cloned bitfield group */
	__u8 __cloned_offset[0];
	__u8 cloned:1,
		nohdr:1,
		fclone:2,
		peeked:1,
		head_frag:1,
		xmit_more:1,
		__unused:1; /* one bit hole */
	kmemcheck_bitfield_end(flags1);

	/* fields enclosed in headers_start/headers_end are copied
	 * using a single memcpy() in __copy_skb_header()
	 */
	/* private: */
	__u32 headers_start[0];
	/* public: */

	/* if you move pkt_type around you also must adapt those constants */
#ifdef __BIG_ENDIAN_BITFIELD
#define PKT_TYPE_MAX (7 << 5)
#else
#define PKT_TYPE_MAX 7
#endif
#define PKT_TYPE_OFFSET() offsetof(struct sk_buff, __pkt_type_offset)

	/* zero-length marker: byte offset of the pkt_type bitfield group */
	__u8 __pkt_type_offset[0];
	__u8 pkt_type:3;
	__u8 pfmemalloc:1;	/* allocated from PFMEMALLOC reserves (see skb_pfmemalloc()) */
	__u8 ignore_df:1;
	__u8 nfctinfo:3;

	__u8 nf_trace:1;
	__u8 ip_summed:2;
	__u8 ooo_okay:1;
	__u8 l4_hash:1;
	__u8 sw_hash:1;
	__u8 wifi_acked_valid:1;
	__u8 wifi_acked:1;

	__u8 no_fcs:1;
	/* Indicates the inner headers are valid in the skbuff. */
	__u8 encapsulation:1;
	__u8 encap_hdr_csum:1;
	__u8 csum_valid:1;
	__u8 csum_complete_sw:1;
	__u8 csum_level:2;
	__u8 csum_bad:1;

#ifdef CONFIG_IPV6_NDISC_NODETYPE
	__u8 ndisc_nodetype:2;
#endif
	__u8 ipvs_property:1;
	__u8 inner_protocol_type:1;
	__u8 remcsum_offload:1;
#ifdef CONFIG_NET_SWITCHDEV
	__u8 offload_fwd_mark:1;
#endif
	/* 2, 4 or 5 bit hole */

#ifdef CONFIG_NET_SCHED
	__u16 tc_index; /* traffic control index */
#ifdef CONFIG_NET_CLS_ACT
	__u16 tc_verd; /* traffic control verdict */
#endif
#endif

	union {
		__wsum csum;
		struct {
			__u16 csum_start;
			__u16 csum_offset;
		};
	};
	__u32 priority;
	int skb_iif;
	__u32 hash;
	__be16 vlan_proto;
	__u16 vlan_tci;
#if defined(CONFIG_NET_RX_BUSY_POLL) || defined(CONFIG_XPS)
	union {
		unsigned int napi_id;
		unsigned int sender_cpu;
	};
#endif
#ifdef CONFIG_NETWORK_SECMARK
	__u32 secmark;
#endif

	union {
		__u32 mark;
		__u32 reserved_tailroom;
	};

	union {
		__be16 inner_protocol;
		__u8 inner_ipproto;
	};

	__u16 inner_transport_header;
	__u16 inner_network_header;
	__u16 inner_mac_header;

	__be16 protocol;
	__u16 transport_header;
	__u16 network_header;
	__u16 mac_header;

	/* private: */
	__u32 headers_end[0];
	/* public: */

	/* These elements must be at the end, see alloc_skb() for details. */
	sk_buff_data_t tail;
	sk_buff_data_t end;
	unsigned char *head,
		*data;
	unsigned int truesize;
	atomic_t users;
};
814
815 #ifdef __KERNEL__
816 /*
817 * Handling routines are only of interest to the kernel
818 */
819 #include <linux/slab.h>
820
821
822 #define SKB_ALLOC_FCLONE 0x01
823 #define SKB_ALLOC_RX 0x02
824 #define SKB_ALLOC_NAPI 0x04
825
826 /* Returns true if the skb was allocated from PFMEMALLOC reserves */
827 static inline bool skb_pfmemalloc(const struct sk_buff *skb)
828 {
829 return unlikely(skb->pfmemalloc);
830 }
831
832 /*
833 * skb might have a dst pointer attached, refcounted or not.
834 * _skb_refdst low order bit is set if refcount was _not_ taken
835 */
836 #define SKB_DST_NOREF 1UL
837 #define SKB_DST_PTRMASK ~(SKB_DST_NOREF)
838
839 /**
840 * skb_dst - returns skb dst_entry
841 * @skb: buffer
842 *
843 * Returns skb dst_entry, regardless of reference taken or not.
844 */
845 static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
846 {
847 /* If refdst was not refcounted, check we still are in a
848 * rcu_read_lock section
849 */
850 WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) &&
851 !rcu_read_lock_held() &&
852 !rcu_read_lock_bh_held());
853 return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK);
854 }
855
856 /**
857 * skb_dst_set - sets skb dst
858 * @skb: buffer
859 * @dst: dst entry
860 *
861 * Sets skb dst, assuming a reference was taken on dst and should
862 * be released by skb_dst_drop()
863 */
864 static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
865 {
866 skb->_skb_refdst = (unsigned long)dst;
867 }
868
869 /**
870 * skb_dst_set_noref - sets skb dst, hopefully, without taking reference
871 * @skb: buffer
872 * @dst: dst entry
873 *
874 * Sets skb dst, assuming a reference was not taken on dst.
875 * If dst entry is cached, we do not take reference and dst_release
876 * will be avoided by refdst_drop. If dst entry is not cached, we take
877 * reference, so that last dst_release can destroy the dst immediately.
878 */
879 static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst)
880 {
881 WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
882 skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF;
883 }
884
885 /**
886 * skb_dst_is_noref - Test if skb dst isn't refcounted
887 * @skb: buffer
888 */
889 static inline bool skb_dst_is_noref(const struct sk_buff *skb)
890 {
891 return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb);
892 }
893
/* Return the attached dst as a route entry (rtable embeds a dst_entry). */
static inline struct rtable *skb_rtable(const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	return (struct rtable *)dst;
}
898
899 /* For mangling skb->pkt_type from user space side from applications
900 * such as nft, tc, etc, we only allow a conservative subset of
901 * possible pkt_types to be set.
902 */
903 static inline bool skb_pkt_type_ok(u32 ptype)
904 {
905 return ptype <= PACKET_OTHERHOST;
906 }
907
908 void kfree_skb(struct sk_buff *skb);
909 void kfree_skb_list(struct sk_buff *segs);
910 void skb_tx_error(struct sk_buff *skb);
911 void consume_skb(struct sk_buff *skb);
912 void __kfree_skb(struct sk_buff *skb);
913 extern struct kmem_cache *skbuff_head_cache;
914
915 void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
916 bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
917 bool *fragstolen, int *delta_truesize);
918
919 struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
920 int node);
921 struct sk_buff *__build_skb(void *data, unsigned int frag_size);
922 struct sk_buff *build_skb(void *data, unsigned int frag_size);
923 static inline struct sk_buff *alloc_skb(unsigned int size,
924 gfp_t priority)
925 {
926 return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
927 }
928
929 struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
930 unsigned long data_len,
931 int max_page_order,
932 int *errcode,
933 gfp_t gfp_mask);
934
/* Layout of fast clones : [skb1][skb2][fclone_ref] */
struct sk_buff_fclones {
	struct sk_buff skb1;	/* original skb (SKB_FCLONE_ORIG) */

	struct sk_buff skb2;	/* its fast clone */

	atomic_t fclone_ref;	/* shared refcount; >1 means the clone is in use */
};
943
944 /**
945 * skb_fclone_busy - check if fclone is busy
946 * @sk: socket
947 * @skb: buffer
948 *
949 * Returns true if skb is a fast clone, and its clone is not freed.
950 * Some drivers call skb_orphan() in their ndo_start_xmit(),
951 * so we also check that this didnt happen.
952 */
953 static inline bool skb_fclone_busy(const struct sock *sk,
954 const struct sk_buff *skb)
955 {
956 const struct sk_buff_fclones *fclones;
957
958 fclones = container_of(skb, struct sk_buff_fclones, skb1);
959
960 return skb->fclone == SKB_FCLONE_ORIG &&
961 atomic_read(&fclones->fclone_ref) > 1 &&
962 fclones->skb2.sk == sk;
963 }
964
965 static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
966 gfp_t priority)
967 {
968 return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE);
969 }
970
971 struct sk_buff *__alloc_skb_head(gfp_t priority, int node);
972 static inline struct sk_buff *alloc_skb_head(gfp_t priority)
973 {
974 return __alloc_skb_head(priority, -1);
975 }
976
977 struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
978 int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
979 struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority);
980 struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t priority);
981 struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
982 gfp_t gfp_mask, bool fclone);
983 static inline struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom,
984 gfp_t gfp_mask)
985 {
986 return __pskb_copy_fclone(skb, headroom, gfp_mask, false);
987 }
988
989 int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask);
990 struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
991 unsigned int headroom);
992 struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom,
993 int newtailroom, gfp_t priority);
994 int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
995 int offset, int len);
996 int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset,
997 int len);
998 int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer);
999 int skb_pad(struct sk_buff *skb, int pad);
1000 #define dev_kfree_skb(a) consume_skb(a)
1001
1002 int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
1003 int getfrag(void *from, char *to, int offset,
1004 int len, int odd, struct sk_buff *skb),
1005 void *from, int length);
1006
1007 int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
1008 int offset, size_t size);
1009
/* Iterator state for skb_prepare_seq_read()/skb_seq_read(). */
struct skb_seq_state {
	__u32 lower_offset;	/* start of the requested byte range */
	__u32 upper_offset;	/* end of the requested byte range */
	__u32 frag_idx;		/* NOTE(review): presumably index into shinfo frags — confirm */
	__u32 stepped_offset;	/* bytes already consumed by the walk */
	struct sk_buff *root_skb;
	struct sk_buff *cur_skb;
	__u8 *frag_data;	/* mapped fragment data, if any */
};
1019
1020 void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
1021 unsigned int to, struct skb_seq_state *st);
1022 unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
1023 struct skb_seq_state *st);
1024 void skb_abort_seq_read(struct skb_seq_state *st);
1025
1026 unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
1027 unsigned int to, struct ts_config *config);
1028
1029 /*
1030 * Packet hash types specify the type of hash in skb_set_hash.
1031 *
1032 * Hash types refer to the protocol layer addresses which are used to
1033 * construct a packet's hash. The hashes are used to differentiate or identify
1034 * flows of the protocol layer for the hash type. Hash types are either
1035 * layer-2 (L2), layer-3 (L3), or layer-4 (L4).
1036 *
1037 * Properties of hashes:
1038 *
1039 * 1) Two packets in different flows have different hash values
1040 * 2) Two packets in the same flow should have the same hash value
1041 *
1042 * A hash at a higher layer is considered to be more specific. A driver should
1043 * set the most specific hash possible.
1044 *
1045 * A driver cannot indicate a more specific hash than the layer at which a hash
1046 * was computed. For instance an L3 hash cannot be set as an L4 hash.
1047 *
1048 * A driver may indicate a hash level which is less specific than the
1049 * actual layer the hash was computed on. For instance, a hash computed
1050 * at L4 may be considered an L3 hash. This should only be done if the
1051 * driver can't unambiguously determine that the HW computed the hash at
1052 * the higher layer. Note that the "should" in the second property above
1053 * permits this.
1054 */
/* Hash specificity levels for skb_set_hash(); see the block comment above. */
enum pkt_hash_types {
	PKT_HASH_TYPE_NONE,	/* Undefined type */
	PKT_HASH_TYPE_L2,	/* Input: src_MAC, dest_MAC */
	PKT_HASH_TYPE_L3,	/* Input: src_IP, dst_IP */
	PKT_HASH_TYPE_L4,	/* Input: src_IP, dst_IP, src_port, dst_port */
};
1061
1062 static inline void skb_clear_hash(struct sk_buff *skb)
1063 {
1064 skb->hash = 0;
1065 skb->sw_hash = 0;
1066 skb->l4_hash = 0;
1067 }
1068
1069 static inline void skb_clear_hash_if_not_l4(struct sk_buff *skb)
1070 {
1071 if (!skb->l4_hash)
1072 skb_clear_hash(skb);
1073 }
1074
1075 static inline void
1076 __skb_set_hash(struct sk_buff *skb, __u32 hash, bool is_sw, bool is_l4)
1077 {
1078 skb->l4_hash = is_l4;
1079 skb->sw_hash = is_sw;
1080 skb->hash = hash;
1081 }
1082
1083 static inline void
1084 skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
1085 {
1086 /* Used by drivers to set hash from HW */
1087 __skb_set_hash(skb, hash, false, type == PKT_HASH_TYPE_L4);
1088 }
1089
/* Store a software-computed hash on @skb (sets sw_hash). */
static inline void
__skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4)
{
	__skb_set_hash(skb, hash, true, is_l4);
}
1095
1096 void __skb_get_hash(struct sk_buff *skb);
1097 u32 __skb_get_hash_symmetric(const struct sk_buff *skb);
1098 u32 skb_get_poff(const struct sk_buff *skb);
1099 u32 __skb_get_poff(const struct sk_buff *skb, void *data,
1100 const struct flow_keys *keys, int hlen);
1101 __be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
1102 void *data, int hlen_proto);
1103
1104 static inline __be32 skb_flow_get_ports(const struct sk_buff *skb,
1105 int thoff, u8 ip_proto)
1106 {
1107 return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
1108 }
1109
1110 void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
1111 const struct flow_dissector_key *key,
1112 unsigned int key_count);
1113
1114 bool __skb_flow_dissect(const struct sk_buff *skb,
1115 struct flow_dissector *flow_dissector,
1116 void *target_container,
1117 void *data, __be16 proto, int nhoff, int hlen,
1118 unsigned int flags);
1119
1120 static inline bool skb_flow_dissect(const struct sk_buff *skb,
1121 struct flow_dissector *flow_dissector,
1122 void *target_container, unsigned int flags)
1123 {
1124 return __skb_flow_dissect(skb, flow_dissector, target_container,
1125 NULL, 0, 0, 0, flags);
1126 }
1127
1128 static inline bool skb_flow_dissect_flow_keys(const struct sk_buff *skb,
1129 struct flow_keys *flow,
1130 unsigned int flags)
1131 {
1132 memset(flow, 0, sizeof(*flow));
1133 return __skb_flow_dissect(skb, &flow_keys_dissector, flow,
1134 NULL, 0, 0, 0, flags);
1135 }
1136
1137 static inline bool skb_flow_dissect_flow_keys_buf(struct flow_keys *flow,
1138 void *data, __be16 proto,
1139 int nhoff, int hlen,
1140 unsigned int flags)
1141 {
1142 memset(flow, 0, sizeof(*flow));
1143 return __skb_flow_dissect(NULL, &flow_keys_buf_dissector, flow,
1144 data, proto, nhoff, hlen, flags);
1145 }
1146
1147 static inline __u32 skb_get_hash(struct sk_buff *skb)
1148 {
1149 if (!skb->l4_hash && !skb->sw_hash)
1150 __skb_get_hash(skb);
1151
1152 return skb->hash;
1153 }
1154
1155 __u32 __skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6);
1156
1157 static inline __u32 skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6)
1158 {
1159 if (!skb->l4_hash && !skb->sw_hash) {
1160 struct flow_keys keys;
1161 __u32 hash = __get_hash_from_flowi6(fl6, &keys);
1162
1163 __skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
1164 }
1165
1166 return skb->hash;
1167 }
1168
1169 __u32 __skb_get_hash_flowi4(struct sk_buff *skb, const struct flowi4 *fl);
1170
1171 static inline __u32 skb_get_hash_flowi4(struct sk_buff *skb, const struct flowi4 *fl4)
1172 {
1173 if (!skb->l4_hash && !skb->sw_hash) {
1174 struct flow_keys keys;
1175 __u32 hash = __get_hash_from_flowi4(fl4, &keys);
1176
1177 __skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
1178 }
1179
1180 return skb->hash;
1181 }
1182
1183 __u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb);
1184
/* Return the cached hash without attempting to compute a missing one. */
static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
{
	return skb->hash;
}
1189
1190 static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
1191 {
1192 to->hash = from->hash;
1193 to->sw_hash = from->sw_hash;
1194 to->l4_hash = from->l4_hash;
1195 };
1196
#ifdef NET_SKBUFF_DATA_USES_OFFSET
/* skb->end is an offset from skb->head */
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->end;
}

static inline unsigned int skb_end_offset(const struct sk_buff *skb)
{
	return skb->end;
}
#else
/* skb->end is a raw pointer */
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->end;
}

static inline unsigned int skb_end_offset(const struct sk_buff *skb)
{
	return skb->end - skb->head;
}
#endif
1218
1219 /* Internal */
1220 #define skb_shinfo(SKB) ((struct skb_shared_info *)(skb_end_pointer(SKB)))
1221
1222 static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
1223 {
1224 return &skb_shinfo(skb)->hwtstamps;
1225 }
1226
1227 /**
1228 * skb_queue_empty - check if a queue is empty
1229 * @list: queue head
1230 *
1231 * Returns true if the queue is empty, false otherwise.
1232 */
1233 static inline int skb_queue_empty(const struct sk_buff_head *list)
1234 {
1235 return list->next == (const struct sk_buff *) list;
1236 }
1237
1238 /**
1239 * skb_queue_is_last - check if skb is the last entry in the queue
1240 * @list: queue head
1241 * @skb: buffer
1242 *
1243 * Returns true if @skb is the last buffer on the list.
1244 */
1245 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
1246 const struct sk_buff *skb)
1247 {
1248 return skb->next == (const struct sk_buff *) list;
1249 }
1250
1251 /**
1252 * skb_queue_is_first - check if skb is the first entry in the queue
1253 * @list: queue head
1254 * @skb: buffer
1255 *
1256 * Returns true if @skb is the first buffer on the list.
1257 */
1258 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
1259 const struct sk_buff *skb)
1260 {
1261 return skb->prev == (const struct sk_buff *) list;
1262 }
1263
/**
 * skb_queue_next - return the next packet in the queue
 * @list: queue head
 * @skb: current buffer
 *
 * Return the next packet in @list after @skb. It is only valid to
 * call this if skb_queue_is_last() evaluates to false.
 */
static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
					     const struct sk_buff *skb)
{
	/* This BUG_ON may seem severe, but if we just return then we
	 * are going to dereference garbage.
	 */
	BUG_ON(skb_queue_is_last(list, skb));
	return skb->next;
}
1281
/**
 * skb_queue_prev - return the prev packet in the queue
 * @list: queue head
 * @skb: current buffer
 *
 * Return the prev packet in @list before @skb. It is only valid to
 * call this if skb_queue_is_first() evaluates to false.
 */
static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
					     const struct sk_buff *skb)
{
	/* This BUG_ON may seem severe, but if we just return then we
	 * are going to dereference garbage.
	 */
	BUG_ON(skb_queue_is_first(list, skb));
	return skb->prev;
}
1299
/**
 * skb_get - reference buffer
 * @skb: buffer to reference
 *
 * Makes another reference to a socket buffer and returns a pointer
 * to the buffer.
 */
static inline struct sk_buff *skb_get(struct sk_buff *skb)
{
	atomic_inc(&skb->users);
	return skb;
}
1312
1313 /*
1314 * If users == 1, we are the only owner and are can avoid redundant
1315 * atomic change.
1316 */
1317
1318 /**
1319 * skb_cloned - is the buffer a clone
1320 * @skb: buffer to check
1321 *
1322 * Returns true if the buffer was generated with skb_clone() and is
1323 * one of multiple shared copies of the buffer. Cloned buffers are
1324 * shared data so must not be written to under normal circumstances.
1325 */
1326 static inline int skb_cloned(const struct sk_buff *skb)
1327 {
1328 return skb->cloned &&
1329 (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
1330 }
1331
1332 static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
1333 {
1334 might_sleep_if(gfpflags_allow_blocking(pri));
1335
1336 if (skb_cloned(skb))
1337 return pskb_expand_head(skb, 0, 0, pri);
1338
1339 return 0;
1340 }
1341
1342 /**
1343 * skb_header_cloned - is the header a clone
1344 * @skb: buffer to check
1345 *
1346 * Returns true if modifying the header part of the buffer requires
1347 * the data to be copied.
1348 */
1349 static inline int skb_header_cloned(const struct sk_buff *skb)
1350 {
1351 int dataref;
1352
1353 if (!skb->cloned)
1354 return 0;
1355
1356 dataref = atomic_read(&skb_shinfo(skb)->dataref);
1357 dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
1358 return dataref != 1;
1359 }
1360
1361 static inline int skb_header_unclone(struct sk_buff *skb, gfp_t pri)
1362 {
1363 might_sleep_if(gfpflags_allow_blocking(pri));
1364
1365 if (skb_header_cloned(skb))
1366 return pskb_expand_head(skb, 0, 0, pri);
1367
1368 return 0;
1369 }
1370
/**
 * skb_header_release - release reference to header
 * @skb: buffer to operate on
 *
 * Drop a reference to the header part of the buffer. This is done
 * by acquiring a payload reference. You must not read from the header
 * part of skb->data after this.
 * Note : Check if you can use __skb_header_release() instead.
 */
static inline void skb_header_release(struct sk_buff *skb)
{
	BUG_ON(skb->nohdr);
	skb->nohdr = 1;
	/* payload references live in the upper bits of dataref */
	atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref);
}
1386
/**
 * __skb_header_release - release reference to header
 * @skb: buffer to operate on
 *
 * Variant of skb_header_release() assuming skb is private to caller.
 * We can avoid one atomic operation.
 */
static inline void __skb_header_release(struct sk_buff *skb)
{
	skb->nohdr = 1;
	/* sole owner: one header ref plus one payload ref, set non-atomically */
	atomic_set(&skb_shinfo(skb)->dataref, 1 + (1 << SKB_DATAREF_SHIFT));
}
1399
1400
1401 /**
1402 * skb_shared - is the buffer shared
1403 * @skb: buffer to check
1404 *
1405 * Returns true if more than one person has a reference to this
1406 * buffer.
1407 */
1408 static inline int skb_shared(const struct sk_buff *skb)
1409 {
1410 return atomic_read(&skb->users) != 1;
1411 }
1412
1413 /**
1414 * skb_share_check - check if buffer is shared and if so clone it
1415 * @skb: buffer to check
1416 * @pri: priority for memory allocation
1417 *
1418 * If the buffer is shared the buffer is cloned and the old copy
1419 * drops a reference. A new clone with a single reference is returned.
1420 * If the buffer is not shared the original buffer is returned. When
1421 * being called from interrupt status or with spinlocks held pri must
1422 * be GFP_ATOMIC.
1423 *
1424 * NULL is returned on a memory allocation failure.
1425 */
1426 static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri)
1427 {
1428 might_sleep_if(gfpflags_allow_blocking(pri));
1429 if (skb_shared(skb)) {
1430 struct sk_buff *nskb = skb_clone(skb, pri);
1431
1432 if (likely(nskb))
1433 consume_skb(skb);
1434 else
1435 kfree_skb(skb);
1436 skb = nskb;
1437 }
1438 return skb;
1439 }
1440
1441 /*
1442 * Copy shared buffers into a new sk_buff. We effectively do COW on
1443 * packets to handle cases where we have a local reader and forward
1444 * and a couple of other messy ones. The normal one is tcpdumping
1445 * a packet that's being forwarded.
1446 */
1447
1448 /**
1449 * skb_unshare - make a copy of a shared buffer
1450 * @skb: buffer to check
1451 * @pri: priority for memory allocation
1452 *
1453 * If the socket buffer is a clone then this function creates a new
1454 * copy of the data, drops a reference count on the old copy and returns
1455 * the new copy with the reference count at 1. If the buffer is not a clone
1456 * the original buffer is returned. When called with a spinlock held or
1457 * from interrupt state @pri must be %GFP_ATOMIC
1458 *
1459 * %NULL is returned on a memory allocation failure.
1460 */
1461 static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
1462 gfp_t pri)
1463 {
1464 might_sleep_if(gfpflags_allow_blocking(pri));
1465 if (skb_cloned(skb)) {
1466 struct sk_buff *nskb = skb_copy(skb, pri);
1467
1468 /* Free our shared copy */
1469 if (likely(nskb))
1470 consume_skb(skb);
1471 else
1472 kfree_skb(skb);
1473 skb = nskb;
1474 }
1475 return skb;
1476 }
1477
1478 /**
1479 * skb_peek - peek at the head of an &sk_buff_head
1480 * @list_: list to peek at
1481 *
1482 * Peek an &sk_buff. Unlike most other operations you _MUST_
1483 * be careful with this one. A peek leaves the buffer on the
1484 * list and someone else may run off with it. You must hold
1485 * the appropriate locks or have a private queue to do this.
1486 *
1487 * Returns %NULL for an empty list or a pointer to the head element.
1488 * The reference count is not incremented and the reference is therefore
1489 * volatile. Use with caution.
1490 */
1491 static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_)
1492 {
1493 struct sk_buff *skb = list_->next;
1494
1495 if (skb == (struct sk_buff *)list_)
1496 skb = NULL;
1497 return skb;
1498 }
1499
1500 /**
1501 * skb_peek_next - peek skb following the given one from a queue
1502 * @skb: skb to start from
1503 * @list_: list to peek at
1504 *
1505 * Returns %NULL when the end of the list is met or a pointer to the
1506 * next element. The reference count is not incremented and the
1507 * reference is therefore volatile. Use with caution.
1508 */
1509 static inline struct sk_buff *skb_peek_next(struct sk_buff *skb,
1510 const struct sk_buff_head *list_)
1511 {
1512 struct sk_buff *next = skb->next;
1513
1514 if (next == (struct sk_buff *)list_)
1515 next = NULL;
1516 return next;
1517 }
1518
1519 /**
1520 * skb_peek_tail - peek at the tail of an &sk_buff_head
1521 * @list_: list to peek at
1522 *
1523 * Peek an &sk_buff. Unlike most other operations you _MUST_
1524 * be careful with this one. A peek leaves the buffer on the
1525 * list and someone else may run off with it. You must hold
1526 * the appropriate locks or have a private queue to do this.
1527 *
1528 * Returns %NULL for an empty list or a pointer to the tail element.
1529 * The reference count is not incremented and the reference is therefore
1530 * volatile. Use with caution.
1531 */
1532 static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_)
1533 {
1534 struct sk_buff *skb = list_->prev;
1535
1536 if (skb == (struct sk_buff *)list_)
1537 skb = NULL;
1538 return skb;
1539
1540 }
1541
/**
 * skb_queue_len - get queue length
 * @list_: list to measure
 *
 * Return the length of an &sk_buff queue.
 */
static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
{
	return list_->qlen;
}
1552
1553 /**
1554 * __skb_queue_head_init - initialize non-spinlock portions of sk_buff_head
1555 * @list: queue to initialize
1556 *
1557 * This initializes only the list and queue length aspects of
1558 * an sk_buff_head object. This allows to initialize the list
1559 * aspects of an sk_buff_head without reinitializing things like
1560 * the spinlock. It can also be used for on-stack sk_buff_head
1561 * objects where the spinlock is known to not be used.
1562 */
1563 static inline void __skb_queue_head_init(struct sk_buff_head *list)
1564 {
1565 list->prev = list->next = (struct sk_buff *)list;
1566 list->qlen = 0;
1567 }
1568
1569 /*
1570 * This function creates a split out lock class for each invocation;
1571 * this is needed for now since a whole lot of users of the skb-queue
1572 * infrastructure in drivers have different locking usage (in hardirq)
1573 * than the networking core (in softirq only). In the long run either the
1574 * network layer or drivers should need annotation to consolidate the
1575 * main types of usage into 3 classes.
1576 */
/* Fully initialize an sk_buff_head: spinlock plus the list/qlen parts. */
static inline void skb_queue_head_init(struct sk_buff_head *list)
{
	spin_lock_init(&list->lock);
	__skb_queue_head_init(list);
}
1582
/* Like skb_queue_head_init(), but places the lock in the given lockdep
 * class (see the lock-class comment above skb_queue_head_init()).
 */
static inline void skb_queue_head_init_class(struct sk_buff_head *list,
					     struct lock_class_key *class)
{
	skb_queue_head_init(list);
	lockdep_set_class(&list->lock, class);
}
1589
1590 /*
1591 * Insert an sk_buff on a list.
1592 *
1593 * The "__skb_xxxx()" functions are the non-atomic ones that
1594 * can only be called with interrupts disabled.
1595 */
1596 void skb_insert(struct sk_buff *old, struct sk_buff *newsk,
1597 struct sk_buff_head *list);
1598 static inline void __skb_insert(struct sk_buff *newsk,
1599 struct sk_buff *prev, struct sk_buff *next,
1600 struct sk_buff_head *list)
1601 {
1602 newsk->next = next;
1603 newsk->prev = prev;
1604 next->prev = prev->next = newsk;
1605 list->qlen++;
1606 }
1607
1608 static inline void __skb_queue_splice(const struct sk_buff_head *list,
1609 struct sk_buff *prev,
1610 struct sk_buff *next)
1611 {
1612 struct sk_buff *first = list->next;
1613 struct sk_buff *last = list->prev;
1614
1615 first->prev = prev;
1616 prev->next = first;
1617
1618 last->next = next;
1619 next->prev = last;
1620 }
1621
/**
 *	skb_queue_splice - join two skb lists, this is designed for stacks
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	@list is spliced onto the front of @head and @head->qlen is updated.
 *	@list itself is left stale; reinitialize it before reuse (or use
 *	skb_queue_splice_init()). Takes no locks.
 */
static inline void skb_queue_splice(const struct sk_buff_head *list,
				    struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
	}
}

/**
 *	skb_queue_splice_init - join two skb lists and reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	The list at @list is reinitialised
 */
static inline void skb_queue_splice_init(struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}

/**
 *	skb_queue_splice_tail - join two skb lists, each list being a queue
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	As skb_queue_splice(), but @list is appended at the tail of @head.
 */
static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
	}
}

/**
 *	skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	Each of the lists is a queue.
 *	The list at @list is reinitialised
 */
static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
					      struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}
1684
/**
 *	__skb_queue_after - queue a buffer after a given buffer
 *	@list: list to use
 *	@prev: place after this buffer
 *	@newsk: buffer to queue
 *
 *	Queue a buffer in the middle of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
static inline void __skb_queue_after(struct sk_buff_head *list,
				     struct sk_buff *prev,
				     struct sk_buff *newsk)
{
	__skb_insert(newsk, prev, prev->next, list);
}

void skb_append(struct sk_buff *old, struct sk_buff *newsk,
		struct sk_buff_head *list);

/* Counterpart of __skb_queue_after(): link @newsk immediately before @next. */
static inline void __skb_queue_before(struct sk_buff_head *list,
				      struct sk_buff *next,
				      struct sk_buff *newsk)
{
	__skb_insert(newsk, next->prev, next, list);
}

/**
 *	__skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the start of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_head(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	/* The list head itself serves as the sentinel "buffer". */
	__skb_queue_after(list, (struct sk_buff *)list, newsk);
}

/**
 *	__skb_queue_tail - queue a buffer at the list tail
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the end of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_tail(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	__skb_queue_before(list, (struct sk_buff *)list, newsk);
}
1746
1747 /*
1748 * remove sk_buff from list. _Must_ be called atomically, and with
1749 * the list known..
1750 */
1751 void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
1752 static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
1753 {
1754 struct sk_buff *next, *prev;
1755
1756 list->qlen--;
1757 next = skb->next;
1758 prev = skb->prev;
1759 skb->next = skb->prev = NULL;
1760 next->prev = prev;
1761 prev->next = next;
1762 }
1763
/**
 *	__skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The head item is
 *	returned or %NULL if the list is empty.
 */
struct sk_buff *skb_dequeue(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}

/**
 *	__skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The tail item is
 *	returned or %NULL if the list is empty.
 */
struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek_tail(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}


/* True if the skb carries paged/fragment data (non-zero data_len);
 * relies on the implicit non-zero -> true conversion. */
static inline bool skb_is_nonlinear(const struct sk_buff *skb)
{
	return skb->data_len;
}

/* Number of bytes in the linear (head) part: total length minus paged bytes. */
static inline unsigned int skb_headlen(const struct sk_buff *skb)
{
	return skb->len - skb->data_len;
}
1808
1809 static inline unsigned int skb_pagelen(const struct sk_buff *skb)
1810 {
1811 unsigned int i, len = 0;
1812
1813 for (i = skb_shinfo(skb)->nr_frags - 1; (int)i >= 0; i--)
1814 len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
1815 return len + skb_headlen(skb);
1816 }
1817
/**
 * __skb_fill_page_desc - initialise a paged fragment in an skb
 * @skb: buffer containing fragment to be initialised
 * @i: paged fragment index to initialise
 * @page: the page to use for this fragment
 * @off: the offset to the data with @page
 * @size: the length of the data
 *
 * Initialises the @i'th fragment of @skb to point to @size bytes at
 * offset @off within @page.
 *
 * Does not take any additional reference on the fragment.
 */
static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
					struct page *page, int off, int size)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	/*
	 * Propagate page pfmemalloc to the skb if we can. The problem is
	 * that not all callers have unique ownership of the page but rely
	 * on page_is_pfmemalloc doing the right thing(tm).
	 */
	frag->page.p = page;
	frag->page_offset = off;
	skb_frag_size_set(frag, size);

	/* pfmemalloc state lives on the compound head page. */
	page = compound_head(page);
	if (page_is_pfmemalloc(page))
		skb->pfmemalloc = true;
}

/**
 * skb_fill_page_desc - initialise a paged fragment in an skb
 * @skb: buffer containing fragment to be initialised
 * @i: paged fragment index to initialise
 * @page: the page to use for this fragment
 * @off: the offset to the data with @page
 * @size: the length of the data
 *
 * As per __skb_fill_page_desc() -- initialises the @i'th fragment of
 * @skb to point to @size bytes at offset @off within @page. In
 * addition updates @skb such that @i is the last fragment.
 *
 * Does not take any additional reference on the fragment.
 */
static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
				      struct page *page, int off, int size)
{
	__skb_fill_page_desc(skb, i, page, off, size);
	skb_shinfo(skb)->nr_frags = i + 1;
}
1870
void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
		     int size, unsigned int truesize);

void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
			  unsigned int truesize);

/* Sanity assertions: the skb must have no page frags / no frag list /
 * be entirely linear, respectively. */
#define SKB_PAGE_ASSERT(skb)	BUG_ON(skb_shinfo(skb)->nr_frags)
#define SKB_FRAG_ASSERT(skb)	BUG_ON(skb_has_frag_list(skb))
#define SKB_LINEAR_ASSERT(skb)	BUG_ON(skb_is_nonlinear(skb))

#ifdef NET_SKBUFF_DATA_USES_OFFSET
/* skb->tail is stored as an offset from skb->head in this configuration. */
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data - skb->head;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb_reset_tail_pointer(skb);
	skb->tail += offset;
}

#else /* NET_SKBUFF_DATA_USES_OFFSET */
/* skb->tail is a plain pointer in this configuration. */
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb->tail = skb->data + offset;
}

#endif /* NET_SKBUFF_DATA_USES_OFFSET */
1915
/*
 * Add data to an sk_buff
 */
unsigned char *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len);
unsigned char *skb_put(struct sk_buff *skb, unsigned int len);
/* Extend the linear data area by @len bytes and return a pointer to the
 * start of the new area (the old tail). The skb must be linear; the
 * caller must guarantee sufficient tailroom. */
static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb_tail_pointer(skb);
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len += len;
	return tmp;
}

unsigned char *skb_push(struct sk_buff *skb, unsigned int len);
/* Prepend @len bytes of headroom to the data area and return the new
 * skb->data. Caller must guarantee sufficient headroom. */
static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len += len;
	return skb->data;
}

unsigned char *skb_pull(struct sk_buff *skb, unsigned int len);
/* Remove @len bytes from the front of the linear data area and return
 * the advanced skb->data. BUGs if the pull would eat into paged data. */
static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
{
	skb->len -= len;
	BUG_ON(skb->len < skb->data_len);
	return skb->data += len;
}

/* As __skb_pull(), but returns %NULL instead of pulling past skb->len. */
static inline unsigned char *skb_pull_inline(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
}

unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);

/* Pull @len bytes, linearizing paged data first if the pull would go
 * beyond the linear head. Returns %NULL if linearization fails. */
static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
{
	if (len > skb_headlen(skb) &&
	    !__pskb_pull_tail(skb, len - skb_headlen(skb)))
		return NULL;
	skb->len -= len;
	return skb->data += len;
}

/* Bounds-checked variant of __pskb_pull(): %NULL if @len exceeds skb->len. */
static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
}
1966
1967 static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
1968 {
1969 if (likely(len <= skb_headlen(skb)))
1970 return 1;
1971 if (unlikely(len > skb->len))
1972 return 0;
1973 return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
1974 }
1975
void skb_condense(struct sk_buff *skb);

/**
 *	skb_headroom - bytes at buffer head
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the head of an &sk_buff.
 */
static inline unsigned int skb_headroom(const struct sk_buff *skb)
{
	return skb->data - skb->head;
}

/**
 *	skb_tailroom - bytes at buffer end
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the tail of an sk_buff.
 *	Defined as zero for non-linear buffers.
 */
static inline int skb_tailroom(const struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
}

/**
 *	skb_availroom - bytes at buffer end
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the tail of an sk_buff
 *	allocated by sk_stream_alloc(), i.e. the tailroom minus the
 *	reserved_tailroom carve-out.
 */
static inline int skb_availroom(const struct sk_buff *skb)
{
	if (skb_is_nonlinear(skb))
		return 0;

	return skb->end - skb->tail - skb->reserved_tailroom;
}

/**
 *	skb_reserve - adjust headroom
 *	@skb: buffer to alter
 *	@len: bytes to move
 *
 *	Increase the headroom of an empty &sk_buff by reducing the tail
 *	room. This is only allowed for an empty buffer.
 */
static inline void skb_reserve(struct sk_buff *skb, int len)
{
	skb->data += len;
	skb->tail += len;
}

/**
 *	skb_tailroom_reserve - adjust reserved_tailroom
 *	@skb: buffer to alter
 *	@mtu: maximum amount of headlen permitted
 *	@needed_tailroom: minimum amount of reserved_tailroom
 *
 *	Set reserved_tailroom so that headlen can be as large as possible but
 *	not larger than mtu and tailroom cannot be smaller than
 *	needed_tailroom.
 *	The required headroom should already have been reserved before using
 *	this function.
 */
static inline void skb_tailroom_reserve(struct sk_buff *skb, unsigned int mtu,
					unsigned int needed_tailroom)
{
	SKB_LINEAR_ASSERT(skb);
	if (mtu < skb_tailroom(skb) - needed_tailroom)
		/* use at most mtu */
		skb->reserved_tailroom = skb_tailroom(skb) - mtu;
	else
		/* use up to all available space */
		skb->reserved_tailroom = needed_tailroom;
}
2052
/* Discriminator values for skb->inner_protocol_type: the inner protocol
 * field holds either an Ethertype or an IP protocol number. */
#define ENCAP_TYPE_ETHER	0
#define ENCAP_TYPE_IPPROTO	1

/* Record the inner (encapsulated) protocol as an Ethertype. */
static inline void skb_set_inner_protocol(struct sk_buff *skb,
					  __be16 protocol)
{
	skb->inner_protocol = protocol;
	skb->inner_protocol_type = ENCAP_TYPE_ETHER;
}

/* Record the inner (encapsulated) protocol as an IP protocol number. */
static inline void skb_set_inner_ipproto(struct sk_buff *skb,
					 __u8 ipproto)
{
	skb->inner_ipproto = ipproto;
	skb->inner_protocol_type = ENCAP_TYPE_IPPROTO;
}

/* Seed the inner header offsets from the current outer header offsets. */
static inline void skb_reset_inner_headers(struct sk_buff *skb)
{
	skb->inner_mac_header = skb->mac_header;
	skb->inner_network_header = skb->network_header;
	skb->inner_transport_header = skb->transport_header;
}

/* Recompute mac_len as the distance between the MAC and network headers. */
static inline void skb_reset_mac_len(struct sk_buff *skb)
{
	skb->mac_len = skb->network_header - skb->mac_header;
}
2081
/* Inner (encapsulated) header accessors. The inner_*_header fields are
 * stored as offsets from skb->head; the getters materialize pointers,
 * the reset helpers anchor the offset at the current skb->data, and the
 * set helpers apply an additional offset on top of that. */

static inline unsigned char *skb_inner_transport_header(const struct sk_buff
							*skb)
{
	return skb->head + skb->inner_transport_header;
}

/* Offset of the inner transport header relative to skb->data. */
static inline int skb_inner_transport_offset(const struct sk_buff *skb)
{
	return skb_inner_transport_header(skb) - skb->data;
}

static inline void skb_reset_inner_transport_header(struct sk_buff *skb)
{
	skb->inner_transport_header = skb->data - skb->head;
}

static inline void skb_set_inner_transport_header(struct sk_buff *skb,
						  const int offset)
{
	skb_reset_inner_transport_header(skb);
	skb->inner_transport_header += offset;
}

static inline unsigned char *skb_inner_network_header(const struct sk_buff *skb)
{
	return skb->head + skb->inner_network_header;
}

static inline void skb_reset_inner_network_header(struct sk_buff *skb)
{
	skb->inner_network_header = skb->data - skb->head;
}

static inline void skb_set_inner_network_header(struct sk_buff *skb,
						const int offset)
{
	skb_reset_inner_network_header(skb);
	skb->inner_network_header += offset;
}

static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb)
{
	return skb->head + skb->inner_mac_header;
}

static inline void skb_reset_inner_mac_header(struct sk_buff *skb)
{
	skb->inner_mac_header = skb->data - skb->head;
}

static inline void skb_set_inner_mac_header(struct sk_buff *skb,
					    const int offset)
{
	skb_reset_inner_mac_header(skb);
	skb->inner_mac_header += offset;
}
/* All-ones in the offset field is the sentinel for "not set". */
static inline bool skb_transport_header_was_set(const struct sk_buff *skb)
{
	return skb->transport_header != (typeof(skb->transport_header))~0U;
}

/* Outer header accessors; offsets are relative to skb->head, reset
 * helpers anchor them at the current skb->data. */
static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
	return skb->head + skb->transport_header;
}

static inline void skb_reset_transport_header(struct sk_buff *skb)
{
	skb->transport_header = skb->data - skb->head;
}

static inline void skb_set_transport_header(struct sk_buff *skb,
					    const int offset)
{
	skb_reset_transport_header(skb);
	skb->transport_header += offset;
}

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
	return skb->head + skb->network_header;
}

static inline void skb_reset_network_header(struct sk_buff *skb)
{
	skb->network_header = skb->data - skb->head;
}

static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
	skb_reset_network_header(skb);
	skb->network_header += offset;
}

static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
	return skb->head + skb->mac_header;
}

/* All-ones sentinel again: "MAC header offset was never set". */
static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
	return skb->mac_header != (typeof(skb->mac_header))~0U;
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->data - skb->head;
}

static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
	skb_reset_mac_header(skb);
	skb->mac_header += offset;
}

/* Drop the MAC header by moving its offset up to the network header. */
static inline void skb_pop_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->network_header;
}
2201
/* Set the transport header if it is not already set: prefer the offset
 * found by flow dissection, otherwise fall back to @offset_hint. */
static inline void skb_probe_transport_header(struct sk_buff *skb,
					      const int offset_hint)
{
	struct flow_keys keys;

	if (skb_transport_header_was_set(skb))
		return;
	else if (skb_flow_dissect_flow_keys(skb, &keys, 0))
		skb_set_transport_header(skb, keys.control.thoff);
	else
		skb_set_transport_header(skb, offset_hint);
}
2214
/* Rebuild the MAC header mac_len bytes before the current skb->data by
 * copying the old MAC header bytes there. memmove is used because the
 * old and new locations may overlap. No-op if no MAC header was set. */
static inline void skb_mac_header_rebuild(struct sk_buff *skb)
{
	if (skb_mac_header_was_set(skb)) {
		const unsigned char *old_mac = skb_mac_header(skb);

		skb_set_mac_header(skb, -skb->mac_len);
		memmove(skb_mac_header(skb), old_mac, skb->mac_len);
	}
}
2224
/* Checksum start relative to skb->data (csum_start is kept relative to
 * skb->head, so subtract the headroom). */
static inline int skb_checksum_start_offset(const struct sk_buff *skb)
{
	return skb->csum_start - skb_headroom(skb);
}

static inline unsigned char *skb_checksum_start(const struct sk_buff *skb)
{
	return skb->head + skb->csum_start;
}

/* Transport header offset relative to skb->data. */
static inline int skb_transport_offset(const struct sk_buff *skb)
{
	return skb_transport_header(skb) - skb->data;
}

/* Length of the (outer) network header: distance to the transport header. */
static inline u32 skb_network_header_len(const struct sk_buff *skb)
{
	return skb->transport_header - skb->network_header;
}

/* Same, for the inner (encapsulated) headers. */
static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
{
	return skb->inner_transport_header - skb->inner_network_header;
}

static inline int skb_network_offset(const struct sk_buff *skb)
{
	return skb_network_header(skb) - skb->data;
}

static inline int skb_inner_network_offset(const struct sk_buff *skb)
{
	return skb_inner_network_header(skb) - skb->data;
}

/* May we pull @len bytes counted from the network header? */
static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
{
	return pskb_may_pull(skb, skb_network_offset(skb) + len);
}
2264
2265 /*
2266 * CPUs often take a performance hit when accessing unaligned memory
2267 * locations. The actual performance hit varies, it can be small if the
2268 * hardware handles it or large if we have to take an exception and fix it
2269 * in software.
2270 *
2271 * Since an ethernet header is 14 bytes network drivers often end up with
2272 * the IP header at an unaligned offset. The IP header can be aligned by
2273 * shifting the start of the packet by 2 bytes. Drivers should do this
2274 * with:
2275 *
2276 * skb_reserve(skb, NET_IP_ALIGN);
2277 *
2278 * The downside to this alignment of the IP header is that the DMA is now
2279 * unaligned. On some architectures the cost of an unaligned DMA is high
2280 * and this cost outweighs the gains made by aligning the IP header.
2281 *
2282 * Since this trade off varies between architectures, we allow NET_IP_ALIGN
2283 * to be overridden.
2284 */
2285 #ifndef NET_IP_ALIGN
2286 #define NET_IP_ALIGN 2
2287 #endif
2288
2289 /*
2290 * The networking layer reserves some headroom in skb data (via
2291 * dev_alloc_skb). This is used to avoid having to reallocate skb data when
2292 * the header has to grow. In the default case, if the header has to grow
2293 * 32 bytes or less we avoid the reallocation.
2294 *
2295 * Unfortunately this headroom changes the DMA alignment of the resulting
2296 * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
2297 * on some architectures. An architecture can override this value,
2298 * perhaps setting it to a cacheline in size (since that will maintain
2299 * cacheline alignment of the DMA). It must be a power of 2.
2300 *
2301 * Various parts of the networking layer expect at least 32 bytes of
2302 * headroom, you should not reduce this.
2303 *
2304 * Using max(32, L1_CACHE_BYTES) makes sense (especially with RPS)
2305 * to reduce average number of cache lines per packet.
2306 * get_rps_cpus() for example only access one 64 bytes aligned block :
2307 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
2308 */
2309 #ifndef NET_SKB_PAD
2310 #define NET_SKB_PAD max(32, L1_CACHE_BYTES)
2311 #endif
2312
int ___pskb_trim(struct sk_buff *skb, unsigned int len);

/* Set the length of a linear skb to @len and move the tail pointer to
 * match. Warns and does nothing for non-linear buffers. */
static inline void __skb_set_length(struct sk_buff *skb, unsigned int len)
{
	if (unlikely(skb_is_nonlinear(skb))) {
		WARN_ON(1);
		return;
	}
	skb->len = len;
	skb_set_tail_pointer(skb, len);
}

/* Trim a linear skb down to @len bytes. Takes no locks. */
static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
{
	__skb_set_length(skb, len);
}

void skb_trim(struct sk_buff *skb, unsigned int len);

/* Trim to @len bytes, handling paged data via ___pskb_trim() when
 * present. Returns 0 on success or a negative errno from ___pskb_trim(). */
static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->data_len)
		return ___pskb_trim(skb, len);
	__skb_trim(skb, len);
	return 0;
}

/* Trim only if @len is actually shorter than the current length. */
static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
{
	return (len < skb->len) ? __pskb_trim(skb, len) : 0;
}

/**
 *	pskb_trim_unique - remove end from a paged unique (not cloned) buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	This is identical to pskb_trim except that the caller knows that
 *	the skb is not cloned so we should never get an error due to out-
 *	of-memory.
 */
static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
{
	int err = pskb_trim(skb, len);
	BUG_ON(err);
}

/* Grow the skb to @len bytes, expanding the head allocation if the
 * current tailroom is insufficient. Returns 0 or a negative errno from
 * pskb_expand_head(). Caller must ensure @len >= skb->len. */
static inline int __skb_grow(struct sk_buff *skb, unsigned int len)
{
	unsigned int diff = len - skb->len;

	if (skb_tailroom(skb) < diff) {
		int ret = pskb_expand_head(skb, 0, diff - skb_tailroom(skb),
					   GFP_ATOMIC);
		if (ret)
			return ret;
	}
	__skb_set_length(skb, len);
	return 0;
}
2373
2374 /**
2375 * skb_orphan - orphan a buffer
2376 * @skb: buffer to orphan
2377 *
2378 * If a buffer currently has an owner then we call the owner's
2379 * destructor function and make the @skb unowned. The buffer continues
2380 * to exist but is no longer charged to its former owner.
2381 */
2382 static inline void skb_orphan(struct sk_buff *skb)
2383 {
2384 if (skb->destructor) {
2385 skb->destructor(skb);
2386 skb->destructor = NULL;
2387 skb->sk = NULL;
2388 } else {
2389 BUG_ON(skb->sk);
2390 }
2391 }
2392
/**
 *	skb_orphan_frags - orphan the frags contained in a buffer
 *	@skb: buffer to orphan frags from
 *	@gfp_mask: allocation mask for replacement pages
 *
 *	For each frag in the SKB which needs a destructor (i.e. has an
 *	owner) create a copy of that frag and release the original
 *	page by calling the destructor.
 *	Returns 0 when nothing needed doing, otherwise the result of
 *	skb_copy_ubufs().
 */
static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
{
	/* Only zerocopy buffers have owned frags that need copying out. */
	if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)))
		return 0;
	return skb_copy_ubufs(skb, gfp_mask);
}

/**
 *	__skb_queue_purge - empty a list
 *	@list: list to empty
 *
 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
 *	the list and one reference dropped. This function does not take the
 *	list lock and the caller must hold the relevant locks to use it.
 */
void skb_queue_purge(struct sk_buff_head *list);
static inline void __skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = __skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}
2424
void skb_rbtree_purge(struct rb_root *root);

void *netdev_alloc_frag(unsigned int fragsz);

struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length,
				   gfp_t gfp_mask);

/**
 *	netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@length: length to allocate
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory. Although this function
 *	allocates memory it can be called from an interrupt.
 */
static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
					       unsigned int length)
{
	/* GFP_ATOMIC: safe from interrupt context, never sleeps. */
	return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
}

/* legacy helper around __netdev_alloc_skb() */
static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
					      gfp_t gfp_mask)
{
	return __netdev_alloc_skb(NULL, length, gfp_mask);
}

/* legacy helper around netdev_alloc_skb() */
static inline struct sk_buff *dev_alloc_skb(unsigned int length)
{
	return netdev_alloc_skb(NULL, length);
}


/* As __netdev_alloc_skb(), but over-allocates by NET_IP_ALIGN and
 * reserves that much headroom so the IP header lands aligned. */
static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev,
							  unsigned int length, gfp_t gfp)
{
	struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp);

	if (NET_IP_ALIGN && skb)
		skb_reserve(skb, NET_IP_ALIGN);
	return skb;
}

static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
							unsigned int length)
{
	return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
}

/* Release a page fragment obtained from netdev_alloc_frag()/napi_alloc_frag(). */
static inline void skb_free_frag(void *addr)
{
	__free_page_frag(addr);
}

void *napi_alloc_frag(unsigned int fragsz);
struct sk_buff *__napi_alloc_skb(struct napi_struct *napi,
				 unsigned int length, gfp_t gfp_mask);
/* Allocate an skb for rx inside NAPI context; atomic allocation. */
static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi,
					     unsigned int length)
{
	return __napi_alloc_skb(napi, length, GFP_ATOMIC);
}
void napi_consume_skb(struct sk_buff *skb, int budget);

void __kfree_skb_flush(void);
void __kfree_skb_defer(struct sk_buff *skb);
2498
/**
 * __dev_alloc_pages - allocate page for network Rx
 * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx
 * @order: size of the allocation
 *
 * Allocate a new page.
 *
 * %NULL is returned if there is no free memory.
 */
static inline struct page *__dev_alloc_pages(gfp_t gfp_mask,
					     unsigned int order)
{
	/* This piece of code contains several assumptions.
	 * 1. This is for device Rx, therefore a cold page is preferred.
	 * 2. The expectation is the user wants a compound page.
	 * 3. If requesting a order 0 page it will not be compound
	 *    due to the check to see if order has a value in prep_new_page
	 * 4. __GFP_MEMALLOC is ignored if __GFP_NOMEMALLOC is set due to
	 *    code in gfp_to_alloc_flags that should be enforcing this.
	 */
	gfp_mask |= __GFP_COLD | __GFP_COMP | __GFP_MEMALLOC;

	return alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
}

/* Atomic-context convenience wrapper; allocation failures are silent. */
static inline struct page *dev_alloc_pages(unsigned int order)
{
	return __dev_alloc_pages(GFP_ATOMIC | __GFP_NOWARN, order);
}

/**
 * __dev_alloc_page - allocate a page for network Rx
 * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx
 *
 * Allocate a new page.
 *
 * %NULL is returned if there is no free memory.
 */
static inline struct page *__dev_alloc_page(gfp_t gfp_mask)
{
	return __dev_alloc_pages(gfp_mask, 0);
}

static inline struct page *dev_alloc_page(void)
{
	return dev_alloc_pages(0);
}

/**
 *	skb_propagate_pfmemalloc - Propagate pfmemalloc if skb is allocated after RX page
 *	@page: The page that was allocated from skb_alloc_page
 *	@skb: The skb that may need pfmemalloc set
 */
static inline void skb_propagate_pfmemalloc(struct page *page,
					    struct sk_buff *skb)
{
	if (page_is_pfmemalloc(page))
		skb->pfmemalloc = true;
}
2558
2559 /**
2560 * skb_frag_page - retrieve the page referred to by a paged fragment
2561 * @frag: the paged fragment
2562 *
2563 * Returns the &struct page associated with @frag.
2564 */
2565 static inline struct page *skb_frag_page(const skb_frag_t *frag)
2566 {
2567 return frag->page.p;
2568 }
2569
2570 /**
2571 * __skb_frag_ref - take an additional reference on a paged fragment.
2572 * @frag: the paged fragment
2573 *
2574 * Takes an additional reference on the paged fragment @frag.
2575 */
2576 static inline void __skb_frag_ref(skb_frag_t *frag)
2577 {
2578 get_page(skb_frag_page(frag));
2579 }
2580
2581 /**
2582 * skb_frag_ref - take an additional reference on a paged fragment of an skb.
2583 * @skb: the buffer
2584 * @f: the fragment offset.
2585 *
2586 * Takes an additional reference on the @f'th paged fragment of @skb.
2587 */
2588 static inline void skb_frag_ref(struct sk_buff *skb, int f)
2589 {
2590 __skb_frag_ref(&skb_shinfo(skb)->frags[f]);
2591 }
2592
2593 /**
2594 * __skb_frag_unref - release a reference on a paged fragment.
2595 * @frag: the paged fragment
2596 *
2597 * Releases a reference on the paged fragment @frag.
 * Pairs with __skb_frag_ref(); the backing page may be freed here.
2598 */
2599 static inline void __skb_frag_unref(skb_frag_t *frag)
2600 {
2601 put_page(skb_frag_page(frag));
2602 }
2603
2604 /**
2605 * skb_frag_unref - release a reference on a paged fragment of an skb.
2606 * @skb: the buffer
2607 * @f: the fragment offset
2608 *
2609 * Releases a reference on the @f'th paged fragment of @skb.
2610 */
2611 static inline void skb_frag_unref(struct sk_buff *skb, int f)
2612 {
2613 __skb_frag_unref(&skb_shinfo(skb)->frags[f]);
2614 }
2615
2616 /**
2617 * skb_frag_address - gets the address of the data contained in a paged fragment
2618 * @frag: the paged fragment buffer
2619 *
2620 * Returns the address of the data within @frag. The page must already
2621 * be mapped.
2622 */
2623 static inline void *skb_frag_address(const skb_frag_t *frag)
2624 {
2625 return page_address(skb_frag_page(frag)) + frag->page_offset;
2626 }
2627
2628 /**
2629 * skb_frag_address_safe - gets the address of the data contained in a paged fragment
2630 * @frag: the paged fragment buffer
2631 *
2632 * Returns the address of the data within @frag. Checks that the page
2633 * is mapped and returns %NULL otherwise.
2634 */
2635 static inline void *skb_frag_address_safe(const skb_frag_t *frag)
2636 {
2637 void *ptr = page_address(skb_frag_page(frag));
2638 if (unlikely(!ptr))
2639 return NULL;
2640
2641 return ptr + frag->page_offset;
2642 }
2643
2644 /**
2645 * __skb_frag_set_page - sets the page contained in a paged fragment
2646 * @frag: the paged fragment
2647 * @page: the page to set
2648 *
2649 * Sets the fragment @frag to contain @page.
 * Note: only the pointer is stored here; no page reference is taken.
2650 */
2651 static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page)
2652 {
2653 frag->page.p = page;
2654 }
2655
2656 /**
2657 * skb_frag_set_page - sets the page contained in a paged fragment of an skb
2658 * @skb: the buffer
2659 * @f: the fragment offset
2660 * @page: the page to set
2661 *
2662 * Sets the @f'th fragment of @skb to contain @page.
2663 */
2664 static inline void skb_frag_set_page(struct sk_buff *skb, int f,
2665 struct page *page)
2666 {
2667 __skb_frag_set_page(&skb_shinfo(skb)->frags[f], page);
2668 }
2669
2670 bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio);
2671
2672 /**
2673 * skb_frag_dma_map - maps a paged fragment via the DMA API
2674 * @dev: the device to map the fragment to
2675 * @frag: the paged fragment to map
2676 * @offset: the offset within the fragment (starting at the
2677 * fragment's own offset)
2678 * @size: the number of bytes to map
2679 * @dir: the direction of the mapping (%PCI_DMA_*)
2680 *
2681 * Maps the page associated with @frag to @device.
2682 */
2683 static inline dma_addr_t skb_frag_dma_map(struct device *dev,
2684 const skb_frag_t *frag,
2685 size_t offset, size_t size,
2686 enum dma_data_direction dir)
2687 {
2688 return dma_map_page(dev, skb_frag_page(frag),
2689 frag->page_offset + offset, size, dir);
2690 }
2691
2692 static inline struct sk_buff *pskb_copy(struct sk_buff *skb,
2693 gfp_t gfp_mask)
2694 {
2695 return __pskb_copy(skb, skb_headroom(skb), gfp_mask);
2696 }
2697
2698
2699 static inline struct sk_buff *pskb_copy_for_clone(struct sk_buff *skb,
2700 gfp_t gfp_mask)
2701 {
2702 return __pskb_copy_fclone(skb, skb_headroom(skb), gfp_mask, true);
2703 }
2704
2705
2706 /**
2707 * skb_clone_writable - is the header of a clone writable
2708 * @skb: buffer to check
2709 * @len: length up to which to write
2710 *
2711 * Returns true if modifying the header part of the cloned buffer
2712 * does not requires the data to be copied.
2713 */
2714 static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len)
2715 {
2716 return !skb_header_cloned(skb) &&
2717 skb_headroom(skb) + len <= skb->hdr_len;
2718 }
2719
2720 static inline int skb_try_make_writable(struct sk_buff *skb,
2721 unsigned int write_len)
2722 {
2723 return skb_cloned(skb) && !skb_clone_writable(skb, write_len) &&
2724 pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2725 }
2726
2727 static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
2728 int cloned)
2729 {
2730 int delta = 0;
2731
2732 if (headroom > skb_headroom(skb))
2733 delta = headroom - skb_headroom(skb);
2734
2735 if (delta || cloned)
2736 return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
2737 GFP_ATOMIC);
2738 return 0;
2739 }
2740
/**
 * skb_cow - copy header of skb when it is required
 * @skb: buffer to cow
 * @headroom: needed headroom
 *
 * If the skb passed lacks sufficient headroom or its data part
 * is shared, data is reallocated. If reallocation fails, an error
 * is returned and the original skb is not changed.
 *
 * The result is an skb with a writable area skb->head...skb->tail
 * and at least @headroom of space at head.
 */
static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
{
	int cloned = skb_cloned(skb);

	return __skb_cow(skb, headroom, cloned);
}
2757
/**
 * skb_cow_head - skb_cow but only making the head writable
 * @skb: buffer to cow
 * @headroom: needed headroom
 *
 * Identical to skb_cow() except that the skb_cloned() check is
 * replaced by skb_header_cloned(). Use it when you only need to push
 * on some header and do not need to modify the data.
 */
static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
{
	int hdr_cloned = skb_header_cloned(skb);

	return __skb_cow(skb, headroom, hdr_cloned);
}
2772
2773 /**
2774 * skb_padto - pad an skbuff up to a minimal size
2775 * @skb: buffer to pad
2776 * @len: minimal length
2777 *
2778 * Pads up a buffer to ensure the trailing bytes exist and are
2779 * blanked. If the buffer already contains sufficient data it
2780 * is untouched. Otherwise it is extended. Returns zero on
2781 * success. The skb is freed on error.
2782 */
2783 static inline int skb_padto(struct sk_buff *skb, unsigned int len)
2784 {
2785 unsigned int size = skb->len;
2786 if (likely(size >= len))
2787 return 0;
2788 return skb_pad(skb, len - size);
2789 }
2790
2791 /**
2792 * skb_put_padto - increase size and pad an skbuff up to a minimal size
2793 * @skb: buffer to pad
2794 * @len: minimal length
2795 *
2796 * Pads up a buffer to ensure the trailing bytes exist and are
2797 * blanked. If the buffer already contains sufficient data it
2798 * is untouched. Otherwise it is extended. Returns zero on
2799 * success. The skb is freed on error.
2800 */
2801 static inline int skb_put_padto(struct sk_buff *skb, unsigned int len)
2802 {
2803 unsigned int size = skb->len;
2804
2805 if (unlikely(size < len)) {
2806 len -= size;
2807 if (skb_pad(skb, len))
2808 return -ENOMEM;
2809 __skb_put(skb, len);
2810 }
2811 return 0;
2812 }
2813
/*
 * skb_add_data - append @copy bytes from @from to the tail of @skb.
 * When the skb carries no device checksum (CHECKSUM_NONE) the copied
 * bytes are also folded into skb->csum.  On a failed/partial copy the
 * skb is trimmed back to its original length and -EFAULT is returned.
 */
2814 static inline int skb_add_data(struct sk_buff *skb,
2815 struct iov_iter *from, int copy)
2816 {
2817 const int off = skb->len;
2818
2819 if (skb->ip_summed == CHECKSUM_NONE) {
2820 __wsum csum = 0;
2821 if (csum_and_copy_from_iter_full(skb_put(skb, copy), copy,
2822 &csum, from)) {
2823 skb->csum = csum_block_add(skb->csum, csum, off);
2824 return 0;
2825 }
2826 } else if (copy_from_iter_full(skb_put(skb, copy), copy, from))
2827 return 0;
2828
 /* Copy failed: undo the skb_put() so the skb is unchanged. */
2829 __skb_trim(skb, off);
2830 return -EFAULT;
2831 }
2832
2833 static inline bool skb_can_coalesce(struct sk_buff *skb, int i,
2834 const struct page *page, int off)
2835 {
2836 if (i) {
2837 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
2838
2839 return page == skb_frag_page(frag) &&
2840 off == frag->page_offset + skb_frag_size(frag);
2841 }
2842 return false;
2843 }
2844
2845 static inline int __skb_linearize(struct sk_buff *skb)
2846 {
2847 return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
2848 }
2849
/**
 * skb_linearize - convert paged skb to linear one
 * @skb: buffer to linearize
 *
 * If there is no free memory -ENOMEM is returned, otherwise zero
 * is returned and the old skb data released.
 */
static inline int skb_linearize(struct sk_buff *skb)
{
	if (!skb_is_nonlinear(skb))
		return 0;
	return __skb_linearize(skb);
}
2861
2862 /**
2863 * skb_has_shared_frag - can any frag be overwritten
2864 * @skb: buffer to test
2865 *
2866 * Return true if the skb has at least one frag that might be modified
2867 * by an external entity (as in vmsplice()/sendfile())
2868 */
2869 static inline bool skb_has_shared_frag(const struct sk_buff *skb)
2870 {
2871 return skb_is_nonlinear(skb) &&
2872 skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG;
2873 }
2874
2875 /**
2876 * skb_linearize_cow - make sure skb is linear and writable
2877 * @skb: buffer to process
2878 *
2879 * If there is no free memory -ENOMEM is returned, otherwise zero
2880 * is returned and the old skb data released.
2881 */
2882 static inline int skb_linearize_cow(struct sk_buff *skb)
2883 {
2884 return skb_is_nonlinear(skb) || skb_cloned(skb) ?
2885 __skb_linearize(skb) : 0;
2886 }
2887
/*
 * Subtract the checksum of @len pulled bytes (starting at @start) from
 * skb->csum at block offset @off.  A CHECKSUM_PARTIAL skb whose
 * checksum start has been pulled past is downgraded to CHECKSUM_NONE.
 */
2888 static __always_inline void
2889 __skb_postpull_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
2890 unsigned int off)
2891 {
2892 if (skb->ip_summed == CHECKSUM_COMPLETE)
2893 skb->csum = csum_block_sub(skb->csum,
2894 csum_partial(start, len, 0), off);
2895 else if (skb->ip_summed == CHECKSUM_PARTIAL &&
2896 skb_checksum_start_offset(skb) < 0)
2897 skb->ip_summed = CHECKSUM_NONE;
2898 }
2899
2900 /**
2901 * skb_postpull_rcsum - update checksum for received skb after pull
2902 * @skb: buffer to update
2903 * @start: start of data before pull
2904 * @len: length of data pulled
2905 *
2906 * After doing a pull on a received packet, you need to call this to
2907 * update the CHECKSUM_COMPLETE checksum, or set ip_summed to
2908 * CHECKSUM_NONE so that it can be recomputed from scratch.
2909 */
2910 static inline void skb_postpull_rcsum(struct sk_buff *skb,
2911 const void *start, unsigned int len)
2912 {
2913 __skb_postpull_rcsum(skb, start, len, 0);
2914 }
2915
 /* Counterpart of __skb_postpull_rcsum(): add back the checksum of
  * @len pushed bytes when the skb carries a CHECKSUM_COMPLETE value.
  */
2916 static __always_inline void
2917 __skb_postpush_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
2918 unsigned int off)
2919 {
2920 if (skb->ip_summed == CHECKSUM_COMPLETE)
2921 skb->csum = csum_block_add(skb->csum,
2922 csum_partial(start, len, 0), off);
2923 }
2924
2925 /**
2926 * skb_postpush_rcsum - update checksum for received skb after push
2927 * @skb: buffer to update
2928 * @start: start of data after push
2929 * @len: length of data pushed
2930 *
2931 * After doing a push on a received packet, you need to call this to
2932 * update the CHECKSUM_COMPLETE checksum.
2933 */
2934 static inline void skb_postpush_rcsum(struct sk_buff *skb,
2935 const void *start, unsigned int len)
2936 {
2937 __skb_postpush_rcsum(skb, start, len, 0);
2938 }
2939
2940 unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
2941
2942 /**
2943 * skb_push_rcsum - push skb and update receive checksum
2944 * @skb: buffer to update
2945 * @len: length of data pulled
2946 *
2947 * This function performs an skb_push on the packet and updates
2948 * the CHECKSUM_COMPLETE checksum. It should be used on
2949 * receive path processing instead of skb_push unless you know
2950 * that the checksum difference is zero (e.g., a valid IP header)
2951 * or you are setting ip_summed to CHECKSUM_NONE.
2952 */
2953 static inline unsigned char *skb_push_rcsum(struct sk_buff *skb,
2954 unsigned int len)
2955 {
2956 skb_push(skb, len);
2957 skb_postpush_rcsum(skb, skb->data, len);
2958 return skb->data;
2959 }
2960
2961 /**
2962 * pskb_trim_rcsum - trim received skb and update checksum
2963 * @skb: buffer to trim
2964 * @len: new length
2965 *
2966 * This is exactly the same as pskb_trim except that it ensures the
2967 * checksum of received packets are still valid after the operation.
2968 */
2969
2970 static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
2971 {
2972 if (likely(len >= skb->len))
2973 return 0;
2974 if (skb->ip_summed == CHECKSUM_COMPLETE)
2975 skb->ip_summed = CHECKSUM_NONE;
2976 return __pskb_trim(skb, len);
2977 }
2978
2979 static inline int __skb_trim_rcsum(struct sk_buff *skb, unsigned int len)
2980 {
2981 if (skb->ip_summed == CHECKSUM_COMPLETE)
2982 skb->ip_summed = CHECKSUM_NONE;
2983 __skb_trim(skb, len);
2984 return 0;
2985 }
2986
2987 static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len)
2988 {
2989 if (skb->ip_summed == CHECKSUM_COMPLETE)
2990 skb->ip_summed = CHECKSUM_NONE;
2991 return __skb_grow(skb, len);
2992 }
2993
/*
 * Queue iteration helpers.  The &struct sk_buff_head is cast to a
 * struct sk_buff * purely as the list sentinel; the *_safe variants
 * keep a lookahead pointer (@tmp) so the current skb may be unlinked
 * or freed inside the loop body.
 */
2994 #define skb_queue_walk(queue, skb) \
2995 for (skb = (queue)->next; \
2996 skb != (struct sk_buff *)(queue); \
2997 skb = skb->next)
2998
2999 #define skb_queue_walk_safe(queue, skb, tmp) \
3000 for (skb = (queue)->next, tmp = skb->next; \
3001 skb != (struct sk_buff *)(queue); \
3002 skb = tmp, tmp = skb->next)
3003
3004 #define skb_queue_walk_from(queue, skb) \
3005 for (; skb != (struct sk_buff *)(queue); \
3006 skb = skb->next)
3007
3008 #define skb_queue_walk_from_safe(queue, skb, tmp) \
3009 for (tmp = skb->next; \
3010 skb != (struct sk_buff *)(queue); \
3011 skb = tmp, tmp = skb->next)
3012
3013 #define skb_queue_reverse_walk(queue, skb) \
3014 for (skb = (queue)->prev; \
3015 skb != (struct sk_buff *)(queue); \
3016 skb = skb->prev)
3017
3018 #define skb_queue_reverse_walk_safe(queue, skb, tmp) \
3019 for (skb = (queue)->prev, tmp = skb->prev; \
3020 skb != (struct sk_buff *)(queue); \
3021 skb = tmp, tmp = skb->prev)
3022
3023 #define skb_queue_reverse_walk_from_safe(queue, skb, tmp) \
3024 for (tmp = skb->prev; \
3025 skb != (struct sk_buff *)(queue); \
3026 skb = tmp, tmp = skb->prev)
3027
3028 static inline bool skb_has_frag_list(const struct sk_buff *skb)
3029 {
3030 return skb_shinfo(skb)->frag_list != NULL;
3031 }
3032
3033 static inline void skb_frag_list_init(struct sk_buff *skb)
3034 {
3035 skb_shinfo(skb)->frag_list = NULL;
3036 }
3037
/* Iterate @iter over the skb's frag_list chain (not the page frags). */
3038 #define skb_walk_frags(skb, iter) \
3039 for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
3040
3041
3042 int __skb_wait_for_more_packets(struct sock *sk, int *err, long *timeo_p,
3043 const struct sk_buff *skb);
3044 struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned flags,
3045 void (*destructor)(struct sock *sk,
3046 struct sk_buff *skb),
3047 int *peeked, int *off, int *err,
3048 struct sk_buff **last);
3049 struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
3050 void (*destructor)(struct sock *sk,
3051 struct sk_buff *skb),
3052 int *peeked, int *off, int *err);
3053 struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
3054 int *err);
3055 unsigned int datagram_poll(struct file *file, struct socket *sock,
3056 struct poll_table_struct *wait);
3057 int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
3058 struct iov_iter *to, int size);
3059 static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
3060 struct msghdr *msg, int size)
3061 {
3062 return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size);
3063 }
3064 int skb_copy_and_csum_datagram_msg(struct sk_buff *skb, int hlen,
3065 struct msghdr *msg);
3066 int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
3067 struct iov_iter *from, int len);
3068 int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm);
3069 void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
3070 void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len);
3071 static inline void skb_free_datagram_locked(struct sock *sk,
3072 struct sk_buff *skb)
3073 {
3074 __skb_free_datagram_locked(sk, skb, 0);
3075 }
3076 int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
3077 int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
3078 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
3079 __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
3080 int len, __wsum csum);
3081 int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
3082 struct pipe_inode_info *pipe, unsigned int len,
3083 unsigned int flags);
3084 void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
3085 unsigned int skb_zerocopy_headlen(const struct sk_buff *from);
3086 int skb_zerocopy(struct sk_buff *to, struct sk_buff *from,
3087 int len, int hlen);
3088 void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
3089 int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
3090 void skb_scrub_packet(struct sk_buff *skb, bool xnet);
3091 unsigned int skb_gso_transport_seglen(const struct sk_buff *skb);
3092 bool skb_gso_validate_mtu(const struct sk_buff *skb, unsigned int mtu);
3093 struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
3094 struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
3095 int skb_ensure_writable(struct sk_buff *skb, int write_len);
3096 int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci);
3097 int skb_vlan_pop(struct sk_buff *skb);
3098 int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
3099 struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy,
3100 gfp_t gfp);
3101
3102 static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len)
3103 {
3104 return copy_from_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT;
3105 }
3106
3107 static inline int memcpy_to_msg(struct msghdr *msg, void *data, int len)
3108 {
3109 return copy_to_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT;
3110 }
3111
/*
 * Pluggable checksum primitives used by __skb_checksum():
 * @update:  fold @len bytes at @mem into a running __wsum
 * @combine: combine two partial checksums; presumably @csum2 covers
 *           @len bytes starting at @offset — confirm against callers.
 */
3112 struct skb_checksum_ops {
3113 __wsum (*update)(const void *mem, int len, __wsum wsum);
3114 __wsum (*combine)(__wsum csum, __wsum csum2, int offset, int len);
3115 };
3116
3117 __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
3118 __wsum csum, const struct skb_checksum_ops *ops);
3119 __wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
3120 __wsum csum);
3121
3122 static inline void * __must_check
3123 __skb_header_pointer(const struct sk_buff *skb, int offset,
3124 int len, void *data, int hlen, void *buffer)
3125 {
3126 if (hlen - offset >= len)
3127 return data + offset;
3128
3129 if (!skb ||
3130 skb_copy_bits(skb, offset, buffer, len) < 0)
3131 return NULL;
3132
3133 return buffer;
3134 }
3135
3136 static inline void * __must_check
3137 skb_header_pointer(const struct sk_buff *skb, int offset, int len, void *buffer)
3138 {
3139 return __skb_header_pointer(skb, offset, len, skb->data,
3140 skb_headlen(skb), buffer);
3141 }
3142
3143 /**
3144 * skb_needs_linearize - check if we need to linearize a given skb
3145 * depending on the given device features.
3146 * @skb: socket buffer to check
3147 * @features: net device features
3148 *
3149 * Returns true if either:
3150 * 1. skb has frag_list and the device doesn't support FRAGLIST, or
3151 * 2. skb is fragmented and the device does not support SG.
3152 */
3153 static inline bool skb_needs_linearize(struct sk_buff *skb,
3154 netdev_features_t features)
3155 {
3156 return skb_is_nonlinear(skb) &&
3157 ((skb_has_frag_list(skb) && !(features & NETIF_F_FRAGLIST)) ||
3158 (skb_shinfo(skb)->nr_frags && !(features & NETIF_F_SG)));
3159 }
3160
 /* Copy @len bytes out of the start of the skb's linear data. */
3161 static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
3162 void *to,
3163 const unsigned int len)
3164 {
3165 memcpy(to, skb->data, len);
3166 }
3167
 /* Copy @len bytes out of the linear data, starting at @offset. */
3168 static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
3169 const int offset, void *to,
3170 const unsigned int len)
3171 {
3172 memcpy(to, skb->data + offset, len);
3173 }
3174
 /* Copy @len bytes into the start of the skb's linear data. */
3175 static inline void skb_copy_to_linear_data(struct sk_buff *skb,
3176 const void *from,
3177 const unsigned int len)
3178 {
3179 memcpy(skb->data, from, len);
3180 }
3181
 /* Copy @len bytes into the linear data, starting at @offset.
  * No bounds checking here; callers must guarantee headlen is enough.
  */
3182 static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
3183 const int offset,
3184 const void *from,
3185 const unsigned int len)
3186 {
3187 memcpy(skb->data + offset, from, len);
3188 }
3189
3190 void skb_init(void);
3191
 /* Raw ktime_t receive/transmit timestamp stored in the skb. */
3192 static inline ktime_t skb_get_ktime(const struct sk_buff *skb)
3193 {
3194 return skb->tstamp;
3195 }
3196
3197 /**
3198 * skb_get_timestamp - get timestamp from a skb
3199 * @skb: skb to get stamp from
3200 * @stamp: pointer to struct timeval to store stamp in
3201 *
3202 * Timestamps are stored in the skb as offsets to a base timestamp.
3203 * This function converts the offset back to a struct timeval and stores
3204 * it in stamp.
3205 */
3206 static inline void skb_get_timestamp(const struct sk_buff *skb,
3207 struct timeval *stamp)
3208 {
3209 *stamp = ktime_to_timeval(skb->tstamp);
3210 }
3211
 /* As skb_get_timestamp(), but converts to a struct timespec. */
3212 static inline void skb_get_timestampns(const struct sk_buff *skb,
3213 struct timespec *stamp)
3214 {
3215 *stamp = ktime_to_timespec(skb->tstamp);
3216 }
3217
 /* Stamp the skb with the current wall-clock time. */
3218 static inline void __net_timestamp(struct sk_buff *skb)
3219 {
3220 skb->tstamp = ktime_get_real();
3221 }
3222
 /* Elapsed wall-clock time since @t. */
3223 static inline ktime_t net_timedelta(ktime_t t)
3224 {
3225 return ktime_sub(ktime_get_real(), t);
3226 }
3227
 /* Sentinel "no timestamp" value (zero ktime). */
3228 static inline ktime_t net_invalid_timestamp(void)
3229 {
3230 return 0;
3231 }
3232
3233 struct sk_buff *skb_clone_sk(struct sk_buff *skb);
3234
 /* PHY timestamping hooks: real implementations when the kernel is
  * built with PHY timestamping support, inline no-ops otherwise.
  */
3235 #ifdef CONFIG_NETWORK_PHY_TIMESTAMPING
3236
3237 void skb_clone_tx_timestamp(struct sk_buff *skb);
3238 bool skb_defer_rx_timestamp(struct sk_buff *skb);
3239
3240 #else /* CONFIG_NETWORK_PHY_TIMESTAMPING */
3241
3242 static inline void skb_clone_tx_timestamp(struct sk_buff *skb)
3243 {
3244 }
3245
 /* Stub: never defer RX processing for PHY timestamping. */
3246 static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
3247 {
3248 return false;
3249 }
3250
3251 #endif /* !CONFIG_NETWORK_PHY_TIMESTAMPING */
3252
3253 /**
3254 * skb_complete_tx_timestamp() - deliver cloned skb with tx timestamps
3255 *
3256 * PHY drivers may accept clones of transmitted packets for
3257 * timestamping via their phy_driver.txtstamp method. These drivers
3258 * must call this function to return the skb back to the stack with a
3259 * timestamp.
3260 *
3261 * @skb: clone of the original outgoing packet
3262 * @hwtstamps: hardware time stamps
3263 *
3264 */
3265 void skb_complete_tx_timestamp(struct sk_buff *skb,
3266 struct skb_shared_hwtstamps *hwtstamps);
3267
3268 void __skb_tstamp_tx(struct sk_buff *orig_skb,
3269 struct skb_shared_hwtstamps *hwtstamps,
3270 struct sock *sk, int tstype);
3271
3272 /**
3273 * skb_tstamp_tx - queue clone of skb with send time stamps
3274 * @orig_skb: the original outgoing packet
3275 * @hwtstamps: hardware time stamps, may be NULL if not available
3276 *
3277 * If the skb has a socket associated, then this function clones the
3278 * skb (thus sharing the actual data and optional structures), stores
3279 * the optional hardware time stamping information (if non NULL) or
3280 * generates a software time stamp (otherwise), then queues the clone
3281 * to the error queue of the socket. Errors are silently ignored.
3282 */
3283 void skb_tstamp_tx(struct sk_buff *orig_skb,
3284 struct skb_shared_hwtstamps *hwtstamps);
3285
3286 static inline void sw_tx_timestamp(struct sk_buff *skb)
3287 {
3288 if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP &&
3289 !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
3290 skb_tstamp_tx(skb, NULL);
3291 }
3292
/**
 * skb_tx_timestamp() - Driver hook for transmit timestamping
 *
 * Ethernet MAC Drivers should call this function in their hard_xmit()
 * function immediately before giving the sk_buff to the MAC hardware.
 *
 * Specifically, one should make absolutely sure that this function is
 * called before TX completion of this packet can trigger. Otherwise
 * the packet could potentially already be freed.
 *
 * @skb: A socket buffer.
 */
static inline void skb_tx_timestamp(struct sk_buff *skb)
{
	/* PHY timestamping first (may clone the skb), then software. */
	skb_clone_tx_timestamp(skb);
	sw_tx_timestamp(skb);
}
3310
3311 /**
3312 * skb_complete_wifi_ack - deliver skb with wifi status
3313 *
3314 * @skb: the original outgoing packet
3315 * @acked: ack status
3316 *
3317 */
3318 void skb_complete_wifi_ack(struct sk_buff *skb, bool acked);
3319
3320 __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
3321 __sum16 __skb_checksum_complete(struct sk_buff *skb);
3322
3323 static inline int skb_csum_unnecessary(const struct sk_buff *skb)
3324 {
3325 return ((skb->ip_summed == CHECKSUM_UNNECESSARY) ||
3326 skb->csum_valid ||
3327 (skb->ip_summed == CHECKSUM_PARTIAL &&
3328 skb_checksum_start_offset(skb) >= 0));
3329 }
3330
3331 /**
3332 * skb_checksum_complete - Calculate checksum of an entire packet
3333 * @skb: packet to process
3334 *
3335 * This function calculates the checksum over the entire packet plus
3336 * the value of skb->csum. The latter can be used to supply the
3337 * checksum of a pseudo header as used by TCP/UDP. It returns the
3338 * checksum.
3339 *
3340 * For protocols that contain complete checksums such as ICMP/TCP/UDP,
3341 * this function can be used to verify that checksum on received
3342 * packets. In that case the function should return zero if the
3343 * checksum is correct. In particular, this function will return zero
3344 * if skb->ip_summed is CHECKSUM_UNNECESSARY which indicates that the
3345 * hardware has already verified the correctness of the checksum.
3346 */
3347 static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
3348 {
3349 return skb_csum_unnecessary(skb) ?
3350 0 : __skb_checksum_complete(skb);
3351 }
3352
3353 static inline void __skb_decr_checksum_unnecessary(struct sk_buff *skb)
3354 {
3355 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
3356 if (skb->csum_level == 0)
3357 skb->ip_summed = CHECKSUM_NONE;
3358 else
3359 skb->csum_level--;
3360 }
3361 }
3362
3363 static inline void __skb_incr_checksum_unnecessary(struct sk_buff *skb)
3364 {
3365 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
3366 if (skb->csum_level < SKB_MAX_CSUM_LEVEL)
3367 skb->csum_level++;
3368 } else if (skb->ip_summed == CHECKSUM_NONE) {
3369 skb->ip_summed = CHECKSUM_UNNECESSARY;
3370 skb->csum_level = 0;
3371 }
3372 }
3373
3374 static inline void __skb_mark_checksum_bad(struct sk_buff *skb)
3375 {
3376 /* Mark current checksum as bad (typically called from GRO
3377 * path). In the case that ip_summed is CHECKSUM_NONE
3378 * this must be the first checksum encountered in the packet.
3379 * When ip_summed is CHECKSUM_UNNECESSARY, this is the first
3380 * checksum after the last one validated. For UDP, a zero
3381 * checksum can not be marked as bad.
3382 */
3383
3384 if (skb->ip_summed == CHECKSUM_NONE ||
3385 skb->ip_summed == CHECKSUM_UNNECESSARY)
3386 skb->csum_bad = 1;
3387 }
3388
3389 /* Check if we need to perform checksum complete validation.
3390 *
3391 * Returns true if checksum complete is needed, false otherwise
3392 * (either checksum is unnecessary or zero checksum is allowed).
3393 */
3394 static inline bool __skb_checksum_validate_needed(struct sk_buff *skb,
3395 bool zero_okay,
3396 __sum16 check)
3397 {
3398 if (skb_csum_unnecessary(skb) || (zero_okay && !check)) {
3399 skb->csum_valid = 1;
3400 __skb_decr_checksum_unnecessary(skb);
3401 return false;
3402 }
3403
3404 return true;
3405 }
3406
3407 /* For small packets <= CHECKSUM_BREAK perform checksum complete directly
3408 * in checksum_init.
3409 */
3410 #define CHECKSUM_BREAK 76
3411
3412 /* Unset checksum-complete
3413 *
3414 * Unset checksum complete can be done when packet is being modified
3415 * (uncompressed for instance) and checksum-complete value is
3416 * invalidated.
3417 */
3418 static inline void skb_checksum_complete_unset(struct sk_buff *skb)
3419 {
3420 if (skb->ip_summed == CHECKSUM_COMPLETE)
3421 skb->ip_summed = CHECKSUM_NONE;
3422 }
3423
3424 /* Validate (init) checksum based on checksum complete.
3425 *
3426 * Return values:
3427 * 0: checksum is validated or try to in skb_checksum_complete. In the latter
3428 * case the ip_summed will not be CHECKSUM_UNNECESSARY and the pseudo
3429 * checksum is stored in skb->csum for use in __skb_checksum_complete
3430 * non-zero: value of invalid checksum
3431 *
3432 */
3433 static inline __sum16 __skb_checksum_validate_complete(struct sk_buff *skb,
3434 bool complete,
3435 __wsum psum)
3436 {
 /* Device supplied a full checksum: fold in the pseudo header and test. */
3437 if (skb->ip_summed == CHECKSUM_COMPLETE) {
3438 if (!csum_fold(csum_add(psum, skb->csum))) {
3439 skb->csum_valid = 1;
3440 return 0;
3441 }
3442 } else if (skb->csum_bad) {
3443 /* ip_summed == CHECKSUM_NONE in this case */
3444 return (__force __sum16)1;
3445 }
3446
 /* Stash the pseudo-header sum for a later __skb_checksum_complete(). */
3447 skb->csum = psum;
3448
 /* Small packets (or an explicit request) are verified right now. */
3449 if (complete || skb->len <= CHECKSUM_BREAK) {
3450 __sum16 csum;
3451
3452 csum = __skb_checksum_complete(skb);
3453 skb->csum_valid = !csum;
3454 return csum;
3455 }
3456
3457 return 0;
3458 }
3459
3460 static inline __wsum null_compute_pseudo(struct sk_buff *skb, int proto)
3461 {
3462 return 0;
3463 }
3464
3465 /* Perform checksum validate (init). Note that this is a macro since we only
3466 * want to calculate the pseudo header which is an input function if necessary.
3467 * First we try to validate without any computation (checksum unnecessary) and
3468 * then calculate based on checksum complete calling the function to compute
3469 * pseudo header.
3470 *
3471 * Return values:
3472 * 0: checksum is validated or try to in skb_checksum_complete
3473 * non-zero: value of invalid checksum
3474 */
3475 #define __skb_checksum_validate(skb, proto, complete, \
3476 zero_okay, check, compute_pseudo) \
3477 ({ \
3478 __sum16 __ret = 0; \
3479 skb->csum_valid = 0; \
3480 if (__skb_checksum_validate_needed(skb, zero_okay, check)) \
3481 __ret = __skb_checksum_validate_complete(skb, \
3482 complete, compute_pseudo(skb, proto)); \
3483 __ret; \
3484 })
3485
 /* "init" variants defer the full computation for large packets ... */
3486 #define skb_checksum_init(skb, proto, compute_pseudo) \
3487 __skb_checksum_validate(skb, proto, false, false, 0, compute_pseudo)
3488
3489 #define skb_checksum_init_zero_check(skb, proto, check, compute_pseudo) \
3490 __skb_checksum_validate(skb, proto, false, true, check, compute_pseudo)
3491
 /* ... while "validate" variants always verify immediately. */
3492 #define skb_checksum_validate(skb, proto, compute_pseudo) \
3493 __skb_checksum_validate(skb, proto, true, false, 0, compute_pseudo)
3494
3495 #define skb_checksum_validate_zero_check(skb, proto, check, \
3496 compute_pseudo) \
3497 __skb_checksum_validate(skb, proto, true, true, check, compute_pseudo)
3498
 /* Full validation with no pseudo header (e.g. ICMP). */
3499 #define skb_checksum_simple_validate(skb) \
3500 __skb_checksum_validate(skb, 0, true, false, 0, null_compute_pseudo)
3501
3502 static inline bool __skb_checksum_convert_check(struct sk_buff *skb)
3503 {
3504 return (skb->ip_summed == CHECKSUM_NONE &&
3505 skb->csum_valid && !skb->csum_bad);
3506 }
3507
3508 static inline void __skb_checksum_convert(struct sk_buff *skb,
3509 __sum16 check, __wsum pseudo)
3510 {
3511 skb->csum = ~pseudo;
3512 skb->ip_summed = CHECKSUM_COMPLETE;
3513 }
3514
 /* Upgrade a validated CHECKSUM_NONE skb to CHECKSUM_COMPLETE using the
  * protocol's pseudo header, so inner (tunnel) checksums can reuse it.
  */
3515 #define skb_checksum_try_convert(skb, proto, check, compute_pseudo) \
3516 do { \
3517 if (__skb_checksum_convert_check(skb)) \
3518 __skb_checksum_convert(skb, check, \
3519 compute_pseudo(skb, proto)); \
3520 } while (0)
3521
3522 static inline void skb_remcsum_adjust_partial(struct sk_buff *skb, void *ptr,
3523 u16 start, u16 offset)
3524 {
3525 skb->ip_summed = CHECKSUM_PARTIAL;
3526 skb->csum_start = ((unsigned char *)ptr + start) - skb->head;
3527 skb->csum_offset = offset - start;
3528 }
3529
3530 /* Update the skbuff and packet to reflect the remote checksum offload operation.
3531 * When called, ptr indicates the starting point for skb->csum when
3532 * ip_summed is CHECKSUM_COMPLETE. If we need to create checksum complete
3533 * here, skb_postpull_rcsum is done so skb->csum start is ptr.
3534 */
3535 static inline void skb_remcsum_process(struct sk_buff *skb, void *ptr,
3536 int start, int offset, bool nopartial)
3537 {
3538 __wsum delta;
3539
 /* Deferred mode: leave the fixup to transmit-time CHECKSUM_PARTIAL. */
3540 if (!nopartial) {
3541 skb_remcsum_adjust_partial(skb, ptr, start, offset);
3542 return;
3543 }
3544
 /* Ensure skb->csum is a checksum-complete value starting at ptr. */
3545 if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) {
3546 __skb_checksum_complete(skb);
3547 skb_postpull_rcsum(skb, skb->data, ptr - (void *)skb->data);
3548 }
3549
3550 delta = remcsum_adjust(ptr, skb->csum, start, offset);
3551
3552 /* Adjust skb->csum since we changed the packet */
3553 skb->csum = csum_add(skb->csum, delta);
3554 }
3555
3556 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
3557 void nf_conntrack_destroy(struct nf_conntrack *nfct);
/* Drop one reference to a conntrack entry; destroy it on the final put.
 * NULL is accepted and ignored.
 */
3558 static inline void nf_conntrack_put(struct nf_conntrack *nfct)
3559 {
3560 if (nfct && atomic_dec_and_test(&nfct->use))
3561 nf_conntrack_destroy(nfct);
3562 }
/* Take an additional reference to a conntrack entry (NULL is a no-op). */
3563 static inline void nf_conntrack_get(struct nf_conntrack *nfct)
3564 {
3565 if (nfct)
3566 atomic_inc(&nfct->use);
3567 }
3568 #endif
3569 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
/* Drop one reference to bridge-netfilter state; free it on the final put.
 * NULL is accepted and ignored.
 */
3570 static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
3571 {
3572 if (nf_bridge && atomic_dec_and_test(&nf_bridge->use))
3573 kfree(nf_bridge);
3574 }
/* Take an additional reference to bridge-netfilter state (NULL is a no-op). */
3575 static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
3576 {
3577 if (nf_bridge)
3578 atomic_inc(&nf_bridge->use);
3579 }
3580 #endif /* CONFIG_BRIDGE_NETFILTER */
/* Release and clear the skb's netfilter state: its conntrack reference
 * and (if configured) its bridge-netfilter info.
 */
3581 static inline void nf_reset(struct sk_buff *skb)
3582 {
3583 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
3584 nf_conntrack_put(skb->nfct);
3585 skb->nfct = NULL;
3586 #endif
3587 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
3588 nf_bridge_put(skb->nf_bridge);
3589 skb->nf_bridge = NULL;
3590 #endif
3591 }
3592
/* Clear the netfilter trace flag when packet tracing is compiled in. */
3593 static inline void nf_reset_trace(struct sk_buff *skb)
3594 {
3595 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
3596 skb->nf_trace = 0;
3597 #endif
3598 }
3599
3600 /* Note: This doesn't put any conntrack and bridge info in dst. */
/* Copy netfilter state from @src to @dst, taking new references on the
 * shared conntrack/bridge objects. When @copy is true, the per-packet
 * nfctinfo and nf_trace values are duplicated as well.
 */
3601 static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
3602 bool copy)
3603 {
3604 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
3605 dst->nfct = src->nfct;
3606 nf_conntrack_get(src->nfct);
3607 if (copy)
3608 dst->nfctinfo = src->nfctinfo;
3609 #endif
3610 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
3611 dst->nf_bridge = src->nf_bridge;
3612 nf_bridge_get(src->nf_bridge);
3613 #endif
3614 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
3615 if (copy)
3616 dst->nf_trace = src->nf_trace;
3617 #endif
3618 }
3619
/* Replace @dst's netfilter state with @src's: drop dst's existing
 * references first, then copy everything (including per-packet fields).
 */
3620 static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
3621 {
3622 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
3623 nf_conntrack_put(dst->nfct);
3624 #endif
3625 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
3626 nf_bridge_put(dst->nf_bridge);
3627 #endif
3628 __nf_copy(dst, src, true);
3629 }
3630
3631 #ifdef CONFIG_NETWORK_SECMARK
/* Propagate the LSM security mark from one skb to another. */
3632 static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
3633 {
3634 to->secmark = from->secmark;
3635 }
3636
/* Clear the skb's LSM security mark. */
3637 static inline void skb_init_secmark(struct sk_buff *skb)
3638 {
3639 skb->secmark = 0;
3640 }
3641 #else
/* Stubs when CONFIG_NETWORK_SECMARK is disabled. */
3642 static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
3643 { }
3644
3645 static inline void skb_init_secmark(struct sk_buff *skb)
3646 { }
3647 #endif
3648
/* Return true when the skb holds no state that requires process-context
 * teardown (no destructor, no xfrm sec path, no conntrack, no dst, no
 * frag list), so it can safely be freed from hard-IRQ context.
 */
3649 static inline bool skb_irq_freeable(const struct sk_buff *skb)
3650 {
3651 return !skb->destructor &&
3652 #if IS_ENABLED(CONFIG_XFRM)
3653 !skb->sp &&
3654 #endif
3655 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
3656 !skb->nfct &&
3657 #endif
3658 !skb->_skb_refdst &&
3659 !skb_has_frag_list(skb);
3660 }
3661
/* Set the TX queue the skb should be transmitted on. */
3662 static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
3663 {
3664 skb->queue_mapping = queue_mapping;
3665 }
3666
/* Return the skb's recorded queue mapping. */
3667 static inline u16 skb_get_queue_mapping(const struct sk_buff *skb)
3668 {
3669 return skb->queue_mapping;
3670 }
3671
/* Copy the queue mapping from one skb to another. */
3672 static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
3673 {
3674 to->queue_mapping = from->queue_mapping;
3675 }
3676
/* Record the RX queue, biased by one so that 0 means "not recorded". */
3677 static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue)
3678 {
3679 skb->queue_mapping = rx_queue + 1;
3680 }
3681
/* Return the recorded RX queue; only valid if skb_rx_queue_recorded(). */
3682 static inline u16 skb_get_rx_queue(const struct sk_buff *skb)
3683 {
3684 return skb->queue_mapping - 1;
3685 }
3686
/* True when skb_record_rx_queue() was called on this skb. */
3687 static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
3688 {
3689 return skb->queue_mapping != 0;
3690 }
3691
/* Return the skb's IPsec security path, or NULL when the kernel is
 * built without CONFIG_XFRM.
 */
3692 static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
3693 {
3694 #ifdef CONFIG_XFRM
3695 return skb->sp;
3696 #else
3697 return NULL;
3698 #endif
3699 }
3700
3701 /* Keeps track of mac header offset relative to skb->head.
3702 * It is useful for TSO of Tunneling protocol. e.g. GRE.
3703 * For non-tunnel skb it points to skb_mac_header() and for
3704 * tunnel skb it points to outer mac header.
3705 * Keeps track of level of encapsulation of network headers.
3706 */
3707 struct skb_gso_cb {
3708 union {
3709 int mac_offset;
3710 int data_offset;
3711 };
3712 int encap_level;
3713 __wsum csum;
3714 __u16 csum_start;
3715 };
/* Offset of the GSO control block within skb->cb[]. NOTE(review): "SGO"
 * looks like a typo for "GSO" but is the established name; renaming it
 * would break external users of the macro.
 */
3716 #define SKB_SGO_CB_OFFSET 32
3717 #define SKB_GSO_CB(skb) ((struct skb_gso_cb *)((skb)->cb + SKB_SGO_CB_OFFSET))
3718
3719 static inline int skb_tnl_header_len(const struct sk_buff *inner_skb)
3720 {
3721 return (skb_mac_header(inner_skb) - inner_skb->head) -
3722 SKB_GSO_CB(inner_skb)->mac_offset;
3723 }
3724
3725 static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra)
3726 {
3727 int new_headroom, headroom;
3728 int ret;
3729
3730 headroom = skb_headroom(skb);
3731 ret = pskb_expand_head(skb, extra, 0, GFP_ATOMIC);
3732 if (ret)
3733 return ret;
3734
3735 new_headroom = skb_headroom(skb);
3736 SKB_GSO_CB(skb)->mac_offset += (new_headroom - headroom);
3737 return 0;
3738 }
3739
3740 static inline void gso_reset_checksum(struct sk_buff *skb, __wsum res)
3741 {
3742 /* Do not update partial checksums if remote checksum is enabled. */
3743 if (skb->remcsum_offload)
3744 return;
3745
3746 SKB_GSO_CB(skb)->csum = res;
3747 SKB_GSO_CB(skb)->csum_start = skb_checksum_start(skb) - skb->head;
3748 }
3749
3750 /* Compute the checksum for a gso segment. First compute the checksum value
3751 * from the start of transport header to SKB_GSO_CB(skb)->csum_start, and
3752 * then add in skb->csum (checksum from csum_start to end of packet).
3753 * skb->csum and csum_start are then updated to reflect the checksum of the
3754 * resultant packet starting from the transport header-- the resultant checksum
3755 * is in the res argument (i.e. normally zero or ~ of checksum of a pseudo
3756 * header.
3757 */
3758 static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res)
3759 {
3760 unsigned char *csum_start = skb_transport_header(skb);
/* Bytes between the transport header and the previously recorded
 * checksum start; this span is not yet covered by the stored csum. */
3761 int plen = (skb->head + SKB_GSO_CB(skb)->csum_start) - csum_start;
3762 __wsum partial = SKB_GSO_CB(skb)->csum;
3763
/* Re-anchor the stored checksum state at the transport header before
 * folding, so the next segment builds on the updated values. */
3764 SKB_GSO_CB(skb)->csum = res;
3765 SKB_GSO_CB(skb)->csum_start = csum_start - skb->head;
3766
3767 return csum_fold(csum_partial(csum_start, plen, partial));
3768 }
3769
/* True when the skb describes a GSO packet (non-zero segment size). */
3770 static inline bool skb_is_gso(const struct sk_buff *skb)
3771 {
3772 return skb_shinfo(skb)->gso_size;
3773 }
3774
3775 /* Note: Should be called only if skb_is_gso(skb) is true */
3776 static inline bool skb_is_gso_v6(const struct sk_buff *skb)
3777 {
3778 return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
3779 }
3780
/* Clear all GSO state so the skb is treated as a normal packet. */
3781 static inline void skb_gso_reset(struct sk_buff *skb)
3782 {
3783 skb_shinfo(skb)->gso_size = 0;
3784 skb_shinfo(skb)->gso_segs = 0;
3785 skb_shinfo(skb)->gso_type = 0;
3786 }
3787
3788 void __skb_warn_lro_forwarding(const struct sk_buff *skb);
3789
/* Return true (and warn once) when a large-receive-offload skb is about
 * to be forwarded; LRO packets must not be forwarded unmodified.
 */
3790 static inline bool skb_warn_if_lro(const struct sk_buff *skb)
3791 {
3792 /* LRO sets gso_size but not gso_type, whereas if GSO is really
3793 * wanted then gso_type will be set. */
3794 const struct skb_shared_info *shinfo = skb_shinfo(skb);
3795
3796 if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&
3797 unlikely(shinfo->gso_type == 0)) {
3798 __skb_warn_lro_forwarding(skb);
3799 return true;
3800 }
3801 return false;
3802 }
3803
/* Downgrade CHECKSUM_COMPLETE to CHECKSUM_NONE before forwarding, since
 * the complete checksum cannot be kept valid across header rewrites.
 */
3804 static inline void skb_forward_csum(struct sk_buff *skb)
3805 {
3806 /* Unfortunately we don't support this one. Any brave souls? */
3807 if (skb->ip_summed == CHECKSUM_COMPLETE)
3808 skb->ip_summed = CHECKSUM_NONE;
3809 }
3810
3811 /**
3812 * skb_checksum_none_assert - make sure skb ip_summed is CHECKSUM_NONE
3813 * @skb: skb to check
3814 *
3815 * fresh skbs have their ip_summed set to CHECKSUM_NONE.
3816 * Instead of forcing ip_summed to CHECKSUM_NONE, we can
3817 * use this helper, to document places where we make this assertion.
3818 */
3819 static inline void skb_checksum_none_assert(const struct sk_buff *skb)
3820 {
3821 #ifdef DEBUG
3822 BUG_ON(skb->ip_summed != CHECKSUM_NONE);
3823 #endif
3824 }
3825
/* Set up CHECKSUM_PARTIAL metadata from device-supplied offsets. */
3826 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
3827
/* Validate/complete checksum metadata on a received skb. */
3828 int skb_checksum_setup(struct sk_buff *skb, bool recalculate);
/* Checksum-validate and trim an skb to @transport_len using @skb_chkf. */
3829 struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
3830 unsigned int transport_len,
3831 __sum16(*skb_chkf)(struct sk_buff *skb));
3832
3833 /**
3834 * skb_head_is_locked - Determine if the skb->head is locked down
3835 * @skb: skb to check
3836 *
3837 * The head on skbs build around a head frag can be removed if they are
3838 * not cloned. This function returns true if the skb head is locked down
3839 * due to either being allocated via kmalloc, or by being a clone with
3840 * multiple references to the head.
3841 */
3842 static inline bool skb_head_is_locked(const struct sk_buff *skb)
3843 {
3844 return !skb->head_frag || skb_cloned(skb);
3845 }
3846
/**
 * skb_gso_network_seglen - Return length of individual segments of a gso packet
 * @skb: GSO skb
 *
 * Reports the on-the-wire size of each segment counted from the network
 * header: L3 (IP/IPv6) plus L4 (TCP/UDP) headers plus payload. The
 * MAC/L2 header is not included.
 */
static inline unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
{
	const unsigned char *network = skb_network_header(skb);
	const unsigned char *transport = skb_transport_header(skb);
	unsigned int l3_hdr_len = transport - network;

	return l3_hdr_len + skb_gso_transport_seglen(skb);
}
3863
3864 /* Local Checksum Offload.
3865 * Compute outer checksum based on the assumption that the
3866 * inner checksum will be offloaded later.
3867 * See Documentation/networking/checksum-offloads.txt for
3868 * explanation of how this works.
3869 * Fill in outer checksum adjustment (e.g. with sum of outer
3870 * pseudo-header) before calling.
3871 * Also ensure that inner checksum is in linear data area.
3872 */
3873 static inline __wsum lco_csum(struct sk_buff *skb)
3874 {
3875 unsigned char *csum_start = skb_checksum_start(skb);
3876 unsigned char *l4_hdr = skb_transport_header(skb);
3877 __wsum partial;
3878
3879 /* Start with complement of inner checksum adjustment */
/* The inner checksum field currently holds the caller-filled adjustment
 * (e.g. inner pseudo-header sum); its complement cancels it out of the
 * outer sum once the device later overwrites the field. */
3880 partial = ~csum_unfold(*(__force __sum16 *)(csum_start +
3881 skb->csum_offset));
3882
3883 /* Add in checksum of our headers (incl. outer checksum
3884 * adjustment filled in by caller) and return result.
3885 */
3886 return csum_partial(l4_hdr, csum_start - l4_hdr, partial);
3887 }
3888
3889 #endif /* __KERNEL__ */
3890 #endif /* _LINUX_SKBUFF_H */ 1 /*
2 * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
3 *
4 * (C) SGI 2006, Christoph Lameter
5 * Cleaned up and restructured to ease the addition of alternative
6 * implementations of SLAB allocators.
7 * (C) Linux Foundation 2008-2013
8 * Unified interface for all slab allocators
9 */
10
11 #ifndef _LINUX_SLAB_H
12 #define _LINUX_SLAB_H
13
14 #include <linux/gfp.h>
15 #include <linux/types.h>
16 #include <linux/workqueue.h>
17
18
19 /*
20 * Flags to pass to kmem_cache_create().
21 * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
22 */
23 #define SLAB_CONSISTENCY_CHECKS 0x00000100UL /* DEBUG: Perform (expensive) checks on alloc/free */
24 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
25 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
26 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
27 #define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */
28 #define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */
29 #define SLAB_PANIC 0x00040000UL /* Panic if kmem_cache_create() fails */
30 /*
31 * SLAB_DESTROY_BY_RCU - **WARNING** READ THIS!
32 *
33 * This delays freeing the SLAB page by a grace period, it does _NOT_
34 * delay object freeing. This means that if you do kmem_cache_free()
35 * that memory location is free to be reused at any time. Thus it may
36 * be possible to see another object there in the same RCU grace period.
37 *
38 * This feature only ensures the memory location backing the object
39 * stays valid, the trick to using this is relying on an independent
40 * object validation pass. Something like:
41 *
42 * rcu_read_lock()
43 * again:
44 * obj = lockless_lookup(key);
45 * if (obj) {
46 * if (!try_get_ref(obj)) // might fail for free objects
47 * goto again;
48 *
49 * if (obj->key != key) { // not the object we expected
50 * put_ref(obj);
51 * goto again;
52 * }
53 * }
54 * rcu_read_unlock();
55 *
56 * This is useful if we need to approach a kernel structure obliquely,
57 * from its address obtained without the usual locking. We can lock
58 * the structure to stabilize it and check it's still at the given address,
59 * only if we can be sure that the memory has not been meanwhile reused
60 * for some other kind of object (which our subsystem's lock might corrupt).
61 *
62 * rcu_read_lock before reading the address, then rcu_read_unlock after
63 * taking the spinlock within the structure expected at that address.
64 */
65 #define SLAB_DESTROY_BY_RCU 0x00080000UL /* Defer freeing slabs to RCU */
66 #define SLAB_MEM_SPREAD 0x00100000UL /* Spread some memory over cpuset */
67 #define SLAB_TRACE 0x00200000UL /* Trace allocations and frees */
68
69 /* Flag to prevent checks on free */
70 #ifdef CONFIG_DEBUG_OBJECTS
71 # define SLAB_DEBUG_OBJECTS 0x00400000UL
72 #else
73 # define SLAB_DEBUG_OBJECTS 0x00000000UL
74 #endif
75
76 #define SLAB_NOLEAKTRACE 0x00800000UL /* Avoid kmemleak tracing */
77
78 /* Don't track use of uninitialized memory */
79 #ifdef CONFIG_KMEMCHECK
80 # define SLAB_NOTRACK 0x01000000UL
81 #else
82 # define SLAB_NOTRACK 0x00000000UL
83 #endif
84 #ifdef CONFIG_FAILSLAB
85 # define SLAB_FAILSLAB 0x02000000UL /* Fault injection mark */
86 #else
87 # define SLAB_FAILSLAB 0x00000000UL
88 #endif
89 #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
90 # define SLAB_ACCOUNT 0x04000000UL /* Account to memcg */
91 #else
92 # define SLAB_ACCOUNT 0x00000000UL
93 #endif
94
95 #ifdef CONFIG_KASAN
96 #define SLAB_KASAN 0x08000000UL
97 #else
98 #define SLAB_KASAN 0x00000000UL
99 #endif
100
101 /* The following flags affect the page allocator grouping pages by mobility */
102 #define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */
103 #define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */
104 /*
105 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
106 *
107 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
108 *
109 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
110 * Both make kfree a no-op.
111 */
112 #define ZERO_SIZE_PTR ((void *)16)
113
114 #define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
115 (unsigned long)ZERO_SIZE_PTR)
116
117 #include <linux/kmemleak.h>
118 #include <linux/kasan.h>
119
120 struct mem_cgroup;
121 /*
122 * struct kmem_cache related prototypes
123 */
124 void __init kmem_cache_init(void);
125 bool slab_is_available(void);
126
127 struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
128 unsigned long,
129 void (*)(void *));
130 void kmem_cache_destroy(struct kmem_cache *);
131 int kmem_cache_shrink(struct kmem_cache *);
132
133 void memcg_create_kmem_cache(struct mem_cgroup *, struct kmem_cache *);
134 void memcg_deactivate_kmem_caches(struct mem_cgroup *);
135 void memcg_destroy_kmem_caches(struct mem_cgroup *);
136
137 /*
138 * Please use this macro to create slab caches. Simply specify the
139 * name of the structure and maybe some flags that are listed above.
140 *
141 * The alignment of the struct determines object alignment. If you
142 * f.e. add ____cacheline_aligned_in_smp to the struct declaration
143 * then the objects will be properly aligned in SMP configurations.
144 */
145 #define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\
146 sizeof(struct __struct), __alignof__(struct __struct),\
147 (__flags), NULL)
148
149 /*
150 * Common kmalloc functions provided by all allocators
151 */
152 void * __must_check __krealloc(const void *, size_t, gfp_t);
153 void * __must_check krealloc(const void *, size_t, gfp_t);
154 void kfree(const void *);
155 void kzfree(const void *);
156 size_t ksize(const void *);
157
158 #ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
159 const char *__check_heap_object(const void *ptr, unsigned long n,
160 struct page *page);
161 #else
162 static inline const char *__check_heap_object(const void *ptr,
163 unsigned long n,
164 struct page *page)
165 {
166 return NULL;
167 }
168 #endif
169
170 /*
171 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
172 * alignment larger than the alignment of a 64-bit integer.
173 * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that.
174 */
175 #if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
176 #define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
177 #define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
178 #define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
179 #else
180 #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
181 #endif
182
183 /*
184 * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
185 * Intended for arches that get misalignment faults even for 64 bit integer
186 * aligned buffers.
187 */
188 #ifndef ARCH_SLAB_MINALIGN
189 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
190 #endif
191
192 /*
193 * kmalloc and friends return ARCH_KMALLOC_MINALIGN aligned
194 * pointers. kmem_cache_alloc and friends return ARCH_SLAB_MINALIGN
195 * aligned pointers.
196 */
197 #define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
198 #define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)
199 #define __assume_page_alignment __assume_aligned(PAGE_SIZE)
200
201 /*
202 * Kmalloc array related definitions
203 */
204
205 #ifdef CONFIG_SLAB
206 /*
207 * The largest kmalloc size supported by the SLAB allocators is
208 * 32 megabyte (2^25) or the maximum allocatable page order if that is
209 * less than 32 MB.
210 *
211 * WARNING: It's not easy to increase this value since the allocators have
212 * to do various tricks to work around compiler limitations in order to
213 * ensure proper constant folding.
214 */
215 #define KMALLOC_SHIFT_HIGH ((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
216 (MAX_ORDER + PAGE_SHIFT - 1) : 25)
217 #define KMALLOC_SHIFT_MAX KMALLOC_SHIFT_HIGH
218 #ifndef KMALLOC_SHIFT_LOW
219 #define KMALLOC_SHIFT_LOW 5
220 #endif
221 #endif
222
223 #ifdef CONFIG_SLUB
224 /*
225 * SLUB directly allocates requests fitting in to an order-1 page
226 * (PAGE_SIZE*2). Larger requests are passed to the page allocator.
227 */
228 #define KMALLOC_SHIFT_HIGH (PAGE_SHIFT + 1)
229 #define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT)
230 #ifndef KMALLOC_SHIFT_LOW
231 #define KMALLOC_SHIFT_LOW 3
232 #endif
233 #endif
234
235 #ifdef CONFIG_SLOB
236 /*
237 * SLOB passes all requests larger than one page to the page allocator.
238 * No kmalloc array is necessary since objects of different sizes can
239 * be allocated from the same page.
240 */
241 #define KMALLOC_SHIFT_HIGH PAGE_SHIFT
242 #define KMALLOC_SHIFT_MAX 30
243 #ifndef KMALLOC_SHIFT_LOW
244 #define KMALLOC_SHIFT_LOW 3
245 #endif
246 #endif
247
248 /* Maximum allocatable size */
249 #define KMALLOC_MAX_SIZE (1UL << KMALLOC_SHIFT_MAX)
250 /* Maximum size for which we actually use a slab cache */
251 #define KMALLOC_MAX_CACHE_SIZE (1UL << KMALLOC_SHIFT_HIGH)
252 /* Maximum order allocatable via the slab allocator */
253 #define KMALLOC_MAX_ORDER (KMALLOC_SHIFT_MAX - PAGE_SHIFT)
254
255 /*
256 * Kmalloc subsystem.
257 */
258 #ifndef KMALLOC_MIN_SIZE
259 #define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
260 #endif
261
262 /*
263 * This restriction comes from byte sized index implementation.
264 * Page size is normally 2^12 bytes and, in this case, if we want to use
265 * byte sized index which can represent 2^8 entries, the size of the object
266 * should be equal or greater to 2^12 / 2^8 = 2^4 = 16.
267 * If minimum size of kmalloc is less than 16, we use it as minimum object
268 * size and give up to use byte sized index.
269 */
270 #define SLAB_OBJ_MIN_SIZE (KMALLOC_MIN_SIZE < 16 ? \
271 (KMALLOC_MIN_SIZE) : 16)
272
273 #ifndef CONFIG_SLOB
274 extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
275 #ifdef CONFIG_ZONE_DMA
276 extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
277 #endif
278
279 /*
280 * Figure out which kmalloc slab an allocation of a certain size
281 * belongs to.
282 * 0 = zero alloc
283 * 1 = 65 .. 96 bytes
284 * 2 = 129 .. 192 bytes
285 * n = 2^(n-1)+1 .. 2^n
286 */
/* Map an allocation size to its kmalloc cache index. Written as a flat
 * ladder of constant comparisons so the compiler can fold the whole
 * function to a constant when @size is compile-time known.
 * Returns 0 for a zero-size request (caller yields ZERO_SIZE_PTR);
 * sizes above 64 MB hit BUG().
 */
287 static __always_inline int kmalloc_index(size_t size)
288 {
289 if (!size)
290 return 0;
291
/* Everything at or below the minimum object size shares one cache. */
292 if (size <= KMALLOC_MIN_SIZE)
293 return KMALLOC_SHIFT_LOW;
294
/* Special non-power-of-two caches (96 and 192 bytes), only usable
 * when the minimum size does not already exceed them. */
295 if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
296 return 1;
297 if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
298 return 2;
299 if (size <= 8) return 3;
300 if (size <= 16) return 4;
301 if (size <= 32) return 5;
302 if (size <= 64) return 6;
303 if (size <= 128) return 7;
304 if (size <= 256) return 8;
305 if (size <= 512) return 9;
306 if (size <= 1024) return 10;
307 if (size <= 2 * 1024) return 11;
308 if (size <= 4 * 1024) return 12;
309 if (size <= 8 * 1024) return 13;
310 if (size <= 16 * 1024) return 14;
311 if (size <= 32 * 1024) return 15;
312 if (size <= 64 * 1024) return 16;
313 if (size <= 128 * 1024) return 17;
314 if (size <= 256 * 1024) return 18;
315 if (size <= 512 * 1024) return 19;
316 if (size <= 1024 * 1024) return 20;
317 if (size <= 2 * 1024 * 1024) return 21;
318 if (size <= 4 * 1024 * 1024) return 22;
319 if (size <= 8 * 1024 * 1024) return 23;
320 if (size <= 16 * 1024 * 1024) return 24;
321 if (size <= 32 * 1024 * 1024) return 25;
322 if (size <= 64 * 1024 * 1024) return 26;
323 BUG();
324
325 /* Will never be reached. Needed because the compiler may complain */
326 return -1;
327 }
328 #endif /* !CONFIG_SLOB */
329
330 void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc;
331 void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment __malloc;
332 void kmem_cache_free(struct kmem_cache *, void *);
333
334 /*
335 * Bulk allocation and freeing operations. These are accelerated in an
336 * allocator specific way to avoid taking locks repeatedly or building
337 * metadata structures unnecessarily.
338 *
339 * Note that interrupts must be enabled when calling these functions.
340 */
341 void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
342 int kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
343
344 /*
345 * Caller must not use kfree_bulk() on memory not originally allocated
346 * by kmalloc(), because the SLOB allocator cannot handle this.
347 */
/* Free @size kmalloc'ed pointers from @p in one batched call; the NULL
 * cache argument tells the allocator to look up each object's cache. */
348 static __always_inline void kfree_bulk(size_t size, void **p)
349 {
350 kmem_cache_free_bulk(NULL, size, p);
351 }
352
353 #ifdef CONFIG_NUMA
354 void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc;
355 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment __malloc;
356 #else
/* On !NUMA kernels the node argument is meaningless; fall through to the
 * node-agnostic allocators. */
357 static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
358 {
359 return __kmalloc(size, flags);
360 }
361
362 static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
363 {
364 return kmem_cache_alloc(s, flags);
365 }
366 #endif
367
368 #ifdef CONFIG_TRACING
369 extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment __malloc;
370
371 #ifdef CONFIG_NUMA
372 extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
373 gfp_t gfpflags,
374 int node, size_t size) __assume_slab_alignment __malloc;
375 #else
/* !NUMA: node hint is ignored; delegate to the plain traced variant. */
376 static __always_inline void *
377 kmem_cache_alloc_node_trace(struct kmem_cache *s,
378 gfp_t gfpflags,
379 int node, size_t size)
380 {
381 return kmem_cache_alloc_trace(s, gfpflags, size);
382 }
383 #endif /* CONFIG_NUMA */
384
385 #else /* CONFIG_TRACING */
/* Tracing disabled: the "_trace" variants reduce to plain allocation
 * plus KASAN bookkeeping of the originally requested size. */
386 static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
387 gfp_t flags, size_t size)
388 {
389 void *ret = kmem_cache_alloc(s, flags);
390
391 kasan_kmalloc(s, ret, size, flags);
392 return ret;
393 }
394
395 static __always_inline void *
396 kmem_cache_alloc_node_trace(struct kmem_cache *s,
397 gfp_t gfpflags,
398 int node, size_t size)
399 {
400 void *ret = kmem_cache_alloc_node(s, gfpflags, node);
401
402 kasan_kmalloc(s, ret, size, gfpflags);
403 return ret;
404 }
405 #endif /* CONFIG_TRACING */
406
407 extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;
408
409 #ifdef CONFIG_TRACING
410 extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;
411 #else
412 static __always_inline void *
413 kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
414 {
415 return kmalloc_order(size, flags, order);
416 }
417 #endif
418
419 static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
420 {
421 unsigned int order = get_order(size);
422 return kmalloc_order_trace(size, flags, order);
423 }
424
425 /**
426 * kmalloc - allocate memory
427 * @size: how many bytes of memory are required.
428 * @flags: the type of memory to allocate.
429 *
430 * kmalloc is the normal method of allocating memory
431 * for objects smaller than page size in the kernel.
432 *
433 * The @flags argument may be one of:
434 *
435 * %GFP_USER - Allocate memory on behalf of user. May sleep.
436 *
437 * %GFP_KERNEL - Allocate normal kernel ram. May sleep.
438 *
439 * %GFP_ATOMIC - Allocation will not sleep. May use emergency pools.
440 * For example, use this inside interrupt handlers.
441 *
442 * %GFP_HIGHUSER - Allocate pages from high memory.
443 *
444 * %GFP_NOIO - Do not do any I/O at all while trying to get memory.
445 *
446 * %GFP_NOFS - Do not make any fs calls while trying to get memory.
447 *
448 * %GFP_NOWAIT - Allocation will not sleep.
449 *
450 * %__GFP_THISNODE - Allocate node-local memory only.
451 *
452 * %GFP_DMA - Allocation suitable for DMA.
453 * Should only be used for kmalloc() caches. Otherwise, use a
454 * slab created with SLAB_DMA.
455 *
456 * Also it is possible to set different flags by OR'ing
457 * in one or more of the following additional @flags:
458 *
459 * %__GFP_COLD - Request cache-cold pages instead of
460 * trying to return cache-warm pages.
461 *
462 * %__GFP_HIGH - This allocation has high priority and may use emergency pools.
463 *
464 * %__GFP_NOFAIL - Indicate that this allocation is in no way allowed to fail
465 * (think twice before using).
466 *
467 * %__GFP_NORETRY - If memory is not immediately available,
468 * then give up at once.
469 *
470 * %__GFP_NOWARN - If allocation fails, don't issue any warnings.
471 *
472 * %__GFP_REPEAT - If allocation fails initially, try once more before failing.
473 *
474 * There are other flags available as well, but these are not intended
475 * for general use, and so are not documented here. For a full list of
476 * potential flags, always refer to linux/gfp.h.
477 */
478 static __always_inline void *kmalloc(size_t size, gfp_t flags)
479 {
/* Compile-time-constant sizes let us resolve the target cache (or the
 * page-allocator path) entirely at compile time. */
480 if (__builtin_constant_p(size)) {
481 if (size > KMALLOC_MAX_CACHE_SIZE)
482 return kmalloc_large(size, flags);
483 #ifndef CONFIG_SLOB
/* DMA requests must go through __kmalloc() to pick a DMA cache. */
484 if (!(flags & GFP_DMA)) {
485 int index = kmalloc_index(size);
486
/* Index 0 means a zero-byte request; hand back the poison pointer. */
487 if (!index)
488 return ZERO_SIZE_PTR;
489
490 return kmem_cache_alloc_trace(kmalloc_caches[index],
491 flags, size);
492 }
493 #endif
494 }
/* Runtime-sized (or SLOB/DMA) requests take the generic slow path. */
495 return __kmalloc(size, flags);
496 }
497
498 /*
499 * Determine size used for the nth kmalloc cache.
500 * return size or 0 if a kmalloc cache for that
501 * size does not exist
502 */
503 static __always_inline int kmalloc_size(int n)
504 {
505 #ifndef CONFIG_SLOB
/* Indices above 2 are plain power-of-two caches (inverse of
 * kmalloc_index()); callers are expected to pass a valid index. */
506 if (n > 2)
507 return 1 << n;
508
/* Indices 1 and 2 are the odd-sized 96/192-byte caches, which only
 * exist when the minimum object size leaves room for them. */
509 if (n == 1 && KMALLOC_MIN_SIZE <= 32)
510 return 96;
511
512 if (n == 2 && KMALLOC_MIN_SIZE <= 64)
513 return 192;
514 #endif
515 return 0;
516 }
517
/* NUMA-aware kmalloc: like kmalloc() but prefers memory on @node.
 * Constant sizes that fit a (non-DMA) kmalloc cache are resolved at
 * compile time; everything else goes through __kmalloc_node().
 */
518 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
519 {
520 #ifndef CONFIG_SLOB
521 if (__builtin_constant_p(size) &&
522 size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) {
523 int i = kmalloc_index(size);
524
/* Index 0 means a zero-byte request. */
525 if (!i)
526 return ZERO_SIZE_PTR;
527
528 return kmem_cache_alloc_node_trace(kmalloc_caches[i],
529 flags, node, size);
530 }
531 #endif
532 return __kmalloc_node(size, flags, node);
533 }
534
/* RCU-freed array of per-memcg child caches, indexed by memcg cache id. */
535 struct memcg_cache_array {
536 struct rcu_head rcu;
537 struct kmem_cache *entries[0];
538 };
539
540 /*
541 * This is the main placeholder for memcg-related information in kmem caches.
542 * Both the root cache and the child caches will have it. For the root cache,
543 * this will hold a dynamically allocated array large enough to hold
544 * information about the currently limited memcgs in the system. To allow the
545 * array to be accessed without taking any locks, on relocation we free the old
546 * version only after a grace period.
547 *
548 * Child caches will hold extra metadata needed for its operation. Fields are:
549 *
550 * @memcg: pointer to the memcg this cache belongs to
551 * @root_cache: pointer to the global, root cache, this cache was derived from
552 *
553 * Both root and child caches of the same kind are linked into a list chained
554 * through @list.
555 */
556 struct memcg_cache_params {
557 bool is_root_cache;
558 struct list_head list;
559 union {
/* Root cache: RCU-protected array of children. */
560 struct memcg_cache_array __rcu *memcg_caches;
/* Child cache: owning memcg and the root it derives from. */
561 struct {
562 struct mem_cgroup *memcg;
563 struct kmem_cache *root_cache;
564 };
565 };
566 };
567
568 int memcg_update_all_caches(int num_memcgs);
569
570 /**
571 * kmalloc_array - allocate memory for an array.
572 * @n: number of elements.
573 * @size: element size.
574 * @flags: the type of memory to allocate (see kmalloc).
575 *
576 * Returns NULL if n * size would overflow SIZE_MAX.
577 */
576 static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
577 {
/* Reject multiplications that would wrap; size==0 cannot overflow. */
578 if (size != 0 && n > SIZE_MAX / size)
579 return NULL;
/* Keep the product as a literal expression so kmalloc()'s
 * __builtin_constant_p fast path can still fire. */
580 if (__builtin_constant_p(n) && __builtin_constant_p(size))
581 return kmalloc(n * size, flags);
582 return __kmalloc(n * size, flags);
583 }
584
585 /**
586 * kcalloc - allocate memory for an array. The memory is set to zero.
587 * @n: number of elements.
588 * @size: element size.
589 * @flags: the type of memory to allocate (see kmalloc).
590 *
591 * Overflow-checked like kmalloc_array(), with __GFP_ZERO added.
592 */
591 static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
592 {
593 return kmalloc_array(n, size, flags | __GFP_ZERO);
594 }
595
596 /*
597 * kmalloc_track_caller is a special version of kmalloc that records the
598 * calling function of the routine calling it for slab leak tracking instead
599 * of just the calling function (confusing, eh?).
600 * It's useful when the call to kmalloc comes from a widely-used standard
601 * allocator where we care about the real place the memory allocation
602 * request comes from.
603 */
604 extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
605 #define kmalloc_track_caller(size, flags) \
606 __kmalloc_track_caller(size, flags, _RET_IP_)
607
608 #ifdef CONFIG_NUMA
609 extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
610 #define kmalloc_node_track_caller(size, flags, node) \
611 __kmalloc_node_track_caller(size, flags, node, \
612 _RET_IP_)
613
614 #else /* CONFIG_NUMA */
615
616 #define kmalloc_node_track_caller(size, flags, node) \
617 kmalloc_track_caller(size, flags)
618
619 #endif /* CONFIG_NUMA */
620
621 /*
622 * Shortcuts
623 */
/* Allocate one zeroed object from cache @k. */
624 static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
625 {
626 return kmem_cache_alloc(k, flags | __GFP_ZERO);
627 }
628
629 /**
630 * kzalloc - allocate memory. The memory is set to zero.
631 * @size: how many bytes of memory are required.
632 * @flags: the type of memory to allocate (see kmalloc).
633 */
634 static inline void *kzalloc(size_t size, gfp_t flags)
635 {
636 return kmalloc(size, flags | __GFP_ZERO);
637 }
638
639 /**
640 * kzalloc_node - allocate zeroed memory from a particular memory node.
641 * @size: how many bytes of memory are required.
642 * @flags: the type of memory to allocate (see kmalloc).
643 * @node: memory node from which to allocate
644 */
645 static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
646 {
647 return kmalloc_node(size, flags | __GFP_ZERO, node);
648 }
649
650 unsigned int kmem_cache_size(struct kmem_cache *s);
651 void __init kmem_cache_init_late(void);
652
#if defined(CONFIG_SMP) && defined(CONFIG_SLAB)
int slab_prepare_cpu(unsigned int cpu);
int slab_dead_cpu(unsigned int cpu);
#else
/* No SMP+SLAB: no CPU hotplug work, so no callbacks are registered. */
#define slab_prepare_cpu NULL
#define slab_dead_cpu NULL
#endif
660
661 #endif /* _LINUX_SLAB_H */ 1 #ifndef __NET_ACT_API_H
2 #define __NET_ACT_API_H
3
4 /*
5 * Public action API for classifiers/qdiscs
6 */
7
8 #include <net/sch_generic.h>
9 #include <net/pkt_sched.h>
10 #include <net/net_namespace.h>
11 #include <net/netns/generic.h>
12
13
/* Hash table holding the actions of one action kind. */
struct tcf_hashinfo {
	struct hlist_head *htab;	/* bucket array, hmask + 1 entries */
	unsigned int hmask;		/* bucket mask (table size - 1) */
	spinlock_t lock;		/* protects htab contents and index */
	u32 index;	/* index generator state - TODO confirm (see tcf_hash_new_index) */
};
20
21 struct tc_action_ops;
22
/* Common base of every tc action instance; kind-specific structs embed
 * this as a member named `common` (see the tcf_* accessor macros below).
 */
struct tc_action {
	const struct tc_action_ops *ops;
	__u32 type;	/* for backward compat(TCA_OLD_COMPAT) */
	__u32 order;
	struct list_head list;
	struct tcf_hashinfo *hinfo;	/* hash table this action lives in */

	struct hlist_node tcfa_head;	/* linkage inside a hinfo->htab bucket */
	u32 tcfa_index;		/* instance index, hashed via tcf_hash() */
	int tcfa_refcnt;	/* reference count */
	int tcfa_bindcnt;	/* bind count - presumably classifier bindings, confirm */
	u32 tcfa_capab;
	int tcfa_action;	/* verdict value - TODO confirm TC_ACT_* semantics */
	struct tcf_t tcfa_tm;	/* timestamps, dumped via tcf_tm_dump() */
	struct gnet_stats_basic_packed tcfa_bstats;
	struct gnet_stats_queue tcfa_qstats;
	struct net_rate_estimator __rcu *tcfa_rate_est;
	spinlock_t tcfa_lock;
	struct rcu_head tcfa_rcu;
	struct gnet_stats_basic_cpu __percpu *cpu_bstats;	/* per-CPU stats, when used */
	struct gnet_stats_queue __percpu *cpu_qstats;
};
/* Shorthand accessors for kind-specific action structs that embed
 * struct tc_action as a member named `common`: tcf_X -> common.tcfa_X.
 */
#define tcf_head common.tcfa_head
#define tcf_index common.tcfa_index
#define tcf_refcnt common.tcfa_refcnt
#define tcf_bindcnt common.tcfa_bindcnt
#define tcf_capab common.tcfa_capab
#define tcf_action common.tcfa_action
#define tcf_tm common.tcfa_tm
#define tcf_bstats common.tcfa_bstats
#define tcf_qstats common.tcfa_qstats
#define tcf_rate_est common.tcfa_rate_est
#define tcf_lock common.tcfa_lock
#define tcf_rcu common.tcfa_rcu
58 static inline unsigned int tcf_hash(u32 index, unsigned int hmask)
59 {
60 return index & hmask;
61 }
62
63 static inline int tcf_hashinfo_init(struct tcf_hashinfo *hf, unsigned int mask)
64 {
65 int i;
66
67 spin_lock_init(&hf->lock);
68 hf->index = 0;
69 hf->hmask = mask;
70 hf->htab = kzalloc((mask + 1) * sizeof(struct hlist_head),
71 GFP_KERNEL);
72 if (!hf->htab)
73 return -ENOMEM;
74 for (i = 0; i < mask + 1; i++)
75 INIT_HLIST_HEAD(&hf->htab[i]);
76 return 0;
77 }
78
79 /* Update lastuse only if needed, to avoid dirtying a cache line.
80 * We use a temp variable to avoid fetching jiffies twice.
81 */
82 static inline void tcf_lastuse_update(struct tcf_t *tm)
83 {
84 unsigned long now = jiffies;
85
86 if (tm->lastuse != now)
87 tm->lastuse = now;
88 if (unlikely(!tm->firstuse))
89 tm->firstuse = now;
90 }
91
92 static inline void tcf_tm_dump(struct tcf_t *dtm, const struct tcf_t *stm)
93 {
94 dtm->install = jiffies_to_clock_t(jiffies - stm->install);
95 dtm->lastuse = jiffies_to_clock_t(jiffies - stm->lastuse);
96 dtm->firstuse = jiffies_to_clock_t(jiffies - stm->firstuse);
97 dtm->expires = jiffies_to_clock_t(stm->expires);
98 }
99
100 #ifdef CONFIG_NET_CLS_ACT
101
#define ACT_P_CREATED 1
#define ACT_P_DELETED 1	/* note: same value as ACT_P_CREATED */
104
/* Per-kind operations table implemented by each tc action module. */
struct tc_action_ops {
	struct list_head head;	/* list linkage - TODO confirm global ops list */
	char kind[IFNAMSIZ];	/* action kind name */
	__u32 type;	/* TBD to match kind */
	size_t size;
	struct module *owner;
	int (*act)(struct sk_buff *, const struct tc_action *,
		   struct tcf_result *);
	int (*dump)(struct sk_buff *, struct tc_action *, int, int);
	void (*cleanup)(struct tc_action *, int bind);
	int (*lookup)(struct net *, struct tc_action **, u32);
	int (*init)(struct net *net, struct nlattr *nla,
		    struct nlattr *est, struct tc_action **act, int ovr,
		    int bind);
	int (*walk)(struct net *, struct sk_buff *,
		    struct netlink_callback *, int, const struct tc_action_ops *);
	/* Optional; may be NULL (checked in tcf_action_stats_update()). */
	void (*stats_update)(struct tc_action *, u64, u32, u64);
	int (*get_dev)(const struct tc_action *a, struct net *net,
		       struct net_device **mirred_dev);
};
125
/* Per-network-namespace state for one action kind. */
struct tc_action_net {
	struct tcf_hashinfo *hinfo;	/* owned; allocated in tc_action_net_init() */
	const struct tc_action_ops *ops;
};
130
131 static inline
132 int tc_action_net_init(struct tc_action_net *tn,
133 const struct tc_action_ops *ops, unsigned int mask)
134 {
135 int err = 0;
136
137 tn->hinfo = kmalloc(sizeof(*tn->hinfo), GFP_KERNEL);
138 if (!tn->hinfo)
139 return -ENOMEM;
140 tn->ops = ops;
141 err = tcf_hashinfo_init(tn->hinfo, mask);
142 if (err)
143 kfree(tn->hinfo);
144 return err;
145 }
146
147 void tcf_hashinfo_destroy(const struct tc_action_ops *ops,
148 struct tcf_hashinfo *hinfo);
149
150 static inline void tc_action_net_exit(struct tc_action_net *tn)
151 {
152 tcf_hashinfo_destroy(tn->ops, tn->hinfo);
153 kfree(tn->hinfo);
154 }
155
156 int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
157 struct netlink_callback *cb, int type,
158 const struct tc_action_ops *ops);
159 int tcf_hash_search(struct tc_action_net *tn, struct tc_action **a, u32 index);
160 u32 tcf_hash_new_index(struct tc_action_net *tn);
161 bool tcf_hash_check(struct tc_action_net *tn, u32 index, struct tc_action **a,
162 int bind);
163 int tcf_hash_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
164 struct tc_action **a, const struct tc_action_ops *ops, int bind,
165 bool cpustats);
166 void tcf_hash_cleanup(struct tc_action *a, struct nlattr *est);
167 void tcf_hash_insert(struct tc_action_net *tn, struct tc_action *a);
168
169 int __tcf_hash_release(struct tc_action *a, bool bind, bool strict);
170
/* Non-strict wrapper around __tcf_hash_release(). */
static inline int tcf_hash_release(struct tc_action *a, bool bind)
{
	return __tcf_hash_release(a, bind, false);
}
175
176 int tcf_register_action(struct tc_action_ops *a, struct pernet_operations *ops);
177 int tcf_unregister_action(struct tc_action_ops *a,
178 struct pernet_operations *ops);
179 int tcf_action_destroy(struct list_head *actions, int bind);
180 int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
181 int nr_actions, struct tcf_result *res);
182 int tcf_action_init(struct net *net, struct nlattr *nla,
183 struct nlattr *est, char *n, int ovr,
184 int bind, struct list_head *);
185 struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla,
186 struct nlattr *est, char *n, int ovr,
187 int bind);
188 int tcf_action_dump(struct sk_buff *skb, struct list_head *, int, int);
189 int tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int, int);
190 int tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int, int);
191 int tcf_action_copy_stats(struct sk_buff *, struct tc_action *, int);
192
193 #endif /* CONFIG_NET_CLS_ACT */
194
195 static inline void tcf_action_stats_update(struct tc_action *a, u64 bytes,
196 u64 packets, u64 lastuse)
197 {
198 #ifdef CONFIG_NET_CLS_ACT
199 if (!a->ops->stats_update)
200 return;
201
202 a->ops->stats_update(a, bytes, packets, lastuse);
203 #endif
204 }
205
206 #endif 1 #ifndef __NET_NETLINK_H
2 #define __NET_NETLINK_H
3
4 #include <linux/types.h>
5 #include <linux/netlink.h>
6 #include <linux/jiffies.h>
7 #include <linux/in6.h>
8
9 /* ========================================================================
10 * Netlink Messages and Attributes Interface (As Seen On TV)
11 * ------------------------------------------------------------------------
12 * Messages Interface
13 * ------------------------------------------------------------------------
14 *
15 * Message Format:
16 * <--- nlmsg_total_size(payload) --->
17 * <-- nlmsg_msg_size(payload) ->
18 * +----------+- - -+-------------+- - -+-------- - -
19 * | nlmsghdr | Pad | Payload | Pad | nlmsghdr
20 * +----------+- - -+-------------+- - -+-------- - -
21 * nlmsg_data(nlh)---^ ^
22 * nlmsg_next(nlh)-----------------------+
23 *
24 * Payload Format:
25 * <---------------------- nlmsg_len(nlh) --------------------->
26 * <------ hdrlen ------> <- nlmsg_attrlen(nlh, hdrlen) ->
27 * +----------------------+- - -+--------------------------------+
28 * | Family Header | Pad | Attributes |
29 * +----------------------+- - -+--------------------------------+
30 * nlmsg_attrdata(nlh, hdrlen)---^
31 *
32 * Data Structures:
33 * struct nlmsghdr netlink message header
34 *
35 * Message Construction:
36 * nlmsg_new() create a new netlink message
37 * nlmsg_put() add a netlink message to an skb
38 * nlmsg_put_answer() callback based nlmsg_put()
39 * nlmsg_end() finalize netlink message
40 * nlmsg_get_pos() return current position in message
41 * nlmsg_trim() trim part of message
42 * nlmsg_cancel() cancel message construction
43 * nlmsg_free() free a netlink message
44 *
45 * Message Sending:
46 * nlmsg_multicast() multicast message to several groups
47 * nlmsg_unicast() unicast a message to a single socket
48 * nlmsg_notify() send notification message
49 *
50 * Message Length Calculations:
51 * nlmsg_msg_size(payload) length of message w/o padding
52 * nlmsg_total_size(payload) length of message w/ padding
53 * nlmsg_padlen(payload) length of padding at tail
54 *
55 * Message Payload Access:
56 * nlmsg_data(nlh) head of message payload
57 * nlmsg_len(nlh) length of message payload
58 * nlmsg_attrdata(nlh, hdrlen) head of attributes data
59 * nlmsg_attrlen(nlh, hdrlen) length of attributes data
60 *
61 * Message Parsing:
62 * nlmsg_ok(nlh, remaining) does nlh fit into remaining bytes?
63 * nlmsg_next(nlh, remaining) get next netlink message
64 * nlmsg_parse() parse attributes of a message
65 * nlmsg_find_attr() find an attribute in a message
66 * nlmsg_for_each_msg() loop over all messages
67 * nlmsg_validate() validate netlink message incl. attrs
68 * nlmsg_for_each_attr() loop over all attributes
69 *
70 * Misc:
71 * nlmsg_report() report back to application?
72 *
73 * ------------------------------------------------------------------------
74 * Attributes Interface
75 * ------------------------------------------------------------------------
76 *
77 * Attribute Format:
78 * <------- nla_total_size(payload) ------->
79 * <---- nla_attr_size(payload) ----->
80 * +----------+- - -+- - - - - - - - - +- - -+-------- - -
81 * | Header | Pad | Payload | Pad | Header
82 * +----------+- - -+- - - - - - - - - +- - -+-------- - -
83 * <- nla_len(nla) -> ^
84 * nla_data(nla)----^ |
85 * nla_next(nla)-----------------------------'
86 *
87 * Data Structures:
88 * struct nlattr netlink attribute header
89 *
90 * Attribute Construction:
91 * nla_reserve(skb, type, len) reserve room for an attribute
92 * nla_reserve_nohdr(skb, len) reserve room for an attribute w/o hdr
93 * nla_put(skb, type, len, data) add attribute to skb
94 * nla_put_nohdr(skb, len, data) add attribute w/o hdr
95 * nla_append(skb, len, data) append data to skb
96 *
97 * Attribute Construction for Basic Types:
98 * nla_put_u8(skb, type, value) add u8 attribute to skb
99 * nla_put_u16(skb, type, value) add u16 attribute to skb
100 * nla_put_u32(skb, type, value) add u32 attribute to skb
101 * nla_put_u64_64bits(skb, type,
102 * value, padattr) add u64 attribute to skb
103 * nla_put_s8(skb, type, value) add s8 attribute to skb
104 * nla_put_s16(skb, type, value) add s16 attribute to skb
105 * nla_put_s32(skb, type, value) add s32 attribute to skb
106 * nla_put_s64(skb, type, value,
107 * padattr) add s64 attribute to skb
108 * nla_put_string(skb, type, str) add string attribute to skb
109 * nla_put_flag(skb, type) add flag attribute to skb
110 * nla_put_msecs(skb, type, jiffies,
111 * padattr) add msecs attribute to skb
112 * nla_put_in_addr(skb, type, addr) add IPv4 address attribute to skb
113 * nla_put_in6_addr(skb, type, addr) add IPv6 address attribute to skb
114 *
115 * Nested Attributes Construction:
116 * nla_nest_start(skb, type) start a nested attribute
117 * nla_nest_end(skb, nla) finalize a nested attribute
118 * nla_nest_cancel(skb, nla) cancel nested attribute construction
119 *
120 * Attribute Length Calculations:
121 * nla_attr_size(payload) length of attribute w/o padding
122 * nla_total_size(payload) length of attribute w/ padding
123 * nla_padlen(payload) length of padding
124 *
125 * Attribute Payload Access:
126 * nla_data(nla) head of attribute payload
127 * nla_len(nla) length of attribute payload
128 *
129 * Attribute Payload Access for Basic Types:
130 * nla_get_u8(nla) get payload for a u8 attribute
131 * nla_get_u16(nla) get payload for a u16 attribute
132 * nla_get_u32(nla) get payload for a u32 attribute
133 * nla_get_u64(nla) get payload for a u64 attribute
134 * nla_get_s8(nla) get payload for a s8 attribute
135 * nla_get_s16(nla) get payload for a s16 attribute
136 * nla_get_s32(nla) get payload for a s32 attribute
137 * nla_get_s64(nla) get payload for a s64 attribute
138 * nla_get_flag(nla) return 1 if flag is true
139 * nla_get_msecs(nla) get payload for a msecs attribute
140 *
141 * Attribute Misc:
142 * nla_memcpy(dest, nla, count) copy attribute into memory
143 * nla_memcmp(nla, data, size) compare attribute with memory area
144 * nla_strlcpy(dst, nla, size) copy attribute to a sized string
145 * nla_strcmp(nla, str) compare attribute with string
146 *
147 * Attribute Parsing:
148 * nla_ok(nla, remaining) does nla fit into remaining bytes?
149 * nla_next(nla, remaining) get next netlink attribute
150 * nla_validate() validate a stream of attributes
151 * nla_validate_nested() validate a stream of nested attributes
152 * nla_find() find attribute in stream of attributes
153 * nla_find_nested() find attribute in nested attributes
154 * nla_parse() parse and validate stream of attrs
155 * nla_parse_nested() parse nested attributes
156 * nla_for_each_attr() loop over all attributes
157 * nla_for_each_nested() loop over the nested attributes
158 *=========================================================================
159 */
160
161 /**
162 * Standard attribute types to specify validation policy
163 */
/* Attribute datatypes understood by the validation policy (see
 * struct nla_policy below for the meaning of the per-type `len`).
 */
enum {
	NLA_UNSPEC,
	NLA_U8,
	NLA_U16,
	NLA_U32,
	NLA_U64,
	NLA_STRING,
	NLA_FLAG,
	NLA_MSECS,
	NLA_NESTED,
	NLA_NESTED_COMPAT,
	NLA_NUL_STRING,
	NLA_BINARY,
	NLA_S8,
	NLA_S16,
	NLA_S32,
	NLA_S64,
	__NLA_TYPE_MAX,	/* internal sentinel, keep last */
};

#define NLA_TYPE_MAX (__NLA_TYPE_MAX - 1)
185
186 /**
187 * struct nla_policy - attribute validation policy
188 * @type: Type of attribute or NLA_UNSPEC
189 * @len: Type specific length of payload
190 *
191 * Policies are defined as arrays of this struct, the array must be
192 * accessible by attribute type up to the highest identifier to be expected.
193 *
194 * Meaning of `len' field:
195 * NLA_STRING Maximum length of string
196 * NLA_NUL_STRING Maximum length of string (excluding NUL)
197 * NLA_FLAG Unused
198 * NLA_BINARY Maximum length of attribute payload
199 * NLA_NESTED Don't use `len' field -- length verification is
200 * done by checking len of nested header (or empty)
201 * NLA_NESTED_COMPAT Minimum length of structure payload
202 * NLA_U8, NLA_U16,
203 * NLA_U32, NLA_U64,
204 * NLA_S8, NLA_S16,
205 * NLA_S32, NLA_S64,
206 * NLA_MSECS Leaving the length field zero will verify the
207 * given type fits, using it verifies minimum length
208 * just like "All other"
209 * All other Minimum length of attribute payload
210 *
211 * Example:
212 * static const struct nla_policy my_policy[ATTR_MAX+1] = {
213 * [ATTR_FOO] = { .type = NLA_U16 },
214 * [ATTR_BAR] = { .type = NLA_STRING, .len = BARSIZ },
215 * [ATTR_BAZ] = { .len = sizeof(struct mystruct) },
216 * };
217 */
struct nla_policy {
	u16 type;	/* NLA_* datatype, or NLA_UNSPEC */
	u16 len;	/* type-specific length; see the table above */
};
222
/**
 * struct nl_info - netlink source information
 * @nlh: Netlink message header of original request
 * @nl_net: Network namespace of the request - TODO confirm against callers
 * @portid: Netlink PORTID of requesting application
 */
struct nl_info {
	struct nlmsghdr *nlh;
	struct net *nl_net;
	u32 portid;
};
233
234 int netlink_rcv_skb(struct sk_buff *skb,
235 int (*cb)(struct sk_buff *, struct nlmsghdr *));
236 int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
237 unsigned int group, int report, gfp_t flags);
238
239 int nla_validate(const struct nlattr *head, int len, int maxtype,
240 const struct nla_policy *policy);
241 int nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head,
242 int len, const struct nla_policy *policy);
243 int nla_policy_len(const struct nla_policy *, int);
244 struct nlattr *nla_find(const struct nlattr *head, int len, int attrtype);
245 size_t nla_strlcpy(char *dst, const struct nlattr *nla, size_t dstsize);
246 int nla_memcpy(void *dest, const struct nlattr *src, int count);
247 int nla_memcmp(const struct nlattr *nla, const void *data, size_t size);
248 int nla_strcmp(const struct nlattr *nla, const char *str);
249 struct nlattr *__nla_reserve(struct sk_buff *skb, int attrtype, int attrlen);
250 struct nlattr *__nla_reserve_64bit(struct sk_buff *skb, int attrtype,
251 int attrlen, int padattr);
252 void *__nla_reserve_nohdr(struct sk_buff *skb, int attrlen);
253 struct nlattr *nla_reserve(struct sk_buff *skb, int attrtype, int attrlen);
254 struct nlattr *nla_reserve_64bit(struct sk_buff *skb, int attrtype,
255 int attrlen, int padattr);
256 void *nla_reserve_nohdr(struct sk_buff *skb, int attrlen);
257 void __nla_put(struct sk_buff *skb, int attrtype, int attrlen,
258 const void *data);
259 void __nla_put_64bit(struct sk_buff *skb, int attrtype, int attrlen,
260 const void *data, int padattr);
261 void __nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data);
262 int nla_put(struct sk_buff *skb, int attrtype, int attrlen, const void *data);
263 int nla_put_64bit(struct sk_buff *skb, int attrtype, int attrlen,
264 const void *data, int padattr);
265 int nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data);
266 int nla_append(struct sk_buff *skb, int attrlen, const void *data);
267
268 /**************************************************************************
269 * Netlink Messages
270 **************************************************************************/
271
272 /**
273 * nlmsg_msg_size - length of netlink message not including padding
274 * @payload: length of message payload
275 */
276 static inline int nlmsg_msg_size(int payload)
277 {
278 return NLMSG_HDRLEN + payload;
279 }
280
281 /**
282 * nlmsg_total_size - length of netlink message including padding
283 * @payload: length of message payload
284 */
static inline int nlmsg_total_size(int payload)
{
	int unpadded = nlmsg_msg_size(payload);

	/* Round up to the netlink alignment boundary. */
	return NLMSG_ALIGN(unpadded);
}
289
290 /**
291 * nlmsg_padlen - length of padding at the message's tail
292 * @payload: length of message payload
293 */
static inline int nlmsg_padlen(int payload)
{
	int padded = nlmsg_total_size(payload);
	int unpadded = nlmsg_msg_size(payload);

	/* Padding is whatever alignment added on top of the raw size. */
	return padded - unpadded;
}
298
299 /**
300 * nlmsg_data - head of message payload
301 * @nlh: netlink message header
302 */
303 static inline void *nlmsg_data(const struct nlmsghdr *nlh)
304 {
305 return (unsigned char *) nlh + NLMSG_HDRLEN;
306 }
307
308 /**
309 * nlmsg_len - length of message payload
310 * @nlh: netlink message header
311 */
312 static inline int nlmsg_len(const struct nlmsghdr *nlh)
313 {
314 return nlh->nlmsg_len - NLMSG_HDRLEN;
315 }
316
317 /**
318 * nlmsg_attrdata - head of attributes data
319 * @nlh: netlink message header
320 * @hdrlen: length of family specific header
321 */
static inline struct nlattr *nlmsg_attrdata(const struct nlmsghdr *nlh,
					    int hdrlen)
{
	/* Attributes follow the aligned family-specific header. */
	unsigned char *payload = nlmsg_data(nlh);

	return (struct nlattr *) (payload + NLMSG_ALIGN(hdrlen));
}
328
329 /**
330 * nlmsg_attrlen - length of attributes data
331 * @nlh: netlink message header
332 * @hdrlen: length of family specific header
333 */
static inline int nlmsg_attrlen(const struct nlmsghdr *nlh, int hdrlen)
{
	int payload_len = nlmsg_len(nlh);

	return payload_len - NLMSG_ALIGN(hdrlen);
}
338
339 /**
340 * nlmsg_ok - check if the netlink message fits into the remaining bytes
341 * @nlh: netlink message header
342 * @remaining: number of bytes remaining in message stream
343 */
static inline int nlmsg_ok(const struct nlmsghdr *nlh, int remaining)
{
	/* Order matters: the first (signed) comparison rules out a
	 * short or negative `remaining` before it is compared against
	 * the unsigned nlh->nlmsg_len, and also guarantees nlh is big
	 * enough to dereference nlmsg_len at all.
	 */
	return (remaining >= (int) sizeof(struct nlmsghdr) &&
		nlh->nlmsg_len >= sizeof(struct nlmsghdr) &&
		nlh->nlmsg_len <= remaining);
}
350
351 /**
352 * nlmsg_next - next netlink message in message stream
353 * @nlh: netlink message header
354 * @remaining: number of bytes remaining in message stream
355 *
356 * Returns the next netlink message in the message stream and
357 * decrements remaining by the size of the current message.
358 */
359 static inline struct nlmsghdr *
360 nlmsg_next(const struct nlmsghdr *nlh, int *remaining)
361 {
362 int totlen = NLMSG_ALIGN(nlh->nlmsg_len);
363
364 *remaining -= totlen;
365
366 return (struct nlmsghdr *) ((unsigned char *) nlh + totlen);
367 }
368
369 /**
370 * nlmsg_parse - parse attributes of a netlink message
371 * @nlh: netlink message header
372 * @hdrlen: length of family specific header
373 * @tb: destination array with maxtype+1 elements
374 * @maxtype: maximum attribute type to be expected
375 * @policy: validation policy
376 *
377 * See nla_parse()
378 */
379 static inline int nlmsg_parse(const struct nlmsghdr *nlh, int hdrlen,
380 struct nlattr *tb[], int maxtype,
381 const struct nla_policy *policy)
382 {
383 if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen))
384 return -EINVAL;
385
386 return nla_parse(tb, maxtype, nlmsg_attrdata(nlh, hdrlen),
387 nlmsg_attrlen(nlh, hdrlen), policy);
388 }
389
390 /**
391 * nlmsg_find_attr - find a specific attribute in a netlink message
392 * @nlh: netlink message header
393 * @hdrlen: length of family specific header
394 * @attrtype: type of attribute to look for
395 *
396 * Returns the first attribute which matches the specified type.
397 */
static inline struct nlattr *nlmsg_find_attr(const struct nlmsghdr *nlh,
					     int hdrlen, int attrtype)
{
	struct nlattr *attrs = nlmsg_attrdata(nlh, hdrlen);
	int attrs_len = nlmsg_attrlen(nlh, hdrlen);

	return nla_find(attrs, attrs_len, attrtype);
}
404
405 /**
406 * nlmsg_validate - validate a netlink message including attributes
407 * @nlh: netlink message header
408 * @hdrlen: length of family specific header
409 * @maxtype: maximum attribute type to be expected
410 * @policy: validation policy
411 */
412 static inline int nlmsg_validate(const struct nlmsghdr *nlh,
413 int hdrlen, int maxtype,
414 const struct nla_policy *policy)
415 {
416 if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen))
417 return -EINVAL;
418
419 return nla_validate(nlmsg_attrdata(nlh, hdrlen),
420 nlmsg_attrlen(nlh, hdrlen), maxtype, policy);
421 }
422
423 /**
424 * nlmsg_report - need to report back to application?
425 * @nlh: netlink message header
426 *
427 * Returns 1 if a report back to the application is requested.
428 */
429 static inline int nlmsg_report(const struct nlmsghdr *nlh)
430 {
431 return !!(nlh->nlmsg_flags & NLM_F_ECHO);
432 }
433
434 /**
435 * nlmsg_for_each_attr - iterate over a stream of attributes
436 * @pos: loop counter, set to current attribute
437 * @nlh: netlink message header
438 * @hdrlen: length of family specific header
439 * @rem: initialized to len, holds bytes currently remaining in stream
440 */
/* Thin wrapper: iterate the message's attribute area via nla_for_each_attr(). */
#define nlmsg_for_each_attr(pos, nlh, hdrlen, rem) \
	nla_for_each_attr(pos, nlmsg_attrdata(nlh, hdrlen), \
			  nlmsg_attrlen(nlh, hdrlen), rem)
444
445 /**
446 * nlmsg_put - Add a new netlink message to an skb
447 * @skb: socket buffer to store message in
448 * @portid: netlink PORTID of requesting application
449 * @seq: sequence number of message
450 * @type: message type
451 * @payload: length of message payload
452 * @flags: message flags
453 *
454 * Returns NULL if the tailroom of the skb is insufficient to store
455 * the message header and payload.
456 */
457 static inline struct nlmsghdr *nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
458 int type, int payload, int flags)
459 {
460 if (unlikely(skb_tailroom(skb) < nlmsg_total_size(payload)))
461 return NULL;
462
463 return __nlmsg_put(skb, portid, seq, type, payload, flags);
464 }
465
466 /**
467 * nlmsg_put_answer - Add a new callback based netlink message to an skb
468 * @skb: socket buffer to store message in
469 * @cb: netlink callback
470 * @type: message type
471 * @payload: length of message payload
472 * @flags: message flags
473 *
474 * Returns NULL if the tailroom of the skb is insufficient to store
475 * the message header and payload.
476 */
477 static inline struct nlmsghdr *nlmsg_put_answer(struct sk_buff *skb,
478 struct netlink_callback *cb,
479 int type, int payload,
480 int flags)
481 {
482 return nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
483 type, payload, flags);
484 }
485
486 /**
487 * nlmsg_new - Allocate a new netlink message
488 * @payload: size of the message payload
489 * @flags: the type of memory to allocate.
490 *
491 * Use NLMSG_DEFAULT_SIZE if the size of the payload isn't known
492 * and a good default is needed.
493 */
494 static inline struct sk_buff *nlmsg_new(size_t payload, gfp_t flags)
495 {
496 return alloc_skb(nlmsg_total_size(payload), flags);
497 }
498
499 /**
500 * nlmsg_end - Finalize a netlink message
501 * @skb: socket buffer the message is stored in
502 * @nlh: netlink message header
503 *
504 * Corrects the netlink message header to include the appended
505 * attributes. Only necessary if attributes have been added to
506 * the message.
507 */
508 static inline void nlmsg_end(struct sk_buff *skb, struct nlmsghdr *nlh)
509 {
510 nlh->nlmsg_len = skb_tail_pointer(skb) - (unsigned char *)nlh;
511 }
512
513 /**
514 * nlmsg_get_pos - return current position in netlink message
515 * @skb: socket buffer the message is stored in
516 *
517 * Returns a pointer to the current tail of the message.
518 */
static inline void *nlmsg_get_pos(struct sk_buff *skb)
{
	/* The tail is where the next data would be appended. */
	return skb_tail_pointer(skb);
}
523
524 /**
525 * nlmsg_trim - Trim message to a mark
526 * @skb: socket buffer the message is stored in
527 * @mark: mark to trim to
528 *
529 * Trims the message to the provided mark.
530 */
531 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
532 {
533 if (mark) {
534 WARN_ON((unsigned char *) mark < skb->data);
535 skb_trim(skb, (unsigned char *) mark - skb->data);
536 }
537 }
538
539 /**
540 * nlmsg_cancel - Cancel construction of a netlink message
541 * @skb: socket buffer the message is stored in
542 * @nlh: netlink message header
543 *
544 * Removes the complete netlink message including all
545 * attributes from the socket buffer again.
546 */
static inline void nlmsg_cancel(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	/* Trimming back to the header start discards the whole message. */
	nlmsg_trim(skb, nlh);
}
551
552 /**
553 * nlmsg_free - free a netlink message
554 * @skb: socket buffer of netlink message
555 */
static inline void nlmsg_free(struct sk_buff *skb)
{
	/* Netlink messages are plain skbs; release via kfree_skb(). */
	kfree_skb(skb);
}
560
561 /**
562 * nlmsg_multicast - multicast a netlink message
563 * @sk: netlink socket to spread messages to
564 * @skb: netlink message as socket buffer
565 * @portid: own netlink portid to avoid sending to yourself
566 * @group: multicast group id
567 * @flags: allocation flags
568 */
569 static inline int nlmsg_multicast(struct sock *sk, struct sk_buff *skb,
570 u32 portid, unsigned int group, gfp_t flags)
571 {
572 int err;
573
574 NETLINK_CB(skb).dst_group = group;
575
576 err = netlink_broadcast(sk, skb, portid, group, flags);
577 if (err > 0)
578 err = 0;
579
580 return err;
581 }
582
583 /**
584 * nlmsg_unicast - unicast a netlink message
585 * @sk: netlink socket to send the message through
586 * @skb: netlink message as socket buffer
587 * @portid: netlink portid of the destination socket
588 */
589 static inline int nlmsg_unicast(struct sock *sk, struct sk_buff *skb, u32 portid)
590 {
591 int err;
592
593 err = netlink_unicast(sk, skb, portid, MSG_DONTWAIT);
594 if (err > 0)
595 err = 0;
596
597 return err;
598 }
599
600 /**
601 * nlmsg_for_each_msg - iterate over a stream of messages
602 * @pos: loop counter, set to current message
603 * @head: head of message stream
604 * @len: length of message stream
605 * @rem: initialized to len, holds bytes currently remaining in stream
606 */
/* Walk messages while nlmsg_ok() holds; nlmsg_next() updates `rem`. */
#define nlmsg_for_each_msg(pos, head, len, rem) \
	for (pos = head, rem = len; \
	     nlmsg_ok(pos, rem); \
	     pos = nlmsg_next(pos, &(rem)))
611
612 /**
613 * nl_dump_check_consistent - check if sequence is consistent and advertise if not
614 * @cb: netlink callback structure that stores the sequence number
615 * @nlh: netlink message header to write the flag to
616 *
617 * This function checks if the sequence (generation) number changed during dump
618 * and if it did, advertises it in the netlink message header.
619 *
620 * The correct way to use it is to set cb->seq to the generation counter when
621 * all locks for dumping have been acquired, and then call this function for
622 * each message that is generated.
623 *
624 * Note that due to initialisation concerns, 0 is an invalid sequence number
625 * and must not be used by code that uses this functionality.
626 */
static inline void
nl_dump_check_consistent(struct netlink_callback *cb,
			 struct nlmsghdr *nlh)
{
	/* If the generation counter moved between messages, advertise
	 * the inconsistency via NLM_F_DUMP_INTR. prev_seq == 0 means
	 * this is the first message (0 is reserved as invalid, per the
	 * comment above).
	 */
	if (cb->prev_seq && cb->seq != cb->prev_seq)
		nlh->nlmsg_flags |= NLM_F_DUMP_INTR;
	cb->prev_seq = cb->seq;
}
635
636 /**************************************************************************
637 * Netlink Attributes
638 **************************************************************************/
639
640 /**
641 * nla_attr_size - length of attribute not including padding
642 * @payload: length of payload
643 */
644 static inline int nla_attr_size(int payload)
645 {
646 return NLA_HDRLEN + payload;
647 }
648
649 /**
650 * nla_total_size - total length of attribute including padding
651 * @payload: length of payload
652 */
static inline int nla_total_size(int payload)
{
	int unpadded = nla_attr_size(payload);

	/* Round up to the netlink attribute alignment boundary. */
	return NLA_ALIGN(unpadded);
}
657
658 /**
659 * nla_padlen - length of padding at the tail of attribute
660 * @payload: length of payload
661 */
static inline int nla_padlen(int payload)
{
	int padded = nla_total_size(payload);
	int unpadded = nla_attr_size(payload);

	return padded - unpadded;
}
666
667 /**
668 * nla_type - attribute type
669 * @nla: netlink attribute
670 */
671 static inline int nla_type(const struct nlattr *nla)
672 {
673 return nla->nla_type & NLA_TYPE_MASK;
674 }
675
676 /**
677 * nla_data - head of payload
678 * @nla: netlink attribute
679 */
680 static inline void *nla_data(const struct nlattr *nla)
681 {
682 return (char *) nla + NLA_HDRLEN;
683 }
684
685 /**
686 * nla_len - length of payload
687 * @nla: netlink attribute
688 */
689 static inline int nla_len(const struct nlattr *nla)
690 {
691 return nla->nla_len - NLA_HDRLEN;
692 }
693
694 /**
695 * nla_ok - check if the netlink attribute fits into the remaining bytes
696 * @nla: netlink attribute
697 * @remaining: number of bytes remaining in attribute stream
698 */
static inline int nla_ok(const struct nlattr *nla, int remaining)
{
	/* Order matters: the first (signed) comparison rules out a
	 * short or negative `remaining` before it is compared against
	 * the unsigned nla->nla_len, and also guarantees nla is big
	 * enough to dereference nla_len at all.
	 */
	return remaining >= (int) sizeof(*nla) &&
	       nla->nla_len >= sizeof(*nla) &&
	       nla->nla_len <= remaining;
}
705
706 /**
707 * nla_next - next netlink attribute in attribute stream
708 * @nla: netlink attribute
709 * @remaining: number of bytes remaining in attribute stream
710 *
711 * Returns the next netlink attribute in the attribute stream and
712 * decrements remaining by the size of the current attribute.
713 */
714 static inline struct nlattr *nla_next(const struct nlattr *nla, int *remaining)
715 {
716 unsigned int totlen = NLA_ALIGN(nla->nla_len);
717
718 *remaining -= totlen;
719 return (struct nlattr *) ((char *) nla + totlen);
720 }
721
/**
 * nla_find_nested - find attribute in a set of nested attributes
 * @nla: attribute containing the nested attributes
 * @attrtype: type of attribute to look for
 *
 * Returns the first attribute which matches the specified type.
 */
static inline struct nlattr *
nla_find_nested(const struct nlattr *nla, int attrtype)
{
	void *head = nla_data(nla);
	int len = nla_len(nla);

	return nla_find(head, len, attrtype);
}

/**
 * nla_parse_nested - parse nested attributes
 * @tb: destination array with maxtype+1 elements
 * @maxtype: maximum attribute type to be expected
 * @nla: attribute containing the nested attributes
 * @policy: validation policy
 *
 * See nla_parse()
 */
static inline int nla_parse_nested(struct nlattr *tb[], int maxtype,
				   const struct nlattr *nla,
				   const struct nla_policy *policy)
{
	void *head = nla_data(nla);
	int len = nla_len(nla);

	return nla_parse(tb, maxtype, head, len, policy);
}
750
751 /**
752 * nla_put_u8 - Add a u8 netlink attribute to a socket buffer
753 * @skb: socket buffer to add attribute to
754 * @attrtype: attribute type
755 * @value: numeric value
756 */
757 static inline int nla_put_u8(struct sk_buff *skb, int attrtype, u8 value)
758 {
759 return nla_put(skb, attrtype, sizeof(u8), &value);
760 }
761
762 /**
763 * nla_put_u16 - Add a u16 netlink attribute to a socket buffer
764 * @skb: socket buffer to add attribute to
765 * @attrtype: attribute type
766 * @value: numeric value
767 */
768 static inline int nla_put_u16(struct sk_buff *skb, int attrtype, u16 value)
769 {
770 return nla_put(skb, attrtype, sizeof(u16), &value);
771 }
772
773 /**
774 * nla_put_be16 - Add a __be16 netlink attribute to a socket buffer
775 * @skb: socket buffer to add attribute to
776 * @attrtype: attribute type
777 * @value: numeric value
778 */
779 static inline int nla_put_be16(struct sk_buff *skb, int attrtype, __be16 value)
780 {
781 return nla_put(skb, attrtype, sizeof(__be16), &value);
782 }
783
784 /**
785 * nla_put_net16 - Add 16-bit network byte order netlink attribute to a socket buffer
786 * @skb: socket buffer to add attribute to
787 * @attrtype: attribute type
788 * @value: numeric value
789 */
790 static inline int nla_put_net16(struct sk_buff *skb, int attrtype, __be16 value)
791 {
792 return nla_put_be16(skb, attrtype | NLA_F_NET_BYTEORDER, value);
793 }
794
795 /**
796 * nla_put_le16 - Add a __le16 netlink attribute to a socket buffer
797 * @skb: socket buffer to add attribute to
798 * @attrtype: attribute type
799 * @value: numeric value
800 */
801 static inline int nla_put_le16(struct sk_buff *skb, int attrtype, __le16 value)
802 {
803 return nla_put(skb, attrtype, sizeof(__le16), &value);
804 }
805
806 /**
807 * nla_put_u32 - Add a u32 netlink attribute to a socket buffer
808 * @skb: socket buffer to add attribute to
809 * @attrtype: attribute type
810 * @value: numeric value
811 */
812 static inline int nla_put_u32(struct sk_buff *skb, int attrtype, u32 value)
813 {
814 return nla_put(skb, attrtype, sizeof(u32), &value);
815 }
816
817 /**
818 * nla_put_be32 - Add a __be32 netlink attribute to a socket buffer
819 * @skb: socket buffer to add attribute to
820 * @attrtype: attribute type
821 * @value: numeric value
822 */
823 static inline int nla_put_be32(struct sk_buff *skb, int attrtype, __be32 value)
824 {
825 return nla_put(skb, attrtype, sizeof(__be32), &value);
826 }
827
828 /**
829 * nla_put_net32 - Add 32-bit network byte order netlink attribute to a socket buffer
830 * @skb: socket buffer to add attribute to
831 * @attrtype: attribute type
832 * @value: numeric value
833 */
834 static inline int nla_put_net32(struct sk_buff *skb, int attrtype, __be32 value)
835 {
836 return nla_put_be32(skb, attrtype | NLA_F_NET_BYTEORDER, value);
837 }
838
839 /**
840 * nla_put_le32 - Add a __le32 netlink attribute to a socket buffer
841 * @skb: socket buffer to add attribute to
842 * @attrtype: attribute type
843 * @value: numeric value
844 */
845 static inline int nla_put_le32(struct sk_buff *skb, int attrtype, __le32 value)
846 {
847 return nla_put(skb, attrtype, sizeof(__le32), &value);
848 }
849
850 /**
851 * nla_put_u64_64bit - Add a u64 netlink attribute to a skb and align it
852 * @skb: socket buffer to add attribute to
853 * @attrtype: attribute type
854 * @value: numeric value
855 * @padattr: attribute type for the padding
856 */
857 static inline int nla_put_u64_64bit(struct sk_buff *skb, int attrtype,
858 u64 value, int padattr)
859 {
860 return nla_put_64bit(skb, attrtype, sizeof(u64), &value, padattr);
861 }
862
863 /**
864 * nla_put_be64 - Add a __be64 netlink attribute to a socket buffer and align it
865 * @skb: socket buffer to add attribute to
866 * @attrtype: attribute type
867 * @value: numeric value
868 * @padattr: attribute type for the padding
869 */
870 static inline int nla_put_be64(struct sk_buff *skb, int attrtype, __be64 value,
871 int padattr)
872 {
873 return nla_put_64bit(skb, attrtype, sizeof(__be64), &value, padattr);
874 }
875
876 /**
877 * nla_put_net64 - Add 64-bit network byte order nlattr to a skb and align it
878 * @skb: socket buffer to add attribute to
879 * @attrtype: attribute type
880 * @value: numeric value
881 * @padattr: attribute type for the padding
882 */
883 static inline int nla_put_net64(struct sk_buff *skb, int attrtype, __be64 value,
884 int padattr)
885 {
886 return nla_put_be64(skb, attrtype | NLA_F_NET_BYTEORDER, value,
887 padattr);
888 }
889
890 /**
891 * nla_put_le64 - Add a __le64 netlink attribute to a socket buffer and align it
892 * @skb: socket buffer to add attribute to
893 * @attrtype: attribute type
894 * @value: numeric value
895 * @padattr: attribute type for the padding
896 */
897 static inline int nla_put_le64(struct sk_buff *skb, int attrtype, __le64 value,
898 int padattr)
899 {
900 return nla_put_64bit(skb, attrtype, sizeof(__le64), &value, padattr);
901 }
902
903 /**
904 * nla_put_s8 - Add a s8 netlink attribute to a socket buffer
905 * @skb: socket buffer to add attribute to
906 * @attrtype: attribute type
907 * @value: numeric value
908 */
909 static inline int nla_put_s8(struct sk_buff *skb, int attrtype, s8 value)
910 {
911 return nla_put(skb, attrtype, sizeof(s8), &value);
912 }
913
914 /**
915 * nla_put_s16 - Add a s16 netlink attribute to a socket buffer
916 * @skb: socket buffer to add attribute to
917 * @attrtype: attribute type
918 * @value: numeric value
919 */
920 static inline int nla_put_s16(struct sk_buff *skb, int attrtype, s16 value)
921 {
922 return nla_put(skb, attrtype, sizeof(s16), &value);
923 }
924
925 /**
926 * nla_put_s32 - Add a s32 netlink attribute to a socket buffer
927 * @skb: socket buffer to add attribute to
928 * @attrtype: attribute type
929 * @value: numeric value
930 */
931 static inline int nla_put_s32(struct sk_buff *skb, int attrtype, s32 value)
932 {
933 return nla_put(skb, attrtype, sizeof(s32), &value);
934 }
935
936 /**
937 * nla_put_s64 - Add a s64 netlink attribute to a socket buffer and align it
938 * @skb: socket buffer to add attribute to
939 * @attrtype: attribute type
940 * @value: numeric value
941 * @padattr: attribute type for the padding
942 */
943 static inline int nla_put_s64(struct sk_buff *skb, int attrtype, s64 value,
944 int padattr)
945 {
946 return nla_put_64bit(skb, attrtype, sizeof(s64), &value, padattr);
947 }
948
949 /**
950 * nla_put_string - Add a string netlink attribute to a socket buffer
951 * @skb: socket buffer to add attribute to
952 * @attrtype: attribute type
953 * @str: NUL terminated string
954 */
955 static inline int nla_put_string(struct sk_buff *skb, int attrtype,
956 const char *str)
957 {
958 return nla_put(skb, attrtype, strlen(str) + 1, str);
959 }
960
961 /**
962 * nla_put_flag - Add a flag netlink attribute to a socket buffer
963 * @skb: socket buffer to add attribute to
964 * @attrtype: attribute type
965 */
966 static inline int nla_put_flag(struct sk_buff *skb, int attrtype)
967 {
968 return nla_put(skb, attrtype, 0, NULL);
969 }
970
971 /**
972 * nla_put_msecs - Add a msecs netlink attribute to a skb and align it
973 * @skb: socket buffer to add attribute to
974 * @attrtype: attribute type
975 * @njiffies: number of jiffies to convert to msecs
976 * @padattr: attribute type for the padding
977 */
978 static inline int nla_put_msecs(struct sk_buff *skb, int attrtype,
979 unsigned long njiffies, int padattr)
980 {
981 u64 tmp = jiffies_to_msecs(njiffies);
982
983 return nla_put_64bit(skb, attrtype, sizeof(u64), &tmp, padattr);
984 }
985
986 /**
987 * nla_put_in_addr - Add an IPv4 address netlink attribute to a socket
988 * buffer
989 * @skb: socket buffer to add attribute to
990 * @attrtype: attribute type
991 * @addr: IPv4 address
992 */
993 static inline int nla_put_in_addr(struct sk_buff *skb, int attrtype,
994 __be32 addr)
995 {
996 return nla_put_be32(skb, attrtype, addr);
997 }
998
999 /**
1000 * nla_put_in6_addr - Add an IPv6 address netlink attribute to a socket
1001 * buffer
1002 * @skb: socket buffer to add attribute to
1003 * @attrtype: attribute type
1004 * @addr: IPv6 address
1005 */
1006 static inline int nla_put_in6_addr(struct sk_buff *skb, int attrtype,
1007 const struct in6_addr *addr)
1008 {
1009 return nla_put(skb, attrtype, sizeof(*addr), addr);
1010 }
1011
1012 /**
1013 * nla_get_u32 - return payload of u32 attribute
1014 * @nla: u32 netlink attribute
1015 */
1016 static inline u32 nla_get_u32(const struct nlattr *nla)
1017 {
1018 return *(u32 *) nla_data(nla);
1019 }
1020
1021 /**
1022 * nla_get_be32 - return payload of __be32 attribute
1023 * @nla: __be32 netlink attribute
1024 */
1025 static inline __be32 nla_get_be32(const struct nlattr *nla)
1026 {
1027 return *(__be32 *) nla_data(nla);
1028 }
1029
1030 /**
1031 * nla_get_le32 - return payload of __le32 attribute
1032 * @nla: __le32 netlink attribute
1033 */
1034 static inline __le32 nla_get_le32(const struct nlattr *nla)
1035 {
1036 return *(__le32 *) nla_data(nla);
1037 }
1038
1039 /**
1040 * nla_get_u16 - return payload of u16 attribute
1041 * @nla: u16 netlink attribute
1042 */
1043 static inline u16 nla_get_u16(const struct nlattr *nla)
1044 {
1045 return *(u16 *) nla_data(nla);
1046 }
1047
1048 /**
1049 * nla_get_be16 - return payload of __be16 attribute
1050 * @nla: __be16 netlink attribute
1051 */
1052 static inline __be16 nla_get_be16(const struct nlattr *nla)
1053 {
1054 return *(__be16 *) nla_data(nla);
1055 }
1056
1057 /**
1058 * nla_get_le16 - return payload of __le16 attribute
1059 * @nla: __le16 netlink attribute
1060 */
1061 static inline __le16 nla_get_le16(const struct nlattr *nla)
1062 {
1063 return *(__le16 *) nla_data(nla);
1064 }
1065
1066 /**
1067 * nla_get_u8 - return payload of u8 attribute
1068 * @nla: u8 netlink attribute
1069 */
1070 static inline u8 nla_get_u8(const struct nlattr *nla)
1071 {
1072 return *(u8 *) nla_data(nla);
1073 }
1074
1075 /**
1076 * nla_get_u64 - return payload of u64 attribute
1077 * @nla: u64 netlink attribute
1078 */
1079 static inline u64 nla_get_u64(const struct nlattr *nla)
1080 {
1081 u64 tmp;
1082
1083 nla_memcpy(&tmp, nla, sizeof(tmp));
1084
1085 return tmp;
1086 }
1087
1088 /**
1089 * nla_get_be64 - return payload of __be64 attribute
1090 * @nla: __be64 netlink attribute
1091 */
1092 static inline __be64 nla_get_be64(const struct nlattr *nla)
1093 {
1094 __be64 tmp;
1095
1096 nla_memcpy(&tmp, nla, sizeof(tmp));
1097
1098 return tmp;
1099 }
1100
1101 /**
1102 * nla_get_le64 - return payload of __le64 attribute
1103 * @nla: __le64 netlink attribute
1104 */
1105 static inline __le64 nla_get_le64(const struct nlattr *nla)
1106 {
1107 return *(__le64 *) nla_data(nla);
1108 }
1109
1110 /**
1111 * nla_get_s32 - return payload of s32 attribute
1112 * @nla: s32 netlink attribute
1113 */
1114 static inline s32 nla_get_s32(const struct nlattr *nla)
1115 {
1116 return *(s32 *) nla_data(nla);
1117 }
1118
1119 /**
1120 * nla_get_s16 - return payload of s16 attribute
1121 * @nla: s16 netlink attribute
1122 */
1123 static inline s16 nla_get_s16(const struct nlattr *nla)
1124 {
1125 return *(s16 *) nla_data(nla);
1126 }
1127
1128 /**
1129 * nla_get_s8 - return payload of s8 attribute
1130 * @nla: s8 netlink attribute
1131 */
1132 static inline s8 nla_get_s8(const struct nlattr *nla)
1133 {
1134 return *(s8 *) nla_data(nla);
1135 }
1136
1137 /**
1138 * nla_get_s64 - return payload of s64 attribute
1139 * @nla: s64 netlink attribute
1140 */
1141 static inline s64 nla_get_s64(const struct nlattr *nla)
1142 {
1143 s64 tmp;
1144
1145 nla_memcpy(&tmp, nla, sizeof(tmp));
1146
1147 return tmp;
1148 }
1149
1150 /**
1151 * nla_get_flag - return payload of flag attribute
1152 * @nla: flag netlink attribute
1153 */
1154 static inline int nla_get_flag(const struct nlattr *nla)
1155 {
1156 return !!nla;
1157 }
1158
1159 /**
1160 * nla_get_msecs - return payload of msecs attribute
1161 * @nla: msecs netlink attribute
1162 *
1163 * Returns the number of milliseconds in jiffies.
1164 */
1165 static inline unsigned long nla_get_msecs(const struct nlattr *nla)
1166 {
1167 u64 msecs = nla_get_u64(nla);
1168
1169 return msecs_to_jiffies((unsigned long) msecs);
1170 }
1171
1172 /**
1173 * nla_get_in_addr - return payload of IPv4 address attribute
1174 * @nla: IPv4 address netlink attribute
1175 */
1176 static inline __be32 nla_get_in_addr(const struct nlattr *nla)
1177 {
1178 return *(__be32 *) nla_data(nla);
1179 }
1180
1181 /**
1182 * nla_get_in6_addr - return payload of IPv6 address attribute
1183 * @nla: IPv6 address netlink attribute
1184 */
1185 static inline struct in6_addr nla_get_in6_addr(const struct nlattr *nla)
1186 {
1187 struct in6_addr tmp;
1188
1189 nla_memcpy(&tmp, nla, sizeof(tmp));
1190 return tmp;
1191 }
1192
1193 /**
1194 * nla_memdup - duplicate attribute memory (kmemdup)
1195 * @src: netlink attribute to duplicate from
1196 * @gfp: GFP mask
1197 */
1198 static inline void *nla_memdup(const struct nlattr *src, gfp_t gfp)
1199 {
1200 return kmemdup(nla_data(src), nla_len(src), gfp);
1201 }
1202
1203 /**
1204 * nla_nest_start - Start a new level of nested attributes
1205 * @skb: socket buffer to add attributes to
1206 * @attrtype: attribute type of container
1207 *
1208 * Returns the container attribute
1209 */
1210 static inline struct nlattr *nla_nest_start(struct sk_buff *skb, int attrtype)
1211 {
1212 struct nlattr *start = (struct nlattr *)skb_tail_pointer(skb);
1213
1214 if (nla_put(skb, attrtype, 0, NULL) < 0)
1215 return NULL;
1216
1217 return start;
1218 }
1219
1220 /**
1221 * nla_nest_end - Finalize nesting of attributes
1222 * @skb: socket buffer the attributes are stored in
1223 * @start: container attribute
1224 *
1225 * Corrects the container attribute header to include the all
1226 * appeneded attributes.
1227 *
1228 * Returns the total data length of the skb.
1229 */
1230 static inline int nla_nest_end(struct sk_buff *skb, struct nlattr *start)
1231 {
1232 start->nla_len = skb_tail_pointer(skb) - (unsigned char *)start;
1233 return skb->len;
1234 }
1235
1236 /**
1237 * nla_nest_cancel - Cancel nesting of attributes
1238 * @skb: socket buffer the message is stored in
1239 * @start: container attribute
1240 *
1241 * Removes the container attribute and including all nested
1242 * attributes. Returns -EMSGSIZE
1243 */
1244 static inline void nla_nest_cancel(struct sk_buff *skb, struct nlattr *start)
1245 {
1246 nlmsg_trim(skb, start);
1247 }
1248
/**
 * nla_validate_nested - Validate a stream of nested attributes
 * @start: container attribute
 * @maxtype: maximum attribute type to be expected
 * @policy: validation policy
 *
 * Validates all attributes in the nested attribute stream against the
 * specified policy. Attributes with a type exceeding maxtype will be
 * ignored. See documentation of struct nla_policy for more details.
 *
 * Returns 0 on success or a negative error code.
 */
static inline int nla_validate_nested(const struct nlattr *start, int maxtype,
				      const struct nla_policy *policy)
{
	void *head = nla_data(start);
	int len = nla_len(start);

	return nla_validate(head, len, maxtype, policy);
}
1266
/**
 * nla_need_padding_for_64bit - test 64-bit alignment of the next attribute
 * @skb: socket buffer the message is stored in
 *
 * Return true if padding is needed to align the next attribute (nla_data()) to
 * a 64-bit aligned area.
 *
 * Always false on architectures that handle unaligned accesses
 * efficiently (CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS).
 */
static inline bool nla_need_padding_for_64bit(struct sk_buff *skb)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	/* The nlattr header is 4 bytes in size, that's why we test
	 * if the skb->data _is_ aligned. A NOP attribute, plus
	 * nlattr header for next attribute, will make nla_data()
	 * 8-byte aligned.
	 */
	if (IS_ALIGNED((unsigned long)skb_tail_pointer(skb), 8))
		return true;
#endif
	return false;
}
1287
1288 /**
1289 * nla_align_64bit - 64-bit align the nla_data() of next attribute
1290 * @skb: socket buffer the message is stored in
1291 * @padattr: attribute type for the padding
1292 *
1293 * Conditionally emit a padding netlink attribute in order to make
1294 * the next attribute we emit have a 64-bit aligned nla_data() area.
1295 * This will only be done in architectures which do not have
1296 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS defined.
1297 *
1298 * Returns zero on success or a negative error code.
1299 */
1300 static inline int nla_align_64bit(struct sk_buff *skb, int padattr)
1301 {
1302 if (nla_need_padding_for_64bit(skb) &&
1303 !nla_reserve(skb, padattr, 0))
1304 return -EMSGSIZE;
1305
1306 return 0;
1307 }
1308
/**
 * nla_total_size_64bit - total length of attribute including padding
 * @payload: length of payload
 *
 * On strict-alignment architectures this reserves room for one extra
 * zero-length padding attribute (see nla_align_64bit()).
 */
static inline int nla_total_size_64bit(int payload)
{
	return NLA_ALIGN(nla_attr_size(payload))
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
		+ NLA_ALIGN(nla_attr_size(0))
#endif
		;
}
1321
/**
 * nla_for_each_attr - iterate over a stream of attributes
 * @pos: loop counter, set to current attribute
 * @head: head of attribute stream
 * @len: length of attribute stream
 * @rem: initialized to len, holds bytes currently remaining in stream
 *
 * Iteration stops as soon as nla_ok() rejects the next attribute, so
 * @pos is always a validated attribute inside the loop body.
 */
#define nla_for_each_attr(pos, head, len, rem) \
	for (pos = head, rem = len; \
	     nla_ok(pos, rem); \
	     pos = nla_next(pos, &(rem)))

/**
 * nla_for_each_nested - iterate over nested attributes
 * @pos: loop counter, set to current attribute
 * @nla: attribute containing the nested attributes
 * @rem: initialized to nla_len(nla), holds bytes currently remaining in stream
 */
#define nla_for_each_nested(pos, nla, rem) \
	nla_for_each_attr(pos, nla_data(nla), nla_len(nla), rem)
1342
1343 /**
1344 * nla_is_last - Test if attribute is last in stream
1345 * @nla: attribute to test
1346 * @rem: bytes remaining in stream
1347 */
1348 static inline bool nla_is_last(const struct nlattr *nla, int rem)
1349 {
1350 return nla->nla_len == rem;
1351 }
1352
1353 #endif 1 /*
2 * generic net pointers
3 */
4
5 #ifndef __NET_GENERIC_H__
6 #define __NET_GENERIC_H__
7
8 #include <linux/bug.h>
9 #include <linux/rcupdate.h>
10
11 /*
12 * Generic net pointers are to be used by modules to put some private
13 * stuff on the struct net without explicit struct net modification
14 *
15 * The rules are simple:
16 * 1. set pernet_operations->id. After register_pernet_device you
17 * will have the id of your private pointer.
18 * 2. set pernet_operations->size to have the code allocate and free
19 * a private structure pointed to from struct net.
20 * 3. do not change this pointer while the net is alive;
21 * 4. do not try to have any private reference on the net_generic object.
22 *
23 * After accomplishing all of the above, the private pointer can be
24 * accessed with the net_generic() call.
25 */
26
struct net_generic {
	union {
		struct {
			/* number of slots in ptr[] */
			unsigned int len;
			/* for deferred freeing of the whole block */
			struct rcu_head rcu;
		} s;

		/* per-id private pointers; overlays the header above,
		 * so real slots start past sizeof(s) */
		void *ptr[0];
	};
};
37
/*
 * net_generic - fetch the private pointer registered under @id for @net.
 *
 * The rcu_read_lock/unlock pair only protects the dereference of
 * net->gen; the returned pointer itself must stay valid for the
 * lifetime of the net (rule 3 above), which is why returning it after
 * rcu_read_unlock() is safe.
 */
static inline void *net_generic(const struct net *net, unsigned int id)
{
	struct net_generic *ng;
	void *ptr;

	rcu_read_lock();
	ng = rcu_dereference(net->gen);
	/* NOTE(review): no bounds check against ng->s.len; @id is
	 * trusted to come from a successful pernet registration */
	ptr = ng->ptr[id];
	rcu_read_unlock();

	return ptr;
}
50 #endif |
Here is an explanation of a rule violation that arose while checking your driver against the corresponding kernel.
Note that it may be a false positive, i.e. there may be no real error. Please analyze the given error trace and the related source code to determine whether there is an error in your driver.
The Error trace column contains the path on which the given rule is violated. You can expand/collapse some entity classes by clicking on the corresponding checkboxes in the main menu or in the advanced Others menu. You can also expand/collapse each particular entity by clicking on +/-. Hovering over some entities shows tips. The error trace is also bound to the related source code: line numbers may be shown as links on the left, and you can click on them to open the corresponding lines in the source code.
The Source code column contains the content of the files related to the error trace. It includes the source code of your driver (note that there are some LDV modifications at the end), kernel headers, and the rule model. Tabs show the currently opened file and the other available files. Hovering over a tab shows the full file name; clicking it shows that file's content.
Kernel | Module | Rule | Verifier | Verdict | Status | Timestamp | Bug report |
linux-4.10-rc1.tar.xz | net/sched/act_skbmod.ko | 147_1a | CPAchecker | Bug | Fixed | 2017-03-04 23:54:57 | L0265 |
Comment
Reported: 4 Mar 2017
[Home]