Error Trace

[Home]

Bug # 7

Show/hide error trace
Error trace
Function bodies
Blocks
  • Others...
    Function bodies without model function calls
    Initialization function calls
    Initialization function bodies
    Entry point
    Entry point body
    Function calls
    Skipped function calls
    Formal parameter names
    Declarations
    Assumes
    Assume conditions
    Returns
    Return values
    DEG initialization
    DEG function calls
    Model function calls
    Model function bodies
    Model asserts
    Model state changes
    Model function function calls
    Model function function bodies
    Model returns
    Model others
    Indentation
    Line numbers
    Expand signs
-__CPAchecker_initialize()
{
95 struct kernel_symbol { unsigned long value; const char *name; } ;
33 struct module ;
19 typedef signed char __s8;
20 typedef unsigned char __u8;
22 typedef short __s16;
23 typedef unsigned short __u16;
25 typedef int __s32;
26 typedef unsigned int __u32;
30 typedef unsigned long long __u64;
16 typedef unsigned char u8;
18 typedef short s16;
19 typedef unsigned short u16;
21 typedef int s32;
22 typedef unsigned int u32;
24 typedef long long s64;
25 typedef unsigned long long u64;
14 typedef long __kernel_long_t;
15 typedef unsigned long __kernel_ulong_t;
27 typedef int __kernel_pid_t;
48 typedef unsigned int __kernel_uid32_t;
49 typedef unsigned int __kernel_gid32_t;
71 typedef __kernel_ulong_t __kernel_size_t;
72 typedef __kernel_long_t __kernel_ssize_t;
80 struct __anonstruct___kernel_fsid_t_5 { int val[2U]; } ;
80 typedef struct __anonstruct___kernel_fsid_t_5 __kernel_fsid_t;
87 typedef long long __kernel_loff_t;
88 typedef __kernel_long_t __kernel_time_t;
89 typedef __kernel_long_t __kernel_clock_t;
90 typedef int __kernel_timer_t;
91 typedef int __kernel_clockid_t;
32 typedef __u16 __le16;
33 typedef __u16 __be16;
34 typedef __u32 __le32;
36 typedef __u64 __le64;
40 typedef __u32 __wsum;
12 typedef __u32 __kernel_dev_t;
15 typedef __kernel_dev_t dev_t;
18 typedef unsigned short umode_t;
21 typedef __kernel_pid_t pid_t;
26 typedef __kernel_clockid_t clockid_t;
29 typedef _Bool bool;
31 typedef __kernel_uid32_t uid_t;
32 typedef __kernel_gid32_t gid_t;
45 typedef __kernel_loff_t loff_t;
54 typedef __kernel_size_t size_t;
59 typedef __kernel_ssize_t ssize_t;
69 typedef __kernel_time_t time_t;
102 typedef __s32 int32_t;
108 typedef __u32 uint32_t;
133 typedef unsigned long sector_t;
134 typedef unsigned long blkcnt_t;
147 typedef u64 dma_addr_t;
158 typedef unsigned int gfp_t;
159 typedef unsigned int fmode_t;
160 typedef unsigned int oom_flags_t;
178 struct __anonstruct_atomic_t_6 { int counter; } ;
178 typedef struct __anonstruct_atomic_t_6 atomic_t;
183 struct __anonstruct_atomic64_t_7 { long counter; } ;
183 typedef struct __anonstruct_atomic64_t_7 atomic64_t;
184 struct list_head { struct list_head *next; struct list_head *prev; } ;
189 struct hlist_node ;
189 struct hlist_head { struct hlist_node *first; } ;
193 struct hlist_node { struct hlist_node *next; struct hlist_node **pprev; } ;
204 struct callback_head { struct callback_head *next; void (*func)(struct callback_head *); } ;
65 struct pt_regs { unsigned long r15; unsigned long r14; unsigned long r13; unsigned long r12; unsigned long bp; unsigned long bx; unsigned long r11; unsigned long r10; unsigned long r9; unsigned long r8; unsigned long ax; unsigned long cx; unsigned long dx; unsigned long si; unsigned long di; unsigned long orig_ax; unsigned long ip; unsigned long cs; unsigned long flags; unsigned long sp; unsigned long ss; } ;
59 struct __anonstruct_ldv_1016_9 { unsigned int a; unsigned int b; } ;
59 struct __anonstruct_ldv_1031_10 { u16 limit0; u16 base0; unsigned char base1; unsigned char type; unsigned char s; unsigned char dpl; unsigned char p; unsigned char limit; unsigned char avl; unsigned char l; unsigned char d; unsigned char g; unsigned char base2; } ;
59 union __anonunion_ldv_1032_8 { struct __anonstruct_ldv_1016_9 ldv_1016; struct __anonstruct_ldv_1031_10 ldv_1031; } ;
59 struct desc_struct { union __anonunion_ldv_1032_8 ldv_1032; } ;
12 typedef unsigned long pteval_t;
15 typedef unsigned long pgdval_t;
16 typedef unsigned long pgprotval_t;
18 struct __anonstruct_pte_t_11 { pteval_t pte; } ;
18 typedef struct __anonstruct_pte_t_11 pte_t;
20 struct pgprot { pgprotval_t pgprot; } ;
242 typedef struct pgprot pgprot_t;
244 struct __anonstruct_pgd_t_12 { pgdval_t pgd; } ;
244 typedef struct __anonstruct_pgd_t_12 pgd_t;
332 struct page ;
332 typedef struct page *pgtable_t;
340 struct file ;
353 struct seq_file ;
390 struct thread_struct ;
392 struct mm_struct ;
393 struct task_struct ;
394 struct cpumask ;
327 struct arch_spinlock ;
18 typedef u16 __ticket_t;
19 typedef u32 __ticketpair_t;
20 struct __raw_tickets { __ticket_t head; __ticket_t tail; } ;
32 union __anonunion_ldv_1452_15 { __ticketpair_t head_tail; struct __raw_tickets tickets; } ;
32 struct arch_spinlock { union __anonunion_ldv_1452_15 ldv_1452; } ;
33 typedef struct arch_spinlock arch_spinlock_t;
34 struct qrwlock { atomic_t cnts; arch_spinlock_t lock; } ;
14 typedef struct qrwlock arch_rwlock_t;
142 typedef void (*ctor_fn_t)();
222 struct _ddebug { const char *modname; const char *function; const char *filename; const char *format; unsigned int lineno; unsigned char flags; } ;
48 struct device ;
54 struct net_device ;
403 struct file_operations ;
415 struct completion ;
416 struct pid ;
686 struct lockdep_map ;
127 struct kernel_vm86_regs { struct pt_regs pt; unsigned short es; unsigned short __esh; unsigned short ds; unsigned short __dsh; unsigned short fs; unsigned short __fsh; unsigned short gs; unsigned short __gsh; } ;
79 union __anonunion_ldv_2857_16 { struct pt_regs *regs; struct kernel_vm86_regs *vm86; } ;
79 struct math_emu_info { long ___orig_eip; union __anonunion_ldv_2857_16 ldv_2857; } ;
306 struct bug_entry { int bug_addr_disp; int file_disp; unsigned short line; unsigned short flags; } ;
102 struct cpumask { unsigned long bits[128U]; } ;
14 typedef struct cpumask cpumask_t;
671 typedef struct cpumask *cpumask_var_t;
161 struct seq_operations ;
293 struct i387_fsave_struct { u32 cwd; u32 swd; u32 twd; u32 fip; u32 fcs; u32 foo; u32 fos; u32 st_space[20U]; u32 status; } ;
311 struct __anonstruct_ldv_5225_21 { u64 rip; u64 rdp; } ;
311 struct __anonstruct_ldv_5231_22 { u32 fip; u32 fcs; u32 foo; u32 fos; } ;
311 union __anonunion_ldv_5232_20 { struct __anonstruct_ldv_5225_21 ldv_5225; struct __anonstruct_ldv_5231_22 ldv_5231; } ;
311 union __anonunion_ldv_5241_23 { u32 padding1[12U]; u32 sw_reserved[12U]; } ;
311 struct i387_fxsave_struct { u16 cwd; u16 swd; u16 twd; u16 fop; union __anonunion_ldv_5232_20 ldv_5232; u32 mxcsr; u32 mxcsr_mask; u32 st_space[32U]; u32 xmm_space[64U]; u32 padding[12U]; union __anonunion_ldv_5241_23 ldv_5241; } ;
345 struct i387_soft_struct { u32 cwd; u32 swd; u32 twd; u32 fip; u32 fcs; u32 foo; u32 fos; u32 st_space[20U]; u8 ftop; u8 changed; u8 lookahead; u8 no_update; u8 rm; u8 alimit; struct math_emu_info *info; u32 entry_eip; } ;
366 struct ymmh_struct { u32 ymmh_space[64U]; } ;
371 struct lwp_struct { u8 reserved[128U]; } ;
376 struct bndregs_struct { u64 bndregs[8U]; } ;
380 struct bndcsr_struct { u64 cfg_reg_u; u64 status_reg; } ;
385 struct xsave_hdr_struct { u64 xstate_bv; u64 xcomp_bv; u64 reserved[6U]; } ;
391 struct xsave_struct { struct i387_fxsave_struct i387; struct xsave_hdr_struct xsave_hdr; struct ymmh_struct ymmh; struct lwp_struct lwp; struct bndregs_struct bndregs; struct bndcsr_struct bndcsr; } ;
400 union thread_xstate { struct i387_fsave_struct fsave; struct i387_fxsave_struct fxsave; struct i387_soft_struct soft; struct xsave_struct xsave; } ;
408 struct fpu { unsigned int last_cpu; unsigned int has_fpu; union thread_xstate *state; } ;
464 struct kmem_cache ;
465 struct perf_event ;
466 struct thread_struct { struct desc_struct tls_array[3U]; unsigned long sp0; unsigned long sp; unsigned long usersp; unsigned short es; unsigned short ds; unsigned short fsindex; unsigned short gsindex; unsigned long fs; unsigned long gs; struct perf_event *ptrace_bps[4U]; unsigned long debugreg6; unsigned long ptrace_dr7; unsigned long cr2; unsigned long trap_nr; unsigned long error_code; struct fpu fpu; unsigned long *io_bitmap_ptr; unsigned long iopl; unsigned int io_bitmap_max; unsigned char fpu_counter; } ;
23 typedef atomic64_t atomic_long_t;
55 struct stack_trace { unsigned int nr_entries; unsigned int max_entries; unsigned long *entries; int skip; } ;
26 struct lockdep_subclass_key { char __one_byte; } ;
53 struct lock_class_key { struct lockdep_subclass_key subkeys[8U]; } ;
59 struct lock_class { struct list_head hash_entry; struct list_head lock_entry; struct lockdep_subclass_key *key; unsigned int subclass; unsigned int dep_gen_id; unsigned long usage_mask; struct stack_trace usage_traces[13U]; struct list_head locks_after; struct list_head locks_before; unsigned int version; unsigned long ops; const char *name; int name_version; unsigned long contention_point[4U]; unsigned long contending_point[4U]; } ;
144 struct lockdep_map { struct lock_class_key *key; struct lock_class *class_cache[2U]; const char *name; int cpu; unsigned long ip; } ;
205 struct held_lock { u64 prev_chain_key; unsigned long acquire_ip; struct lockdep_map *instance; struct lockdep_map *nest_lock; u64 waittime_stamp; u64 holdtime_stamp; unsigned short class_idx; unsigned char irq_context; unsigned char trylock; unsigned char read; unsigned char check; unsigned char hardirqs_off; unsigned short references; } ;
530 struct raw_spinlock { arch_spinlock_t raw_lock; unsigned int magic; unsigned int owner_cpu; void *owner; struct lockdep_map dep_map; } ;
32 typedef struct raw_spinlock raw_spinlock_t;
33 struct __anonstruct_ldv_6072_27 { u8 __padding[24U]; struct lockdep_map dep_map; } ;
33 union __anonunion_ldv_6073_26 { struct raw_spinlock rlock; struct __anonstruct_ldv_6072_27 ldv_6072; } ;
33 struct spinlock { union __anonunion_ldv_6073_26 ldv_6073; } ;
76 typedef struct spinlock spinlock_t;
23 struct __anonstruct_rwlock_t_28 { arch_rwlock_t raw_lock; unsigned int magic; unsigned int owner_cpu; void *owner; struct lockdep_map dep_map; } ;
23 typedef struct __anonstruct_rwlock_t_28 rwlock_t;
23 struct optimistic_spin_queue { atomic_t tail; } ;
26 struct mutex { atomic_t count; spinlock_t wait_lock; struct list_head wait_list; struct task_struct *owner; const char *name; void *magic; struct lockdep_map dep_map; } ;
68 struct mutex_waiter { struct list_head list; struct task_struct *task; void *magic; } ;
16 struct timespec ;
412 struct llist_node ;
64 struct llist_node { struct llist_node *next; } ;
152 struct seqcount { unsigned int sequence; struct lockdep_map dep_map; } ;
51 typedef struct seqcount seqcount_t;
284 struct __anonstruct_seqlock_t_97 { struct seqcount seqcount; spinlock_t lock; } ;
284 typedef struct __anonstruct_seqlock_t_97 seqlock_t;
458 struct timespec { __kernel_time_t tv_sec; long tv_nsec; } ;
323 union ktime { s64 tv64; } ;
41 typedef union ktime ktime_t;
105 struct tvec_base ;
106 struct timer_list { struct list_head entry; unsigned long expires; struct tvec_base *base; void (*function)(unsigned long); unsigned long data; int slack; int start_pid; void *start_site; char start_comm[16U]; struct lockdep_map lockdep_map; } ;
254 struct hrtimer ;
255 enum hrtimer_restart ;
266 struct workqueue_struct ;
267 struct work_struct ;
53 struct work_struct { atomic_long_t data; struct list_head entry; void (*func)(struct work_struct *); struct lockdep_map lockdep_map; } ;
106 struct delayed_work { struct work_struct work; struct timer_list timer; struct workqueue_struct *wq; int cpu; } ;
12 struct __wait_queue ;
12 typedef struct __wait_queue wait_queue_t;
15 struct __wait_queue { unsigned int flags; void *private; int (*func)(wait_queue_t *, unsigned int, int, void *); struct list_head task_list; } ;
35 struct __wait_queue_head { spinlock_t lock; struct list_head task_list; } ;
40 typedef struct __wait_queue_head wait_queue_head_t;
1029 struct completion { unsigned int done; wait_queue_head_t wait; } ;
58 struct pm_message { int event; } ;
64 typedef struct pm_message pm_message_t;
65 struct dev_pm_ops { int (*prepare)(struct device *); void (*complete)(struct device *); int (*suspend)(struct device *); int (*resume)(struct device *); int (*freeze)(struct device *); int (*thaw)(struct device *); int (*poweroff)(struct device *); int (*restore)(struct device *); int (*suspend_late)(struct device *); int (*resume_early)(struct device *); int (*freeze_late)(struct device *); int (*thaw_early)(struct device *); int (*poweroff_late)(struct device *); int (*restore_early)(struct device *); int (*suspend_noirq)(struct device *); int (*resume_noirq)(struct device *); int (*freeze_noirq)(struct device *); int (*thaw_noirq)(struct device *); int (*poweroff_noirq)(struct device *); int (*restore_noirq)(struct device *); int (*runtime_suspend)(struct device *); int (*runtime_resume)(struct device *); int (*runtime_idle)(struct device *); } ;
320 enum rpm_status { RPM_ACTIVE = 0, RPM_RESUMING = 1, RPM_SUSPENDED = 2, RPM_SUSPENDING = 3 } ;
327 enum rpm_request { RPM_REQ_NONE = 0, RPM_REQ_IDLE = 1, RPM_REQ_SUSPEND = 2, RPM_REQ_AUTOSUSPEND = 3, RPM_REQ_RESUME = 4 } ;
335 struct wakeup_source ;
546 struct pm_subsys_data { spinlock_t lock; unsigned int refcount; struct list_head clock_list; } ;
553 struct dev_pm_qos ;
553 struct dev_pm_info { pm_message_t power_state; unsigned char can_wakeup; unsigned char async_suspend; bool is_prepared; bool is_suspended; bool is_noirq_suspended; bool is_late_suspended; bool ignore_children; bool early_init; bool direct_complete; spinlock_t lock; struct list_head entry; struct completion completion; struct wakeup_source *wakeup; bool wakeup_path; bool syscore; struct timer_list suspend_timer; unsigned long timer_expires; struct work_struct work; wait_queue_head_t wait_queue; atomic_t usage_count; atomic_t child_count; unsigned char disable_depth; unsigned char idle_notification; unsigned char request_pending; unsigned char deferred_resume; unsigned char run_wake; unsigned char runtime_auto; unsigned char no_callbacks; unsigned char irq_safe; unsigned char use_autosuspend; unsigned char timer_autosuspends; unsigned char memalloc_noio; enum rpm_request request; enum rpm_status runtime_status; int runtime_error; int autosuspend_delay; unsigned long last_busy; unsigned long active_jiffies; unsigned long suspended_jiffies; unsigned long accounting_timestamp; struct pm_subsys_data *subsys_data; void (*set_latency_tolerance)(struct device *, s32 ); struct dev_pm_qos *qos; } ;
614 struct dev_pm_domain { struct dev_pm_ops ops; } ;
98 struct __anonstruct_nodemask_t_98 { unsigned long bits[16U]; } ;
98 typedef struct __anonstruct_nodemask_t_98 nodemask_t;
22 struct __anonstruct_mm_context_t_99 { void *ldt; int size; unsigned short ia32_compat; struct mutex lock; void *vdso; } ;
22 typedef struct __anonstruct_mm_context_t_99 mm_context_t;
18 struct rb_node { unsigned long __rb_parent_color; struct rb_node *rb_right; struct rb_node *rb_left; } ;
40 struct rb_root { struct rb_node *rb_node; } ;
87 struct vm_area_struct ;
22 struct bio_vec ;
167 struct notifier_block ;
169 struct device_node ;
128 struct percpu_counter { raw_spinlock_t lock; s64 count; struct list_head list; s32 *counters; } ;
71 struct fprop_local_percpu { struct percpu_counter events; unsigned int period; raw_spinlock_t lock; } ;
93 struct hlist_bl_node ;
93 struct hlist_bl_head { struct hlist_bl_node *first; } ;
36 struct hlist_bl_node { struct hlist_bl_node *next; struct hlist_bl_node **pprev; } ;
114 struct __anonstruct_ldv_12314_127 { spinlock_t lock; unsigned int count; } ;
114 union __anonunion_ldv_12315_126 { struct __anonstruct_ldv_12314_127 ldv_12314; } ;
114 struct lockref { union __anonunion_ldv_12315_126 ldv_12315; } ;
49 struct nameidata ;
50 struct path ;
51 struct vfsmount ;
52 struct __anonstruct_ldv_12339_129 { u32 hash; u32 len; } ;
52 union __anonunion_ldv_12341_128 { struct __anonstruct_ldv_12339_129 ldv_12339; u64 hash_len; } ;
52 struct qstr { union __anonunion_ldv_12341_128 ldv_12341; const unsigned char *name; } ;
90 struct inode ;
90 struct dentry_operations ;
90 struct super_block ;
90 union __anonunion_d_u_130 { struct list_head d_child; struct callback_head d_rcu; } ;
90 struct dentry { unsigned int d_flags; seqcount_t d_seq; struct hlist_bl_node d_hash; struct dentry *d_parent; struct qstr d_name; struct inode *d_inode; unsigned char d_iname[32U]; struct lockref d_lockref; const struct dentry_operations *d_op; struct super_block *d_sb; unsigned long d_time; void *d_fsdata; struct list_head d_lru; union __anonunion_d_u_130 d_u; struct list_head d_subdirs; struct hlist_node d_alias; } ;
142 struct dentry_operations { int (*d_revalidate)(struct dentry *, unsigned int); int (*d_weak_revalidate)(struct dentry *, unsigned int); int (*d_hash)(const struct dentry *, struct qstr *); int (*d_compare)(const struct dentry *, const struct dentry *, unsigned int, const char *, const struct qstr *); int (*d_delete)(const struct dentry *); void (*d_release)(struct dentry *); void (*d_prune)(struct dentry *); void (*d_iput)(struct dentry *, struct inode *); char * (*d_dname)(struct dentry *, char *, int); struct vfsmount * (*d_automount)(struct path *); int (*d_manage)(struct dentry *, bool ); } ;
478 struct path { struct vfsmount *mnt; struct dentry *dentry; } ;
83 struct user_namespace ;
22 struct __anonstruct_kuid_t_131 { uid_t val; } ;
22 typedef struct __anonstruct_kuid_t_131 kuid_t;
27 struct __anonstruct_kgid_t_132 { gid_t val; } ;
27 typedef struct __anonstruct_kgid_t_132 kgid_t;
127 struct kstat { u64 ino; dev_t dev; umode_t mode; unsigned int nlink; kuid_t uid; kgid_t gid; dev_t rdev; loff_t size; struct timespec atime; struct timespec mtime; struct timespec ctime; unsigned long blksize; unsigned long long blocks; } ;
44 struct list_lru_node { spinlock_t lock; struct list_head list; long nr_items; } ;
30 struct list_lru { struct list_lru_node *node; nodemask_t active_nodes; } ;
58 struct __anonstruct_ldv_12848_134 { struct radix_tree_node *parent; void *private_data; } ;
58 union __anonunion_ldv_12850_133 { struct __anonstruct_ldv_12848_134 ldv_12848; struct callback_head callback_head; } ;
58 struct radix_tree_node { unsigned int path; unsigned int count; union __anonunion_ldv_12850_133 ldv_12850; struct list_head private_list; void *slots[64U]; unsigned long tags[3U][1U]; } ;
105 struct radix_tree_root { unsigned int height; gfp_t gfp_mask; struct radix_tree_node *rnode; } ;
428 enum pid_type { PIDTYPE_PID = 0, PIDTYPE_PGID = 1, PIDTYPE_SID = 2, PIDTYPE_MAX = 3 } ;
435 struct pid_namespace ;
435 struct upid { int nr; struct pid_namespace *ns; struct hlist_node pid_chain; } ;
56 struct pid { atomic_t count; unsigned int level; struct hlist_head tasks[3U]; struct callback_head rcu; struct upid numbers[1U]; } ;
68 struct pid_link { struct hlist_node node; struct pid *pid; } ;
22 struct kernel_cap_struct { __u32 cap[2U]; } ;
25 typedef struct kernel_cap_struct kernel_cap_t;
45 struct fiemap_extent { __u64 fe_logical; __u64 fe_physical; __u64 fe_length; __u64 fe_reserved64[2U]; __u32 fe_flags; __u32 fe_reserved[3U]; } ;
38 struct shrink_control { gfp_t gfp_mask; unsigned long nr_to_scan; nodemask_t nodes_to_scan; int nid; } ;
26 struct shrinker { unsigned long int (*count_objects)(struct shrinker *, struct shrink_control *); unsigned long int (*scan_objects)(struct shrinker *, struct shrink_control *); int seeks; long batch; unsigned long flags; struct list_head list; atomic_long_t *nr_deferred; } ;
70 enum migrate_mode { MIGRATE_ASYNC = 0, MIGRATE_SYNC_LIGHT = 1, MIGRATE_SYNC = 2 } ;
76 struct rw_semaphore ;
77 struct rw_semaphore { long count; struct list_head wait_list; raw_spinlock_t wait_lock; struct optimistic_spin_queue osq; struct task_struct *owner; struct lockdep_map dep_map; } ;
27 struct bio_set ;
28 struct bio ;
29 struct bio_integrity_payload ;
30 struct block_device ;
31 struct io_context ;
32 struct cgroup_subsys_state ;
17 typedef void bio_end_io_t(struct bio *, int);
19 struct bio_vec { struct page *bv_page; unsigned int bv_len; unsigned int bv_offset; } ;
28 struct bvec_iter { sector_t bi_sector; unsigned int bi_size; unsigned int bi_idx; unsigned int bi_bvec_done; } ;
40 struct bio { struct bio *bi_next; struct block_device *bi_bdev; unsigned long bi_flags; unsigned long bi_rw; struct bvec_iter bi_iter; unsigned int bi_phys_segments; unsigned int bi_seg_front_size; unsigned int bi_seg_back_size; atomic_t bi_remaining; bio_end_io_t *bi_end_io; void *bi_private; struct io_context *bi_ioc; struct cgroup_subsys_state *bi_css; struct bio_integrity_payload *bi_integrity; unsigned short bi_vcnt; unsigned short bi_max_vecs; atomic_t bi_cnt; struct bio_vec *bi_io_vec; struct bio_set *bi_pool; struct bio_vec bi_inline_vecs[0U]; } ;
59 struct export_operations ;
61 struct iovec ;
62 struct kiocb ;
63 struct kobject ;
64 struct pipe_inode_info ;
65 struct poll_table_struct ;
66 struct kstatfs ;
67 struct cred ;
68 struct swap_info_struct ;
69 struct iov_iter ;
69 struct iattr { unsigned int ia_valid; umode_t ia_mode; kuid_t ia_uid; kgid_t ia_gid; loff_t ia_size; struct timespec ia_atime; struct timespec ia_mtime; struct timespec ia_ctime; struct file *ia_file; } ;
253 struct fs_disk_quota { __s8 d_version; __s8 d_flags; __u16 d_fieldmask; __u32 d_id; __u64 d_blk_hardlimit; __u64 d_blk_softlimit; __u64 d_ino_hardlimit; __u64 d_ino_softlimit; __u64 d_bcount; __u64 d_icount; __s32 d_itimer; __s32 d_btimer; __u16 d_iwarns; __u16 d_bwarns; __s32 d_padding2; __u64 d_rtb_hardlimit; __u64 d_rtb_softlimit; __u64 d_rtbcount; __s32 d_rtbtimer; __u16 d_rtbwarns; __s16 d_padding3; char d_padding4[8U]; } ;
76 struct fs_qfilestat { __u64 qfs_ino; __u64 qfs_nblks; __u32 qfs_nextents; } ;
151 typedef struct fs_qfilestat fs_qfilestat_t;
152 struct fs_quota_stat { __s8 qs_version; __u16 qs_flags; __s8 qs_pad; fs_qfilestat_t qs_uquota; fs_qfilestat_t qs_gquota; __u32 qs_incoredqs; __s32 qs_btimelimit; __s32 qs_itimelimit; __s32 qs_rtbtimelimit; __u16 qs_bwarnlimit; __u16 qs_iwarnlimit; } ;
166 struct fs_qfilestatv { __u64 qfs_ino; __u64 qfs_nblks; __u32 qfs_nextents; __u32 qfs_pad; } ;
196 struct fs_quota_statv { __s8 qs_version; __u8 qs_pad1; __u16 qs_flags; __u32 qs_incoredqs; struct fs_qfilestatv qs_uquota; struct fs_qfilestatv qs_gquota; struct fs_qfilestatv qs_pquota; __s32 qs_btimelimit; __s32 qs_itimelimit; __s32 qs_rtbtimelimit; __u16 qs_bwarnlimit; __u16 qs_iwarnlimit; __u64 qs_pad2[8U]; } ;
212 struct dquot ;
19 typedef __kernel_uid32_t projid_t;
23 struct __anonstruct_kprojid_t_136 { projid_t val; } ;
23 typedef struct __anonstruct_kprojid_t_136 kprojid_t;
119 struct if_dqinfo { __u64 dqi_bgrace; __u64 dqi_igrace; __u32 dqi_flags; __u32 dqi_valid; } ;
152 enum quota_type { USRQUOTA = 0, GRPQUOTA = 1, PRJQUOTA = 2 } ;
60 typedef long long qsize_t;
61 union __anonunion_ldv_13718_137 { kuid_t uid; kgid_t gid; kprojid_t projid; } ;
61 struct kqid { union __anonunion_ldv_13718_137 ldv_13718; enum quota_type type; } ;
178 struct mem_dqblk { qsize_t dqb_bhardlimit; qsize_t dqb_bsoftlimit; qsize_t dqb_curspace; qsize_t dqb_rsvspace; qsize_t dqb_ihardlimit; qsize_t dqb_isoftlimit; qsize_t dqb_curinodes; time_t dqb_btime; time_t dqb_itime; } ;
200 struct quota_format_type ;
201 struct mem_dqinfo { struct quota_format_type *dqi_format; int dqi_fmt_id; struct list_head dqi_dirty_list; unsigned long dqi_flags; unsigned int dqi_bgrace; unsigned int dqi_igrace; qsize_t dqi_maxblimit; qsize_t dqi_maxilimit; void *dqi_priv; } ;
264 struct dquot { struct hlist_node dq_hash; struct list_head dq_inuse; struct list_head dq_free; struct list_head dq_dirty; struct mutex dq_lock; atomic_t dq_count; wait_queue_head_t dq_wait_unused; struct super_block *dq_sb; struct kqid dq_id; loff_t dq_off; unsigned long dq_flags; struct mem_dqblk dq_dqb; } ;
291 struct quota_format_ops { int (*check_quota_file)(struct super_block *, int); int (*read_file_info)(struct super_block *, int); int (*write_file_info)(struct super_block *, int); int (*free_file_info)(struct super_block *, int); int (*read_dqblk)(struct dquot *); int (*commit_dqblk)(struct dquot *); int (*release_dqblk)(struct dquot *); } ;
302 struct dquot_operations { int (*write_dquot)(struct dquot *); struct dquot * (*alloc_dquot)(struct super_block *, int); void (*destroy_dquot)(struct dquot *); int (*acquire_dquot)(struct dquot *); int (*release_dquot)(struct dquot *); int (*mark_dirty)(struct dquot *); int (*write_info)(struct super_block *, int); qsize_t * (*get_reserved_space)(struct inode *); } ;
316 struct quotactl_ops { int (*quota_on)(struct super_block *, int, int, struct path *); int (*quota_on_meta)(struct super_block *, int, int); int (*quota_off)(struct super_block *, int); int (*quota_sync)(struct super_block *, int); int (*get_info)(struct super_block *, int, struct if_dqinfo *); int (*set_info)(struct super_block *, int, struct if_dqinfo *); int (*get_dqblk)(struct super_block *, struct kqid , struct fs_disk_quota *); int (*set_dqblk)(struct super_block *, struct kqid , struct fs_disk_quota *); int (*get_xstate)(struct super_block *, struct fs_quota_stat *); int (*set_xstate)(struct super_block *, unsigned int, int); int (*get_xstatev)(struct super_block *, struct fs_quota_statv *); int (*rm_xquota)(struct super_block *, unsigned int); } ;
334 struct quota_format_type { int qf_fmt_id; const struct quota_format_ops *qf_ops; struct module *qf_owner; struct quota_format_type *qf_next; } ;
380 struct quota_info { unsigned int flags; struct mutex dqio_mutex; struct mutex dqonoff_mutex; struct inode *files[2U]; struct mem_dqinfo info[2U]; const struct quota_format_ops *ops[2U]; } ;
410 struct address_space ;
411 struct writeback_control ;
323 struct address_space_operations { int (*writepage)(struct page *, struct writeback_control *); int (*readpage)(struct file *, struct page *); int (*writepages)(struct address_space *, struct writeback_control *); int (*set_page_dirty)(struct page *); int (*readpages)(struct file *, struct address_space *, struct list_head *, unsigned int); int (*write_begin)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page **, void **); int (*write_end)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page *, void *); sector_t (*bmap)(struct address_space *, sector_t ); void (*invalidatepage)(struct page *, unsigned int, unsigned int); int (*releasepage)(struct page *, gfp_t ); void (*freepage)(struct page *); ssize_t (*direct_IO)(int, struct kiocb *, struct iov_iter *, loff_t ); int (*get_xip_mem)(struct address_space *, unsigned long, int, void **, unsigned long *); int (*migratepage)(struct address_space *, struct page *, struct page *, enum migrate_mode ); int (*launder_page)(struct page *); int (*is_partially_uptodate)(struct page *, unsigned long, unsigned long); void (*is_dirty_writeback)(struct page *, bool *, bool *); int (*error_remove_page)(struct address_space *, struct page *); int (*swap_activate)(struct swap_info_struct *, struct file *, sector_t *); void (*swap_deactivate)(struct file *); } ;
382 struct backing_dev_info ;
383 struct address_space { struct inode *host; struct radix_tree_root page_tree; spinlock_t tree_lock; atomic_t i_mmap_writable; struct rb_root i_mmap; struct list_head i_mmap_nonlinear; struct mutex i_mmap_mutex; unsigned long nrpages; unsigned long nrshadows; unsigned long writeback_index; const struct address_space_operations *a_ops; unsigned long flags; struct backing_dev_info *backing_dev_info; spinlock_t private_lock; struct list_head private_list; void *private_data; } ;
405 struct request_queue ;
406 struct hd_struct ;
406 struct gendisk ;
406 struct block_device { dev_t bd_dev; int bd_openers; struct inode *bd_inode; struct super_block *bd_super; struct mutex bd_mutex; struct list_head bd_inodes; void *bd_claiming; void *bd_holder; int bd_holders; bool bd_write_holder; struct list_head bd_holder_disks; struct block_device *bd_contains; unsigned int bd_block_size; struct hd_struct *bd_part; unsigned int bd_part_count; int bd_invalidated; struct gendisk *bd_disk; struct request_queue *bd_queue; struct list_head bd_list; unsigned long bd_private; int bd_fsfreeze_count; struct mutex bd_fsfreeze_mutex; } ;
503 struct posix_acl ;
504 struct inode_operations ;
504 union __anonunion_ldv_14146_140 { const unsigned int i_nlink; unsigned int __i_nlink; } ;
504 union __anonunion_ldv_14166_141 { struct hlist_head i_dentry; struct callback_head i_rcu; } ;
504 struct file_lock ;
504 struct cdev ;
504 union __anonunion_ldv_14183_142 { struct pipe_inode_info *i_pipe; struct block_device *i_bdev; struct cdev *i_cdev; } ;
504 struct inode { umode_t i_mode; unsigned short i_opflags; kuid_t i_uid; kgid_t i_gid; unsigned int i_flags; struct posix_acl *i_acl; struct posix_acl *i_default_acl; const struct inode_operations *i_op; struct super_block *i_sb; struct address_space *i_mapping; void *i_security; unsigned long i_ino; union __anonunion_ldv_14146_140 ldv_14146; dev_t i_rdev; loff_t i_size; struct timespec i_atime; struct timespec i_mtime; struct timespec i_ctime; spinlock_t i_lock; unsigned short i_bytes; unsigned int i_blkbits; blkcnt_t i_blocks; unsigned long i_state; struct mutex i_mutex; unsigned long dirtied_when; struct hlist_node i_hash; struct list_head i_wb_list; struct list_head i_lru; struct list_head i_sb_list; union __anonunion_ldv_14166_141 ldv_14166; u64 i_version; atomic_t i_count; atomic_t i_dio_count; atomic_t i_writecount; atomic_t i_readcount; const struct file_operations *i_fop; struct file_lock *i_flock; struct address_space i_data; struct dquot *i_dquot[2U]; struct list_head i_devices; union __anonunion_ldv_14183_142 ldv_14183; __u32 i_generation; __u32 i_fsnotify_mask; struct hlist_head i_fsnotify_marks; void *i_private; } ;
740 struct fown_struct { rwlock_t lock; struct pid *pid; enum pid_type pid_type; kuid_t uid; kuid_t euid; int signum; } ;
748 struct file_ra_state { unsigned long start; unsigned int size; unsigned int async_size; unsigned int ra_pages; unsigned int mmap_miss; loff_t prev_pos; } ;
771 union __anonunion_f_u_143 { struct llist_node fu_llist; struct callback_head fu_rcuhead; } ;
771 struct file { union __anonunion_f_u_143 f_u; struct path f_path; struct inode *f_inode; const struct file_operations *f_op; spinlock_t f_lock; atomic_long_t f_count; unsigned int f_flags; fmode_t f_mode; struct mutex f_pos_lock; loff_t f_pos; struct fown_struct f_owner; const struct cred *f_cred; struct file_ra_state f_ra; u64 f_version; void *f_security; void *private_data; struct list_head f_ep_links; struct list_head f_tfile_llink; struct address_space *f_mapping; } ;
861 typedef void *fl_owner_t;
862 struct file_lock_operations { void (*fl_copy_lock)(struct file_lock *, struct file_lock *); void (*fl_release_private)(struct file_lock *); } ;
867 struct lock_manager_operations { int (*lm_compare_owner)(struct file_lock *, struct file_lock *); unsigned long int (*lm_owner_key)(struct file_lock *); void (*lm_notify)(struct file_lock *); int (*lm_grant)(struct file_lock *, struct file_lock *, int); void (*lm_break)(struct file_lock *); int (*lm_change)(struct file_lock **, int); } ;
880 struct net ;
885 struct nlm_lockowner ;
886 struct nfs_lock_info { u32 state; struct nlm_lockowner *owner; struct list_head list; } ;
14 struct nfs4_lock_state ;
15 struct nfs4_lock_info { struct nfs4_lock_state *owner; } ;
19 struct fasync_struct ;
19 struct __anonstruct_afs_145 { struct list_head link; int state; } ;
19 union __anonunion_fl_u_144 { struct nfs_lock_info nfs_fl; struct nfs4_lock_info nfs4_fl; struct __anonstruct_afs_145 afs; } ;
19 struct file_lock { struct file_lock *fl_next; struct hlist_node fl_link; struct list_head fl_block; fl_owner_t fl_owner; unsigned int fl_flags; unsigned char fl_type; unsigned int fl_pid; int fl_link_cpu; struct pid *fl_nspid; wait_queue_head_t fl_wait; struct file *fl_file; loff_t fl_start; loff_t fl_end; struct fasync_struct *fl_fasync; unsigned long fl_break_time; unsigned long fl_downgrade_time; const struct file_lock_operations *fl_ops; const struct lock_manager_operations *fl_lmops; union __anonunion_fl_u_144 fl_u; } ;
988 struct fasync_struct { spinlock_t fa_lock; int magic; int fa_fd; struct fasync_struct *fa_next; struct file *fa_file; struct callback_head fa_rcu; } ;
1182 struct sb_writers { struct percpu_counter counter[3U]; wait_queue_head_t wait; int frozen; wait_queue_head_t wait_unfrozen; struct lockdep_map lock_map[3U]; } ;
1198 struct file_system_type ;
1198 struct super_operations ;
1198 struct xattr_handler ;
1198 struct mtd_info ;
1198 struct super_block { struct list_head s_list; dev_t s_dev; unsigned char s_blocksize_bits; unsigned long s_blocksize; loff_t s_maxbytes; struct file_system_type *s_type; const struct super_operations *s_op; const struct dquot_operations *dq_op; const struct quotactl_ops *s_qcop; const struct export_operations *s_export_op; unsigned long s_flags; unsigned long s_magic; struct dentry *s_root; struct rw_semaphore s_umount; int s_count; atomic_t s_active; void *s_security; const struct xattr_handler **s_xattr; struct list_head s_inodes; struct hlist_bl_head s_anon; struct list_head s_mounts; struct block_device *s_bdev; struct backing_dev_info *s_bdi; struct mtd_info *s_mtd; struct hlist_node s_instances; struct quota_info s_dquot; struct sb_writers s_writers; char s_id[32U]; u8 s_uuid[16U]; void *s_fs_info; unsigned int s_max_links; fmode_t s_mode; u32 s_time_gran; struct mutex s_vfs_rename_mutex; char *s_subtype; char *s_options; const struct dentry_operations *s_d_op; int cleancache_poolid; struct shrinker s_shrink; atomic_long_t s_remove_count; int s_readonly_remount; struct workqueue_struct *s_dio_done_wq; struct hlist_head s_pins; struct list_lru s_dentry_lru; struct list_lru s_inode_lru; struct callback_head rcu; } ;
1429 struct fiemap_extent_info { unsigned int fi_flags; unsigned int fi_extents_mapped; unsigned int fi_extents_max; struct fiemap_extent *fi_extents_start; } ;
1467 struct dir_context { int (*actor)(void *, const char *, int, loff_t , u64 , unsigned int); loff_t pos; } ;
1472 struct file_operations { struct module *owner; loff_t (*llseek)(struct file *, loff_t , int); ssize_t (*read)(struct file *, char *, size_t , loff_t *); ssize_t (*write)(struct file *, const char *, size_t , loff_t *); ssize_t (*aio_read)(struct kiocb *, const struct iovec *, unsigned long, loff_t ); ssize_t (*aio_write)(struct kiocb *, const struct iovec *, unsigned long, loff_t ); ssize_t (*read_iter)(struct kiocb *, struct iov_iter *); ssize_t (*write_iter)(struct kiocb *, struct iov_iter *); int (*iterate)(struct file *, struct dir_context *); unsigned int (*poll)(struct file *, struct poll_table_struct *); long int (*unlocked_ioctl)(struct file *, unsigned int, unsigned long); long int (*compat_ioctl)(struct file *, unsigned int, unsigned long); int (*mmap)(struct file *, struct vm_area_struct *); int (*open)(struct inode *, struct file *); int (*flush)(struct file *, fl_owner_t ); int (*release)(struct inode *, struct file *); int (*fsync)(struct file *, loff_t , loff_t , int); int (*aio_fsync)(struct kiocb *, int); int (*fasync)(int, struct file *, int); int (*lock)(struct file *, int, struct file_lock *); ssize_t (*sendpage)(struct file *, struct page *, int, size_t , loff_t *, int); unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); int (*check_flags)(int); int (*flock)(struct file *, int, struct file_lock *); ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t , unsigned int); ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t , unsigned int); int (*setlease)(struct file *, long, struct file_lock **); long int (*fallocate)(struct file *, int, loff_t , loff_t ); int (*show_fdinfo)(struct seq_file *, struct file *); } ;
1514 struct inode_operations { struct dentry * (*lookup)(struct inode *, struct dentry *, unsigned int); void * (*follow_link)(struct dentry *, struct nameidata *); int (*permission)(struct inode *, int); struct posix_acl * (*get_acl)(struct inode *, int); int (*readlink)(struct dentry *, char *, int); void (*put_link)(struct dentry *, struct nameidata *, void *); int (*create)(struct inode *, struct dentry *, umode_t , bool ); int (*link)(struct dentry *, struct inode *, struct dentry *); int (*unlink)(struct inode *, struct dentry *); int (*symlink)(struct inode *, struct dentry *, const char *); int (*mkdir)(struct inode *, struct dentry *, umode_t ); int (*rmdir)(struct inode *, struct dentry *); int (*mknod)(struct inode *, struct dentry *, umode_t , dev_t ); int (*rename)(struct inode *, struct dentry *, struct inode *, struct dentry *); int (*rename2)(struct inode *, struct dentry *, struct inode *, struct dentry *, unsigned int); int (*setattr)(struct dentry *, struct iattr *); int (*getattr)(struct vfsmount *, struct dentry *, struct kstat *); int (*setxattr)(struct dentry *, const char *, const void *, size_t , int); ssize_t (*getxattr)(struct dentry *, const char *, void *, size_t ); ssize_t (*listxattr)(struct dentry *, char *, size_t ); int (*removexattr)(struct dentry *, const char *); int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 , u64 ); int (*update_time)(struct inode *, struct timespec *, int); int (*atomic_open)(struct inode *, struct dentry *, struct file *, unsigned int, umode_t , int *); int (*tmpfile)(struct inode *, struct dentry *, umode_t ); int (*set_acl)(struct inode *, struct posix_acl *, int); } ;
1561 struct super_operations { struct inode * (*alloc_inode)(struct super_block *); void (*destroy_inode)(struct inode *); void (*dirty_inode)(struct inode *, int); int (*write_inode)(struct inode *, struct writeback_control *); int (*drop_inode)(struct inode *); void (*evict_inode)(struct inode *); void (*put_super)(struct super_block *); int (*sync_fs)(struct super_block *, int); int (*freeze_fs)(struct super_block *); int (*unfreeze_fs)(struct super_block *); int (*statfs)(struct dentry *, struct kstatfs *); int (*remount_fs)(struct super_block *, int *, char *); void (*umount_begin)(struct super_block *); int (*show_options)(struct seq_file *, struct dentry *); int (*show_devname)(struct seq_file *, struct dentry *); int (*show_path)(struct seq_file *, struct dentry *); int (*show_stats)(struct seq_file *, struct dentry *); ssize_t (*quota_read)(struct super_block *, int, char *, size_t , loff_t ); ssize_t (*quota_write)(struct super_block *, int, const char *, size_t , loff_t ); int (*bdev_try_to_free_page)(struct super_block *, struct page *, gfp_t ); long int (*nr_cached_objects)(struct super_block *, int); long int (*free_cached_objects)(struct super_block *, long, int); } ;
1775 struct file_system_type { const char *name; int fs_flags; struct dentry * (*mount)(struct file_system_type *, int, const char *, void *); void (*kill_sb)(struct super_block *); struct module *owner; struct file_system_type *next; struct hlist_head fs_supers; struct lock_class_key s_lock_key; struct lock_class_key s_umount_key; struct lock_class_key s_vfs_rename_key; struct lock_class_key s_writers_key[3U]; struct lock_class_key i_lock_key; struct lock_class_key i_mutex_key; struct lock_class_key i_mutex_dir_key; } ;
2733 struct ctl_table ;
84 struct plist_node { int prio; struct list_head prio_list; struct list_head node_list; } ;
51 struct notifier_block { int (*notifier_call)(struct notifier_block *, unsigned long, void *); struct notifier_block *next; int priority; } ;
58 struct arch_uprobe_task { unsigned long saved_scratch_register; unsigned int saved_trap_nr; unsigned int saved_tf; } ;
66 enum uprobe_task_state { UTASK_RUNNING = 0, UTASK_SSTEP = 1, UTASK_SSTEP_ACK = 2, UTASK_SSTEP_TRAPPED = 3 } ;
73 struct __anonstruct_ldv_16476_151 { struct arch_uprobe_task autask; unsigned long vaddr; } ;
73 struct __anonstruct_ldv_16480_152 { struct callback_head dup_xol_work; unsigned long dup_xol_addr; } ;
73 union __anonunion_ldv_16481_150 { struct __anonstruct_ldv_16476_151 ldv_16476; struct __anonstruct_ldv_16480_152 ldv_16480; } ;
73 struct uprobe ;
73 struct return_instance ;
73 struct uprobe_task { enum uprobe_task_state state; union __anonunion_ldv_16481_150 ldv_16481; struct uprobe *active_uprobe; unsigned long xol_vaddr; struct return_instance *return_instances; unsigned int depth; } ;
94 struct xol_area ;
95 struct uprobes_state { struct xol_area *xol_area; } ;
133 union __anonunion_ldv_16589_153 { struct address_space *mapping; void *s_mem; } ;
133 union __anonunion_ldv_16595_155 { unsigned long index; void *freelist; bool pfmemalloc; } ;
133 struct __anonstruct_ldv_16605_159 { unsigned short inuse; unsigned short objects; unsigned char frozen; } ;
133 union __anonunion_ldv_16607_158 { atomic_t _mapcount; struct __anonstruct_ldv_16605_159 ldv_16605; int units; } ;
133 struct __anonstruct_ldv_16609_157 { union __anonunion_ldv_16607_158 ldv_16607; atomic_t _count; } ;
133 union __anonunion_ldv_16611_156 { unsigned long counters; struct __anonstruct_ldv_16609_157 ldv_16609; unsigned int active; } ;
133 struct __anonstruct_ldv_16612_154 { union __anonunion_ldv_16595_155 ldv_16595; union __anonunion_ldv_16611_156 ldv_16611; } ;
133 struct __anonstruct_ldv_16619_161 { struct page *next; int pages; int pobjects; } ;
133 struct slab ;
133 union __anonunion_ldv_16624_160 { struct list_head lru; struct __anonstruct_ldv_16619_161 ldv_16619; struct slab *slab_page; struct callback_head callback_head; pgtable_t pmd_huge_pte; } ;
133 union __anonunion_ldv_16630_162 { unsigned long private; spinlock_t *ptl; struct kmem_cache *slab_cache; struct page *first_page; } ;
133 struct page { unsigned long flags; union __anonunion_ldv_16589_153 ldv_16589; struct __anonstruct_ldv_16612_154 ldv_16612; union __anonunion_ldv_16624_160 ldv_16624; union __anonunion_ldv_16630_162 ldv_16630; unsigned long debug_flags; } ;
187 struct page_frag { struct page *page; __u32 offset; __u32 size; } ;
239 struct __anonstruct_linear_164 { struct rb_node rb; unsigned long rb_subtree_last; } ;
239 union __anonunion_shared_163 { struct __anonstruct_linear_164 linear; struct list_head nonlinear; } ;
239 struct anon_vma ;
239 struct vm_operations_struct ;
239 struct mempolicy ;
239 struct vm_area_struct { unsigned long vm_start; unsigned long vm_end; struct vm_area_struct *vm_next; struct vm_area_struct *vm_prev; struct rb_node vm_rb; unsigned long rb_subtree_gap; struct mm_struct *vm_mm; pgprot_t vm_page_prot; unsigned long vm_flags; union __anonunion_shared_163 shared; struct list_head anon_vma_chain; struct anon_vma *anon_vma; const struct vm_operations_struct *vm_ops; unsigned long vm_pgoff; struct file *vm_file; void *vm_private_data; struct mempolicy *vm_policy; } ;
311 struct core_thread { struct task_struct *task; struct core_thread *next; } ;
317 struct core_state { atomic_t nr_threads; struct core_thread dumper; struct completion startup; } ;
330 struct task_rss_stat { int events; int count[3U]; } ;
338 struct mm_rss_stat { atomic_long_t count[3U]; } ;
343 struct kioctx_table ;
344 struct linux_binfmt ;
344 struct mmu_notifier_mm ;
344 struct mm_struct { struct vm_area_struct *mmap; struct rb_root mm_rb; u32 vmacache_seqnum; unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); unsigned long mmap_base; unsigned long mmap_legacy_base; unsigned long task_size; unsigned long highest_vm_end; pgd_t *pgd; atomic_t mm_users; atomic_t mm_count; atomic_long_t nr_ptes; int map_count; spinlock_t page_table_lock; struct rw_semaphore mmap_sem; struct list_head mmlist; unsigned long hiwater_rss; unsigned long hiwater_vm; unsigned long total_vm; unsigned long locked_vm; unsigned long pinned_vm; unsigned long shared_vm; unsigned long exec_vm; unsigned long stack_vm; unsigned long def_flags; unsigned long start_code; unsigned long end_code; unsigned long start_data; unsigned long end_data; unsigned long start_brk; unsigned long brk; unsigned long start_stack; unsigned long arg_start; unsigned long arg_end; unsigned long env_start; unsigned long env_end; unsigned long saved_auxv[46U]; struct mm_rss_stat rss_stat; struct linux_binfmt *binfmt; cpumask_var_t cpu_vm_mask_var; mm_context_t context; unsigned long flags; struct core_state *core_state; spinlock_t ioctx_lock; struct kioctx_table *ioctx_table; struct task_struct *owner; struct file *exe_file; struct mmu_notifier_mm *mmu_notifier_mm; struct cpumask cpumask_allocation; unsigned long numa_next_scan; unsigned long numa_scan_offset; int numa_scan_seq; bool tlb_flush_pending; struct uprobes_state uprobes_state; } ;
4 typedef unsigned long cputime_t;
25 struct sem_undo_list ;
25 struct sysv_sem { struct sem_undo_list *undo_list; } ;
78 struct user_struct ;
26 struct sysv_shm { struct list_head shm_clist; } ;
24 struct __anonstruct_sigset_t_165 { unsigned long sig[1U]; } ;
24 typedef struct __anonstruct_sigset_t_165 sigset_t;
25 struct siginfo ;
17 typedef void __signalfn_t(int);
18 typedef __signalfn_t *__sighandler_t;
20 typedef void __restorefn_t();
21 typedef __restorefn_t *__sigrestore_t;
34 union sigval { int sival_int; void *sival_ptr; } ;
10 typedef union sigval sigval_t;
11 struct __anonstruct__kill_167 { __kernel_pid_t _pid; __kernel_uid32_t _uid; } ;
11 struct __anonstruct__timer_168 { __kernel_timer_t _tid; int _overrun; char _pad[0U]; sigval_t _sigval; int _sys_private; } ;
11 struct __anonstruct__rt_169 { __kernel_pid_t _pid; __kernel_uid32_t _uid; sigval_t _sigval; } ;
11 struct __anonstruct__sigchld_170 { __kernel_pid_t _pid; __kernel_uid32_t _uid; int _status; __kernel_clock_t _utime; __kernel_clock_t _stime; } ;
11 struct __anonstruct__sigfault_171 { void *_addr; short _addr_lsb; } ;
11 struct __anonstruct__sigpoll_172 { long _band; int _fd; } ;
11 struct __anonstruct__sigsys_173 { void *_call_addr; int _syscall; unsigned int _arch; } ;
11 union __anonunion__sifields_166 { int _pad[28U]; struct __anonstruct__kill_167 _kill; struct __anonstruct__timer_168 _timer; struct __anonstruct__rt_169 _rt; struct __anonstruct__sigchld_170 _sigchld; struct __anonstruct__sigfault_171 _sigfault; struct __anonstruct__sigpoll_172 _sigpoll; struct __anonstruct__sigsys_173 _sigsys; } ;
11 struct siginfo { int si_signo; int si_errno; int si_code; union __anonunion__sifields_166 _sifields; } ;
109 typedef struct siginfo siginfo_t;
21 struct sigpending { struct list_head list; sigset_t signal; } ;
246 struct sigaction { __sighandler_t sa_handler; unsigned long sa_flags; __sigrestore_t sa_restorer; sigset_t sa_mask; } ;
260 struct k_sigaction { struct sigaction sa; } ;
53 struct seccomp_filter ;
54 struct seccomp { int mode; struct seccomp_filter *filter; } ;
40 struct rt_mutex_waiter ;
41 struct rlimit { __kernel_ulong_t rlim_cur; __kernel_ulong_t rlim_max; } ;
11 struct timerqueue_node { struct rb_node node; ktime_t expires; } ;
12 struct timerqueue_head { struct rb_root head; struct timerqueue_node *next; } ;
50 struct hrtimer_clock_base ;
51 struct hrtimer_cpu_base ;
60 enum hrtimer_restart { HRTIMER_NORESTART = 0, HRTIMER_RESTART = 1 } ;
65 struct hrtimer { struct timerqueue_node node; ktime_t _softexpires; enum hrtimer_restart (*function)(struct hrtimer *); struct hrtimer_clock_base *base; unsigned long state; int start_pid; void *start_site; char start_comm[16U]; } ;
132 struct hrtimer_clock_base { struct hrtimer_cpu_base *cpu_base; int index; clockid_t clockid; struct timerqueue_head active; ktime_t resolution; ktime_t (*get_time)(); ktime_t softirq_time; ktime_t offset; } ;
163 struct hrtimer_cpu_base { raw_spinlock_t lock; unsigned int cpu; unsigned int active_bases; unsigned int clock_was_set; ktime_t expires_next; int hres_active; int hang_detected; unsigned long nr_events; unsigned long nr_retries; unsigned long nr_hangs; ktime_t max_hang_time; struct hrtimer_clock_base clock_base[4U]; } ;
451 struct task_io_accounting { u64 rchar; u64 wchar; u64 syscr; u64 syscw; u64 read_bytes; u64 write_bytes; u64 cancelled_write_bytes; } ;
45 struct latency_record { unsigned long backtrace[12U]; unsigned int count; unsigned long time; unsigned long max; } ;
836 struct nsproxy ;
837 struct ctl_table_root ;
838 struct ctl_table_header ;
839 struct ctl_dir ;
37 typedef int proc_handler(struct ctl_table *, int, void *, size_t *, loff_t *);
57 struct ctl_table_poll { atomic_t event; wait_queue_head_t wait; } ;
96 struct ctl_table { const char *procname; void *data; int maxlen; umode_t mode; struct ctl_table *child; proc_handler *proc_handler; struct ctl_table_poll *poll; void *extra1; void *extra2; } ;
117 struct ctl_node { struct rb_node node; struct ctl_table_header *header; } ;
122 struct __anonstruct_ldv_19199_177 { struct ctl_table *ctl_table; int used; int count; int nreg; } ;
122 union __anonunion_ldv_19201_176 { struct __anonstruct_ldv_19199_177 ldv_19199; struct callback_head rcu; } ;
122 struct ctl_table_set ;
122 struct ctl_table_header { union __anonunion_ldv_19201_176 ldv_19201; struct completion *unregistering; struct ctl_table *ctl_table_arg; struct ctl_table_root *root; struct ctl_table_set *set; struct ctl_dir *parent; struct ctl_node *node; } ;
143 struct ctl_dir { struct ctl_table_header header; struct rb_root root; } ;
149 struct ctl_table_set { int (*is_seen)(struct ctl_table_set *); struct ctl_dir dir; } ;
154 struct ctl_table_root { struct ctl_table_set default_set; struct ctl_table_set * (*lookup)(struct ctl_table_root *, struct nsproxy *); int (*permissions)(struct ctl_table_header *, struct ctl_table *); } ;
191 struct assoc_array_ptr ;
191 struct assoc_array { struct assoc_array_ptr *root; unsigned long nr_leaves_on_tree; } ;
31 typedef int32_t key_serial_t;
34 typedef uint32_t key_perm_t;
35 struct key ;
36 struct signal_struct ;
37 struct key_type ;
41 struct keyring_index_key { struct key_type *type; const char *description; size_t desc_len; } ;
123 union __anonunion_ldv_19345_178 { struct list_head graveyard_link; struct rb_node serial_node; } ;
123 struct key_user ;
123 union __anonunion_ldv_19353_179 { time_t expiry; time_t revoked_at; } ;
123 struct __anonstruct_ldv_19366_181 { struct key_type *type; char *description; } ;
123 union __anonunion_ldv_19367_180 { struct keyring_index_key index_key; struct __anonstruct_ldv_19366_181 ldv_19366; } ;
123 union __anonunion_type_data_182 { struct list_head link; unsigned long x[2U]; void *p[2U]; int reject_error; } ;
123 union __anonunion_payload_184 { unsigned long value; void *rcudata; void *data; void *data2[2U]; } ;
123 union __anonunion_ldv_19382_183 { union __anonunion_payload_184 payload; struct assoc_array keys; } ;
123 struct key { atomic_t usage; key_serial_t serial; union __anonunion_ldv_19345_178 ldv_19345; struct rw_semaphore sem; struct key_user *user; void *security; union __anonunion_ldv_19353_179 ldv_19353; time_t last_used_at; kuid_t uid; kgid_t gid; key_perm_t perm; unsigned short quotalen; unsigned short datalen; unsigned long flags; union __anonunion_ldv_19367_180 ldv_19367; union __anonunion_type_data_182 type_data; union __anonunion_ldv_19382_183 ldv_19382; } ;
358 struct audit_context ;
27 struct group_info { atomic_t usage; int ngroups; int nblocks; kgid_t small_block[32U]; kgid_t *blocks[0U]; } ;
78 struct cred { atomic_t usage; atomic_t subscribers; void *put_addr; unsigned int magic; kuid_t uid; kgid_t gid; kuid_t suid; kgid_t sgid; kuid_t euid; kgid_t egid; kuid_t fsuid; kgid_t fsgid; unsigned int securebits; kernel_cap_t cap_inheritable; kernel_cap_t cap_permitted; kernel_cap_t cap_effective; kernel_cap_t cap_bset; unsigned char jit_keyring; struct key *session_keyring; struct key *process_keyring; struct key *thread_keyring; struct key *request_key_auth; void *security; struct user_struct *user; struct user_namespace *user_ns; struct group_info *group_info; struct callback_head rcu; } ;
126 struct futex_pi_state ;
127 struct robust_list_head ;
128 struct bio_list ;
129 struct fs_struct ;
130 struct perf_event_context ;
131 struct blk_plug ;
180 struct cfs_rq ;
181 struct task_group ;
426 struct sighand_struct { atomic_t count; struct k_sigaction action[64U]; spinlock_t siglock; wait_queue_head_t signalfd_wqh; } ;
465 struct pacct_struct { int ac_flag; long ac_exitcode; unsigned long ac_mem; cputime_t ac_utime; cputime_t ac_stime; unsigned long ac_minflt; unsigned long ac_majflt; } ;
473 struct cpu_itimer { cputime_t expires; cputime_t incr; u32 error; u32 incr_error; } ;
480 struct cputime { cputime_t utime; cputime_t stime; } ;
492 struct task_cputime { cputime_t utime; cputime_t stime; unsigned long long sum_exec_runtime; } ;
512 struct thread_group_cputimer { struct task_cputime cputime; int running; raw_spinlock_t lock; } ;
554 struct autogroup ;
555 struct tty_struct ;
555 struct taskstats ;
555 struct tty_audit_buf ;
555 struct signal_struct { atomic_t sigcnt; atomic_t live; int nr_threads; struct list_head thread_head; wait_queue_head_t wait_chldexit; struct task_struct *curr_target; struct sigpending shared_pending; int group_exit_code; int notify_count; struct task_struct *group_exit_task; int group_stop_count; unsigned int flags; unsigned char is_child_subreaper; unsigned char has_child_subreaper; int posix_timer_id; struct list_head posix_timers; struct hrtimer real_timer; struct pid *leader_pid; ktime_t it_real_incr; struct cpu_itimer it[2U]; struct thread_group_cputimer cputimer; struct task_cputime cputime_expires; struct list_head cpu_timers[3U]; struct pid *tty_old_pgrp; int leader; struct tty_struct *tty; struct autogroup *autogroup; cputime_t utime; cputime_t stime; cputime_t cutime; cputime_t cstime; cputime_t gtime; cputime_t cgtime; struct cputime prev_cputime; unsigned long nvcsw; unsigned long nivcsw; unsigned long cnvcsw; unsigned long cnivcsw; unsigned long min_flt; unsigned long maj_flt; unsigned long cmin_flt; unsigned long cmaj_flt; unsigned long inblock; unsigned long oublock; unsigned long cinblock; unsigned long coublock; unsigned long maxrss; unsigned long cmaxrss; struct task_io_accounting ioac; unsigned long long sum_sched_runtime; struct rlimit rlim[16U]; struct pacct_struct pacct; struct taskstats *stats; unsigned int audit_tty; unsigned int audit_tty_log_passwd; struct tty_audit_buf *tty_audit_buf; struct rw_semaphore group_rwsem; oom_flags_t oom_flags; short oom_score_adj; short oom_score_adj_min; struct mutex cred_guard_mutex; } ;
735 struct user_struct { atomic_t __count; atomic_t processes; atomic_t sigpending; atomic_t inotify_watches; atomic_t inotify_devs; atomic_t fanotify_listeners; atomic_long_t epoll_watches; unsigned long mq_bytes; unsigned long locked_shm; struct key *uid_keyring; struct key *session_keyring; struct hlist_node uidhash_node; kuid_t uid; atomic_long_t locked_vm; } ;
778 struct reclaim_state ;
779 struct sched_info { unsigned long pcount; unsigned long long run_delay; unsigned long long last_arrival; unsigned long long last_queued; } ;
794 struct task_delay_info { spinlock_t lock; unsigned int flags; u64 blkio_start; u64 blkio_delay; u64 swapin_delay; u32 blkio_count; u32 swapin_count; u64 freepages_start; u64 freepages_delay; u32 freepages_count; } ;
1060 struct uts_namespace ;
1061 struct load_weight { unsigned long weight; u32 inv_weight; } ;
1069 struct sched_avg { u32 runnable_avg_sum; u32 runnable_avg_period; u64 last_runnable_update; s64 decay_count; unsigned long load_avg_contrib; } ;
1081 struct sched_statistics { u64 wait_start; u64 wait_max; u64 wait_count; u64 wait_sum; u64 iowait_count; u64 iowait_sum; u64 sleep_start; u64 sleep_max; s64 sum_sleep_runtime; u64 block_start; u64 block_max; u64 exec_max; u64 slice_max; u64 nr_migrations_cold; u64 nr_failed_migrations_affine; u64 nr_failed_migrations_running; u64 nr_failed_migrations_hot; u64 nr_forced_migrations; u64 nr_wakeups; u64 nr_wakeups_sync; u64 nr_wakeups_migrate; u64 nr_wakeups_local; u64 nr_wakeups_remote; u64 nr_wakeups_affine; u64 nr_wakeups_affine_attempts; u64 nr_wakeups_passive; u64 nr_wakeups_idle; } ;
1116 struct sched_entity { struct load_weight load; struct rb_node run_node; struct list_head group_node; unsigned int on_rq; u64 exec_start; u64 sum_exec_runtime; u64 vruntime; u64 prev_sum_exec_runtime; u64 nr_migrations; struct sched_statistics statistics; int depth; struct sched_entity *parent; struct cfs_rq *cfs_rq; struct cfs_rq *my_q; struct sched_avg avg; } ;
1148 struct rt_rq ;
1148 struct sched_rt_entity { struct list_head run_list; unsigned long timeout; unsigned long watchdog_stamp; unsigned int time_slice; struct sched_rt_entity *back; struct sched_rt_entity *parent; struct rt_rq *rt_rq; struct rt_rq *my_q; } ;
1164 struct sched_dl_entity { struct rb_node rb_node; u64 dl_runtime; u64 dl_deadline; u64 dl_period; u64 dl_bw; s64 runtime; u64 deadline; unsigned int flags; int dl_throttled; int dl_new; int dl_boosted; int dl_yielded; struct hrtimer dl_timer; } ;
1222 struct mem_cgroup ;
1222 struct memcg_oom_info { struct mem_cgroup *memcg; gfp_t gfp_mask; int order; unsigned char may_oom; } ;
1639 struct sched_class ;
1639 struct files_struct ;
1639 struct css_set ;
1639 struct compat_robust_list_head ;
1639 struct numa_group ;
1639 struct task_struct { volatile long state; void *stack; atomic_t usage; unsigned int flags; unsigned int ptrace; struct llist_node wake_entry; int on_cpu; struct task_struct *last_wakee; unsigned long wakee_flips; unsigned long wakee_flip_decay_ts; int wake_cpu; int on_rq; int prio; int static_prio; int normal_prio; unsigned int rt_priority; const struct sched_class *sched_class; struct sched_entity se; struct sched_rt_entity rt; struct task_group *sched_task_group; struct sched_dl_entity dl; struct hlist_head preempt_notifiers; unsigned int policy; int nr_cpus_allowed; cpumask_t cpus_allowed; struct sched_info sched_info; struct list_head tasks; struct plist_node pushable_tasks; struct rb_node pushable_dl_tasks; struct mm_struct *mm; struct mm_struct *active_mm; unsigned char brk_randomized; u32 vmacache_seqnum; struct vm_area_struct *vmacache[4U]; struct task_rss_stat rss_stat; int exit_state; int exit_code; int exit_signal; int pdeath_signal; unsigned int jobctl; unsigned int personality; unsigned char in_execve; unsigned char in_iowait; unsigned char sched_reset_on_fork; unsigned char sched_contributes_to_load; unsigned long atomic_flags; pid_t pid; pid_t tgid; struct task_struct *real_parent; struct task_struct *parent; struct list_head children; struct list_head sibling; struct task_struct *group_leader; struct list_head ptraced; struct list_head ptrace_entry; struct pid_link pids[3U]; struct list_head thread_group; struct list_head thread_node; struct completion *vfork_done; int *set_child_tid; int *clear_child_tid; cputime_t utime; cputime_t stime; cputime_t utimescaled; cputime_t stimescaled; cputime_t gtime; struct cputime prev_cputime; unsigned long nvcsw; unsigned long nivcsw; u64 start_time; u64 real_start_time; unsigned long min_flt; unsigned long maj_flt; struct task_cputime cputime_expires; struct list_head cpu_timers[3U]; const struct cred *real_cred; const struct cred *cred; char comm[16U]; int link_count; int total_link_count; struct sysv_sem 
sysvsem; struct sysv_shm sysvshm; unsigned long last_switch_count; struct thread_struct thread; struct fs_struct *fs; struct files_struct *files; struct nsproxy *nsproxy; struct signal_struct *signal; struct sighand_struct *sighand; sigset_t blocked; sigset_t real_blocked; sigset_t saved_sigmask; struct sigpending pending; unsigned long sas_ss_sp; size_t sas_ss_size; int (*notifier)(void *); void *notifier_data; sigset_t *notifier_mask; struct callback_head *task_works; struct audit_context *audit_context; kuid_t loginuid; unsigned int sessionid; struct seccomp seccomp; u32 parent_exec_id; u32 self_exec_id; spinlock_t alloc_lock; raw_spinlock_t pi_lock; struct rb_root pi_waiters; struct rb_node *pi_waiters_leftmost; struct rt_mutex_waiter *pi_blocked_on; struct mutex_waiter *blocked_on; unsigned int irq_events; unsigned long hardirq_enable_ip; unsigned long hardirq_disable_ip; unsigned int hardirq_enable_event; unsigned int hardirq_disable_event; int hardirqs_enabled; int hardirq_context; unsigned long softirq_disable_ip; unsigned long softirq_enable_ip; unsigned int softirq_disable_event; unsigned int softirq_enable_event; int softirqs_enabled; int softirq_context; u64 curr_chain_key; int lockdep_depth; unsigned int lockdep_recursion; struct held_lock held_locks[48U]; gfp_t lockdep_reclaim_gfp; void *journal_info; struct bio_list *bio_list; struct blk_plug *plug; struct reclaim_state *reclaim_state; struct backing_dev_info *backing_dev_info; struct io_context *io_context; unsigned long ptrace_message; siginfo_t *last_siginfo; struct task_io_accounting ioac; u64 acct_rss_mem1; u64 acct_vm_mem1; cputime_t acct_timexpd; nodemask_t mems_allowed; seqcount_t mems_allowed_seq; int cpuset_mem_spread_rotor; int cpuset_slab_spread_rotor; struct css_set *cgroups; struct list_head cg_list; struct robust_list_head *robust_list; struct compat_robust_list_head *compat_robust_list; struct list_head pi_state_list; struct futex_pi_state *pi_state_cache; struct perf_event_context 
*perf_event_ctxp[2U]; struct mutex perf_event_mutex; struct list_head perf_event_list; struct mempolicy *mempolicy; short il_next; short pref_node_fork; int numa_scan_seq; unsigned int numa_scan_period; unsigned int numa_scan_period_max; int numa_preferred_nid; unsigned long numa_migrate_retry; u64 node_stamp; u64 last_task_numa_placement; u64 last_sum_exec_runtime; struct callback_head numa_work; struct list_head numa_entry; struct numa_group *numa_group; unsigned long *numa_faults_memory; unsigned long total_numa_faults; unsigned long *numa_faults_buffer_memory; unsigned long *numa_faults_cpu; unsigned long *numa_faults_buffer_cpu; unsigned long numa_faults_locality[2U]; unsigned long numa_pages_migrated; struct callback_head rcu; struct pipe_inode_info *splice_pipe; struct page_frag task_frag; struct task_delay_info *delays; int make_it_fail; int nr_dirtied; int nr_dirtied_pause; unsigned long dirty_paused_when; int latency_record_count; struct latency_record latency_record[32U]; unsigned long timer_slack_ns; unsigned long default_timer_slack_ns; unsigned long trace; unsigned long trace_recursion; unsigned int memcg_kmem_skip_account; struct memcg_oom_info memcg_oom; struct uprobe_task *utask; unsigned int sequential_io; unsigned int sequential_io_avg; } ;
12 enum writeback_sync_modes { WB_SYNC_NONE = 0, WB_SYNC_ALL = 1 } ;
29 struct writeback_control { long nr_to_write; long pages_skipped; loff_t range_start; loff_t range_end; enum writeback_sync_modes sync_mode; unsigned char for_kupdate; unsigned char for_background; unsigned char tagged_writepages; unsigned char for_reclaim; unsigned char range_cyclic; unsigned char for_sync; } ;
88 struct bdi_writeback ;
39 typedef int congested_fn(void *, int);
48 struct bdi_writeback { struct backing_dev_info *bdi; unsigned int nr; unsigned long last_old_flush; struct delayed_work dwork; struct list_head b_dirty; struct list_head b_io; struct list_head b_more_io; spinlock_t list_lock; } ;
63 struct backing_dev_info { struct list_head bdi_list; unsigned long ra_pages; unsigned long state; unsigned int capabilities; congested_fn *congested_fn; void *congested_data; char *name; struct percpu_counter bdi_stat[4U]; unsigned long bw_time_stamp; unsigned long dirtied_stamp; unsigned long written_stamp; unsigned long write_bandwidth; unsigned long avg_write_bandwidth; unsigned long dirty_ratelimit; unsigned long balanced_dirty_ratelimit; struct fprop_local_percpu completions; int dirty_exceeded; unsigned int min_ratio; unsigned int max_ratio; unsigned int max_prop_frac; struct bdi_writeback wb; spinlock_t wb_lock; struct list_head work_list; struct device *dev; struct timer_list laptop_mode_wb_timer; struct dentry *debug_dir; struct dentry *debug_stats; } ;
15 typedef __u64 Elf64_Addr;
16 typedef __u16 Elf64_Half;
20 typedef __u32 Elf64_Word;
21 typedef __u64 Elf64_Xword;
190 struct elf64_sym { Elf64_Word st_name; unsigned char st_info; unsigned char st_other; Elf64_Half st_shndx; Elf64_Addr st_value; Elf64_Xword st_size; } ;
198 typedef struct elf64_sym Elf64_Sym;
48 union __anonunion_ldv_21950_191 { unsigned long bitmap[4U]; struct callback_head callback_head; } ;
48 struct idr_layer { int prefix; int layer; struct idr_layer *ary[256U]; int count; union __anonunion_ldv_21950_191 ldv_21950; } ;
41 struct idr { struct idr_layer *hint; struct idr_layer *top; int layers; int cur; spinlock_t lock; int id_free_cnt; struct idr_layer *id_free; } ;
124 struct ida_bitmap { long nr_busy; unsigned long bitmap[15U]; } ;
153 struct ida { struct idr idr; struct ida_bitmap *free_bitmap; } ;
185 struct kernfs_open_node ;
186 struct kernfs_iattrs ;
209 struct kernfs_root ;
209 struct kernfs_elem_dir { unsigned long subdirs; struct rb_root children; struct kernfs_root *root; } ;
85 struct kernfs_node ;
85 struct kernfs_elem_symlink { struct kernfs_node *target_kn; } ;
89 struct kernfs_ops ;
89 struct kernfs_elem_attr { const struct kernfs_ops *ops; struct kernfs_open_node *open; loff_t size; struct kernfs_node *notify_next; } ;
96 union __anonunion_ldv_22091_192 { struct kernfs_elem_dir dir; struct kernfs_elem_symlink symlink; struct kernfs_elem_attr attr; } ;
96 struct kernfs_node { atomic_t count; atomic_t active; struct lockdep_map dep_map; struct kernfs_node *parent; const char *name; struct rb_node rb; const void *ns; unsigned int hash; union __anonunion_ldv_22091_192 ldv_22091; void *priv; unsigned short flags; umode_t mode; unsigned int ino; struct kernfs_iattrs *iattr; } ;
138 struct kernfs_syscall_ops { int (*remount_fs)(struct kernfs_root *, int *, char *); int (*show_options)(struct seq_file *, struct kernfs_root *); int (*mkdir)(struct kernfs_node *, const char *, umode_t ); int (*rmdir)(struct kernfs_node *); int (*rename)(struct kernfs_node *, struct kernfs_node *, const char *); } ;
155 struct kernfs_root { struct kernfs_node *kn; unsigned int flags; struct ida ino_ida; struct kernfs_syscall_ops *syscall_ops; struct list_head supers; wait_queue_head_t deactivate_waitq; } ;
171 struct kernfs_open_file { struct kernfs_node *kn; struct file *file; void *priv; struct mutex mutex; int event; struct list_head list; size_t atomic_write_len; bool mmapped; const struct vm_operations_struct *vm_ops; } ;
187 struct kernfs_ops { int (*seq_show)(struct seq_file *, void *); void * (*seq_start)(struct seq_file *, loff_t *); void * (*seq_next)(struct seq_file *, void *, loff_t *); void (*seq_stop)(struct seq_file *, void *); ssize_t (*read)(struct kernfs_open_file *, char *, size_t , loff_t ); size_t atomic_write_len; ssize_t (*write)(struct kernfs_open_file *, char *, size_t , loff_t ); int (*mmap)(struct kernfs_open_file *, struct vm_area_struct *); struct lock_class_key lockdep_key; } ;
464 struct sock ;
465 enum kobj_ns_type { KOBJ_NS_TYPE_NONE = 0, KOBJ_NS_TYPE_NET = 1, KOBJ_NS_TYPES = 2 } ;
471 struct kobj_ns_type_operations { enum kobj_ns_type type; bool (*current_may_mount)(); void * (*grab_current_ns)(); const void * (*netlink_ns)(struct sock *); const void * (*initial_ns)(); void (*drop_ns)(void *); } ;
59 struct bin_attribute ;
60 struct attribute { const char *name; umode_t mode; bool ignore_lockdep; struct lock_class_key *key; struct lock_class_key skey; } ;
37 struct attribute_group { const char *name; umode_t (*is_visible)(struct kobject *, struct attribute *, int); struct attribute **attrs; struct bin_attribute **bin_attrs; } ;
67 struct bin_attribute { struct attribute attr; size_t size; void *private; ssize_t (*read)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t , size_t ); ssize_t (*write)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t , size_t ); int (*mmap)(struct file *, struct kobject *, struct bin_attribute *, struct vm_area_struct *); } ;
131 struct sysfs_ops { ssize_t (*show)(struct kobject *, struct attribute *, char *); ssize_t (*store)(struct kobject *, struct attribute *, const char *, size_t ); } ;
470 struct kref { atomic_t refcount; } ;
52 struct kset ;
52 struct kobj_type ;
52 struct kobject { const char *name; struct list_head entry; struct kobject *parent; struct kset *kset; struct kobj_type *ktype; struct kernfs_node *sd; struct kref kref; struct delayed_work release; unsigned char state_initialized; unsigned char state_in_sysfs; unsigned char state_add_uevent_sent; unsigned char state_remove_uevent_sent; unsigned char uevent_suppress; } ;
114 struct kobj_type { void (*release)(struct kobject *); const struct sysfs_ops *sysfs_ops; struct attribute **default_attrs; const struct kobj_ns_type_operations * (*child_ns_type)(struct kobject *); const void * (*namespace)(struct kobject *); } ;
122 struct kobj_uevent_env { char *argv[3U]; char *envp[32U]; int envp_idx; char buf[2048U]; int buflen; } ;
130 struct kset_uevent_ops { const int (*filter)(struct kset *, struct kobject *); const const char * (*name)(struct kset *, struct kobject *); const int (*uevent)(struct kset *, struct kobject *, struct kobj_uevent_env *); } ;
147 struct kset { struct list_head list; spinlock_t list_lock; struct kobject kobj; const struct kset_uevent_ops *uevent_ops; } ;
222 struct kernel_param ;
227 struct kernel_param_ops { unsigned int flags; int (*set)(const char *, const struct kernel_param *); int (*get)(char *, const struct kernel_param *); void (*free)(void *); } ;
58 struct kparam_string ;
58 struct kparam_array ;
58 union __anonunion_ldv_22771_193 { void *arg; const struct kparam_string *str; const struct kparam_array *arr; } ;
58 struct kernel_param { const char *name; const struct kernel_param_ops *ops; u16 perm; s16 level; union __anonunion_ldv_22771_193 ldv_22771; } ;
70 struct kparam_string { unsigned int maxlen; char *string; } ;
76 struct kparam_array { unsigned int max; unsigned int elemsize; unsigned int *num; const struct kernel_param_ops *ops; void *elem; } ;
466 struct mod_arch_specific { } ;
36 struct module_param_attrs ;
36 struct module_kobject { struct kobject kobj; struct module *mod; struct kobject *drivers_dir; struct module_param_attrs *mp; struct completion *kobj_completion; } ;
46 struct module_attribute { struct attribute attr; ssize_t (*show)(struct module_attribute *, struct module_kobject *, char *); ssize_t (*store)(struct module_attribute *, struct module_kobject *, const char *, size_t ); void (*setup)(struct module *, const char *); int (*test)(struct module *); void (*free)(struct module *); } ;
72 struct exception_table_entry ;
205 enum module_state { MODULE_STATE_LIVE = 0, MODULE_STATE_COMING = 1, MODULE_STATE_GOING = 2, MODULE_STATE_UNFORMED = 3 } ;
212 struct module_ref { unsigned long incs; unsigned long decs; } ;
226 struct module_sect_attrs ;
226 struct module_notes_attrs ;
226 struct tracepoint ;
226 struct ftrace_event_call ;
226 struct module { enum module_state state; struct list_head list; char name[56U]; struct module_kobject mkobj; struct module_attribute *modinfo_attrs; const char *version; const char *srcversion; struct kobject *holders_dir; const struct kernel_symbol *syms; const unsigned long *crcs; unsigned int num_syms; struct kernel_param *kp; unsigned int num_kp; unsigned int num_gpl_syms; const struct kernel_symbol *gpl_syms; const unsigned long *gpl_crcs; const struct kernel_symbol *unused_syms; const unsigned long *unused_crcs; unsigned int num_unused_syms; unsigned int num_unused_gpl_syms; const struct kernel_symbol *unused_gpl_syms; const unsigned long *unused_gpl_crcs; bool sig_ok; const struct kernel_symbol *gpl_future_syms; const unsigned long *gpl_future_crcs; unsigned int num_gpl_future_syms; unsigned int num_exentries; struct exception_table_entry *extable; int (*init)(); void *module_init; void *module_core; unsigned int init_size; unsigned int core_size; unsigned int init_text_size; unsigned int core_text_size; unsigned int init_ro_size; unsigned int core_ro_size; struct mod_arch_specific arch; unsigned int taints; unsigned int num_bugs; struct list_head bug_list; struct bug_entry *bug_table; Elf64_Sym *symtab; Elf64_Sym *core_symtab; unsigned int num_symtab; unsigned int core_num_syms; char *strtab; char *core_strtab; struct module_sect_attrs *sect_attrs; struct module_notes_attrs *notes_attrs; char *args; void *percpu; unsigned int percpu_size; unsigned int num_tracepoints; const struct tracepoint **tracepoints_ptrs; unsigned int num_trace_bprintk_fmt; const char **trace_bprintk_fmt_start; struct ftrace_event_call **trace_events; unsigned int num_trace_events; struct list_head source_list; struct list_head target_list; void (*exit)(); struct module_ref *refptr; ctor_fn_t (**ctors)(); unsigned int num_ctors; } ;
655 struct mnt_namespace ;
656 struct vfsmount { struct dentry *mnt_root; struct super_block *mnt_sb; int mnt_flags; } ;
93 struct match_token { int token; const char *pattern; } ;
26 struct __anonstruct_substring_t_194 { char *from; char *to; } ;
26 typedef struct __anonstruct_substring_t_194 substring_t;
35 struct seq_file { char *buf; size_t size; size_t from; size_t count; size_t pad_until; loff_t index; loff_t read_pos; u64 version; struct mutex lock; const struct seq_operations *op; int poll_event; struct user_namespace *user_ns; void *private; } ;
35 struct seq_operations { void * (*start)(struct seq_file *, loff_t *); void (*stop)(struct seq_file *, void *); void * (*next)(struct seq_file *, void *, loff_t *); int (*show)(struct seq_file *, void *); } ;
368 struct kmem_cache_cpu { void **freelist; unsigned long tid; struct page *page; struct page *partial; unsigned int stat[26U]; } ;
48 struct kmem_cache_order_objects { unsigned long x; } ;
58 struct memcg_cache_params ;
58 struct kmem_cache_node ;
58 struct kmem_cache { struct kmem_cache_cpu *cpu_slab; unsigned long flags; unsigned long min_partial; int size; int object_size; int offset; int cpu_partial; struct kmem_cache_order_objects oo; struct kmem_cache_order_objects max; struct kmem_cache_order_objects min; gfp_t allocflags; int refcount; void (*ctor)(void *); int inuse; int align; int reserved; const char *name; struct list_head list; struct kobject kobj; struct memcg_cache_params *memcg_params; int max_attr_size; struct kset *memcg_kset; int remote_node_defrag_ratio; struct kmem_cache_node *node[1024U]; } ;
501 struct __anonstruct_ldv_23637_196 { struct callback_head callback_head; struct kmem_cache *memcg_caches[0U]; } ;
501 struct __anonstruct_ldv_23643_197 { struct mem_cgroup *memcg; struct list_head list; struct kmem_cache *root_cache; atomic_t nr_pages; } ;
501 union __anonunion_ldv_23644_195 { struct __anonstruct_ldv_23637_196 ldv_23637; struct __anonstruct_ldv_23643_197 ldv_23643; } ;
501 struct memcg_cache_params { bool is_root_cache; union __anonunion_ldv_23644_195 ldv_23644; } ;
7 struct kstatfs { long f_type; long f_bsize; u64 f_blocks; u64 f_bfree; u64 f_bavail; u64 f_files; u64 f_ffree; __kernel_fsid_t f_fsid; long f_namelen; long f_frsize; long f_flags; long f_spare[4U]; } ;
21 struct ipc_namespace ;
22 struct nsproxy { atomic_t count; struct uts_namespace *uts_ns; struct ipc_namespace *ipc_ns; struct mnt_namespace *mnt_ns; struct pid_namespace *pid_ns_for_children; struct net *net_ns; } ;
84 struct iovec { void *iov_base; __kernel_size_t iov_len; } ;
21 struct kvec { void *iov_base; size_t iov_len; } ;
27 union __anonunion_ldv_23787_198 { const struct iovec *iov; const struct bio_vec *bvec; } ;
27 struct iov_iter { int type; size_t iov_offset; size_t count; union __anonunion_ldv_23787_198 ldv_23787; unsigned long nr_segs; } ;
11 typedef unsigned short __kernel_sa_family_t;
12 struct __kernel_sockaddr_storage { __kernel_sa_family_t ss_family; char __data[126U]; } ;
23 typedef __kernel_sa_family_t sa_family_t;
24 struct sockaddr { sa_family_t sa_family; char sa_data[14U]; } ;
38 struct msghdr { void *msg_name; int msg_namelen; struct iovec *msg_iov; __kernel_size_t msg_iovlen; void *msg_control; __kernel_size_t msg_controllen; unsigned int msg_flags; } ;
235 struct prot_inuse ;
236 struct netns_core { struct ctl_table_header *sysctl_hdr; int sysctl_somaxconn; struct prot_inuse *inuse; } ;
38 struct u64_stats_sync { } ;
145 struct ipstats_mib { u64 mibs[36U]; struct u64_stats_sync syncp; } ;
61 struct icmp_mib { unsigned long mibs[28U]; } ;
67 struct icmpmsg_mib { atomic_long_t mibs[512U]; } ;
72 struct icmpv6_mib { unsigned long mibs[6U]; } ;
83 struct icmpv6msg_mib { atomic_long_t mibs[512U]; } ;
93 struct tcp_mib { unsigned long mibs[16U]; } ;
100 struct udp_mib { unsigned long mibs[8U]; } ;
106 struct linux_mib { unsigned long mibs[103U]; } ;
112 struct linux_xfrm_mib { unsigned long mibs[29U]; } ;
118 struct proc_dir_entry ;
118 struct netns_mib { struct tcp_mib *tcp_statistics; struct ipstats_mib *ip_statistics; struct linux_mib *net_statistics; struct udp_mib *udp_statistics; struct udp_mib *udplite_statistics; struct icmp_mib *icmp_statistics; struct icmpmsg_mib *icmpmsg_statistics; struct proc_dir_entry *proc_net_devsnmp6; struct udp_mib *udp_stats_in6; struct udp_mib *udplite_stats_in6; struct ipstats_mib *ipv6_statistics; struct icmpv6_mib *icmpv6_statistics; struct icmpv6msg_mib *icmpv6msg_statistics; struct linux_xfrm_mib *xfrm_statistics; } ;
26 struct netns_unix { int sysctl_max_dgram_qlen; struct ctl_table_header *ctl; } ;
12 struct netns_packet { struct mutex sklist_lock; struct hlist_head sklist; } ;
14 struct netns_frags { struct percpu_counter mem; int timeout; int high_thresh; int low_thresh; } ;
24 struct sk_buff ;
178 struct tcpm_hash_bucket ;
179 struct ipv4_devconf ;
180 struct fib_rules_ops ;
181 struct fib_table ;
182 struct local_ports { seqlock_t lock; int range[2U]; } ;
22 struct ping_group_range { seqlock_t lock; kgid_t range[2U]; } ;
27 struct inet_peer_base ;
27 struct xt_table ;
27 struct netns_ipv4 { struct ctl_table_header *forw_hdr; struct ctl_table_header *frags_hdr; struct ctl_table_header *ipv4_hdr; struct ctl_table_header *route_hdr; struct ctl_table_header *xfrm4_hdr; struct ipv4_devconf *devconf_all; struct ipv4_devconf *devconf_dflt; struct fib_rules_ops *rules_ops; bool fib_has_custom_rules; struct fib_table *fib_local; struct fib_table *fib_main; struct fib_table *fib_default; int fib_num_tclassid_users; struct hlist_head *fib_table_hash; struct sock *fibnl; struct sock **icmp_sk; struct inet_peer_base *peers; struct tcpm_hash_bucket *tcp_metrics_hash; unsigned int tcp_metrics_hash_log; struct netns_frags frags; struct xt_table *iptable_filter; struct xt_table *iptable_mangle; struct xt_table *iptable_raw; struct xt_table *arptable_filter; struct xt_table *iptable_security; struct xt_table *nat_table; int sysctl_icmp_echo_ignore_all; int sysctl_icmp_echo_ignore_broadcasts; int sysctl_icmp_ignore_bogus_error_responses; int sysctl_icmp_ratelimit; int sysctl_icmp_ratemask; int sysctl_icmp_errors_use_inbound_ifaddr; struct local_ports ip_local_ports; int sysctl_tcp_ecn; int sysctl_ip_no_pmtu_disc; int sysctl_ip_fwd_use_pmtu; int sysctl_fwmark_reflect; int sysctl_tcp_fwmark_accept; struct ping_group_range ping_group_range; atomic_t dev_addr_genid; unsigned long *sysctl_local_reserved_ports; struct list_head mr_tables; struct fib_rules_ops *mr_rules_ops; atomic_t rt_genid; } ;
101 struct dst_entry ;
103 struct neighbour ;
103 struct dst_ops { unsigned short family; __be16 protocol; unsigned int gc_thresh; int (*gc)(struct dst_ops *); struct dst_entry * (*check)(struct dst_entry *, __u32 ); unsigned int (*default_advmss)(const struct dst_entry *); unsigned int (*mtu)(const struct dst_entry *); u32 * (*cow_metrics)(struct dst_entry *, unsigned long); void (*destroy)(struct dst_entry *); void (*ifdown)(struct dst_entry *, struct net_device *, int); struct dst_entry * (*negative_advice)(struct dst_entry *); void (*link_failure)(struct sk_buff *); void (*update_pmtu)(struct dst_entry *, struct sock *, struct sk_buff *, u32 ); void (*redirect)(struct dst_entry *, struct sock *, struct sk_buff *); int (*local_out)(struct sk_buff *); struct neighbour * (*neigh_lookup)(const struct dst_entry *, struct sk_buff *, const void *); struct kmem_cache *kmem_cachep; struct percpu_counter pcpuc_entries; } ;
73 struct netns_sysctl_ipv6 { struct ctl_table_header *hdr; struct ctl_table_header *route_hdr; struct ctl_table_header *icmp_hdr; struct ctl_table_header *frags_hdr; struct ctl_table_header *xfrm6_hdr; int bindv6only; int flush_delay; int ip6_rt_max_size; int ip6_rt_gc_min_interval; int ip6_rt_gc_timeout; int ip6_rt_gc_interval; int ip6_rt_gc_elasticity; int ip6_rt_mtu_expires; int ip6_rt_min_advmss; int flowlabel_consistency; int auto_flowlabels; int icmpv6_time; int anycast_src_echo_reply; int fwmark_reflect; } ;
36 struct ipv6_devconf ;
36 struct rt6_info ;
36 struct rt6_statistics ;
36 struct fib6_table ;
36 struct netns_ipv6 { struct netns_sysctl_ipv6 sysctl; struct ipv6_devconf *devconf_all; struct ipv6_devconf *devconf_dflt; struct inet_peer_base *peers; struct netns_frags frags; struct xt_table *ip6table_filter; struct xt_table *ip6table_mangle; struct xt_table *ip6table_raw; struct xt_table *ip6table_security; struct xt_table *ip6table_nat; struct rt6_info *ip6_null_entry; struct rt6_statistics *rt6_stats; struct timer_list ip6_fib_timer; struct hlist_head *fib_table_hash; struct fib6_table *fib6_main_tbl; struct dst_ops ip6_dst_ops; unsigned int ip6_rt_gc_expire; unsigned long ip6_rt_last_gc; struct rt6_info *ip6_prohibit_entry; struct rt6_info *ip6_blk_hole_entry; struct fib6_table *fib6_local_tbl; struct fib_rules_ops *fib6_rules_ops; struct sock **icmp_sk; struct sock *ndisc_sk; struct sock *tcp_sk; struct sock *igmp_sk; struct list_head mr6_tables; struct fib_rules_ops *mr6_rules_ops; atomic_t dev_addr_genid; atomic_t rt_genid; } ;
81 struct netns_nf_frag { struct netns_sysctl_ipv6 sysctl; struct netns_frags frags; } ;
87 struct netns_sysctl_lowpan { struct ctl_table_header *frags_hdr; } ;
14 struct netns_ieee802154_lowpan { struct netns_sysctl_lowpan sysctl; struct netns_frags frags; int max_dsize; } ;
21 struct sctp_mib ;
22 struct netns_sctp { struct sctp_mib *sctp_statistics; struct proc_dir_entry *proc_net_sctp; struct ctl_table_header *sysctl_header; struct sock *ctl_sock; struct list_head local_addr_list; struct list_head addr_waitq; struct timer_list addr_wq_timer; struct list_head auto_asconf_splist; spinlock_t addr_wq_lock; spinlock_t local_addr_lock; unsigned int rto_initial; unsigned int rto_min; unsigned int rto_max; int rto_alpha; int rto_beta; int max_burst; int cookie_preserve_enable; char *sctp_hmac_alg; unsigned int valid_cookie_life; unsigned int sack_timeout; unsigned int hb_interval; int max_retrans_association; int max_retrans_path; int max_retrans_init; int pf_retrans; int sndbuf_policy; int rcvbuf_policy; int default_auto_asconf; int addip_enable; int addip_noauth; int prsctp_enable; int auth_enable; int scope_policy; int rwnd_upd_shift; unsigned long max_autoclose; } ;
133 struct netns_dccp { struct sock *v4_ctl_sk; struct sock *v6_ctl_sk; } ;
114 enum ldv_18882 { SS_FREE = 0, SS_UNCONNECTED = 1, SS_CONNECTING = 2, SS_CONNECTED = 3, SS_DISCONNECTING = 4 } ;
53 typedef enum ldv_18882 socket_state;
70 struct socket_wq { wait_queue_head_t wait; struct fasync_struct *fasync_list; struct callback_head rcu; } ;
94 struct proto_ops ;
94 struct socket { socket_state state; short type; unsigned long flags; struct socket_wq *wq; struct file *file; struct sock *sk; const struct proto_ops *ops; } ;
120 struct proto_ops { int family; struct module *owner; int (*release)(struct socket *); int (*bind)(struct socket *, struct sockaddr *, int); int (*connect)(struct socket *, struct sockaddr *, int, int); int (*socketpair)(struct socket *, struct socket *); int (*accept)(struct socket *, struct socket *, int); int (*getname)(struct socket *, struct sockaddr *, int *, int); unsigned int (*poll)(struct file *, struct socket *, struct poll_table_struct *); int (*ioctl)(struct socket *, unsigned int, unsigned long); int (*compat_ioctl)(struct socket *, unsigned int, unsigned long); int (*listen)(struct socket *, int); int (*shutdown)(struct socket *, int); int (*setsockopt)(struct socket *, int, int, char *, unsigned int); int (*getsockopt)(struct socket *, int, int, char *, int *); int (*compat_setsockopt)(struct socket *, int, int, char *, unsigned int); int (*compat_getsockopt)(struct socket *, int, int, char *, int *); int (*sendmsg)(struct kiocb *, struct socket *, struct msghdr *, size_t ); int (*recvmsg)(struct kiocb *, struct socket *, struct msghdr *, size_t , int); int (*mmap)(struct file *, struct socket *, struct vm_area_struct *); ssize_t (*sendpage)(struct socket *, struct page *, int, size_t , int); ssize_t (*splice_read)(struct socket *, loff_t *, struct pipe_inode_info *, size_t , unsigned int); int (*set_peek_off)(struct sock *, int); } ;
62 struct exception_table_entry { int insn; int fixup; } ;
155 struct klist_node ;
37 struct klist_node { void *n_klist; struct list_head n_node; struct kref n_ref; } ;
67 struct pinctrl ;
68 struct pinctrl_state ;
194 struct dev_pin_info { struct pinctrl *p; struct pinctrl_state *default_state; struct pinctrl_state *sleep_state; struct pinctrl_state *idle_state; } ;
42 struct dma_map_ops ;
42 struct dev_archdata { struct dma_map_ops *dma_ops; void *iommu; } ;
14 struct device_private ;
15 struct device_driver ;
16 struct driver_private ;
17 struct class ;
18 struct subsys_private ;
19 struct bus_type ;
20 struct iommu_ops ;
21 struct iommu_group ;
60 struct device_attribute ;
60 struct bus_type { const char *name; const char *dev_name; struct device *dev_root; struct device_attribute *dev_attrs; const struct attribute_group **bus_groups; const struct attribute_group **dev_groups; const struct attribute_group **drv_groups; int (*match)(struct device *, struct device_driver *); int (*uevent)(struct device *, struct kobj_uevent_env *); int (*probe)(struct device *); int (*remove)(struct device *); void (*shutdown)(struct device *); int (*online)(struct device *); int (*offline)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct dev_pm_ops *pm; const struct iommu_ops *iommu_ops; struct subsys_private *p; struct lock_class_key lock_key; } ;
138 struct device_type ;
195 struct of_device_id ;
195 struct acpi_device_id ;
195 struct device_driver { const char *name; struct bus_type *bus; struct module *owner; const char *mod_name; bool suppress_bind_attrs; const struct of_device_id *of_match_table; const struct acpi_device_id *acpi_match_table; int (*probe)(struct device *); int (*remove)(struct device *); void (*shutdown)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct attribute_group **groups; const struct dev_pm_ops *pm; struct driver_private *p; } ;
321 struct class_attribute ;
321 struct class { const char *name; struct module *owner; struct class_attribute *class_attrs; const struct attribute_group **dev_groups; struct kobject *dev_kobj; int (*dev_uevent)(struct device *, struct kobj_uevent_env *); char * (*devnode)(struct device *, umode_t *); void (*class_release)(struct class *); void (*dev_release)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct kobj_ns_type_operations *ns_type; const void * (*namespace)(struct device *); const struct dev_pm_ops *pm; struct subsys_private *p; } ;
414 struct class_attribute { struct attribute attr; ssize_t (*show)(struct class *, struct class_attribute *, char *); ssize_t (*store)(struct class *, struct class_attribute *, const char *, size_t ); } ;
482 struct device_type { const char *name; const struct attribute_group **groups; int (*uevent)(struct device *, struct kobj_uevent_env *); char * (*devnode)(struct device *, umode_t *, kuid_t *, kgid_t *); void (*release)(struct device *); const struct dev_pm_ops *pm; } ;
510 struct device_attribute { struct attribute attr; ssize_t (*show)(struct device *, struct device_attribute *, char *); ssize_t (*store)(struct device *, struct device_attribute *, const char *, size_t ); } ;
642 struct device_dma_parameters { unsigned int max_segment_size; unsigned long segment_boundary_mask; } ;
651 struct acpi_device ;
652 struct acpi_dev_node { struct acpi_device *companion; } ;
658 struct dma_coherent_mem ;
658 struct cma ;
658 struct device { struct device *parent; struct device_private *p; struct kobject kobj; const char *init_name; const struct device_type *type; struct mutex mutex; struct bus_type *bus; struct device_driver *driver; void *platform_data; void *driver_data; struct dev_pm_info power; struct dev_pm_domain *pm_domain; struct dev_pin_info *pins; int numa_node; u64 *dma_mask; u64 coherent_dma_mask; unsigned long dma_pfn_offset; struct device_dma_parameters *dma_parms; struct list_head dma_pools; struct dma_coherent_mem *dma_mem; struct cma *cma_area; struct dev_archdata archdata; struct device_node *of_node; struct acpi_dev_node acpi_node; dev_t devt; u32 id; spinlock_t devres_lock; struct list_head devres_head; struct klist_node knode_class; struct class *class; const struct attribute_group **groups; void (*release)(struct device *); struct iommu_group *iommu_group; bool offline_disabled; bool offline; } ;
805 struct wakeup_source { const char *name; struct list_head entry; spinlock_t lock; struct timer_list timer; unsigned long timer_expires; ktime_t total_time; ktime_t max_time; ktime_t last_time; ktime_t start_prevent_time; ktime_t prevent_sleep_time; unsigned long event_count; unsigned long active_count; unsigned long relax_count; unsigned long expire_count; unsigned long wakeup_count; bool active; bool autosleep_enabled; } ;
188 struct vm_fault { unsigned int flags; unsigned long pgoff; void *virtual_address; struct page *page; unsigned long max_pgoff; pte_t *pte; } ;
221 struct vm_operations_struct { void (*open)(struct vm_area_struct *); void (*close)(struct vm_area_struct *); int (*fault)(struct vm_area_struct *, struct vm_fault *); void (*map_pages)(struct vm_area_struct *, struct vm_fault *); int (*page_mkwrite)(struct vm_area_struct *, struct vm_fault *); int (*access)(struct vm_area_struct *, unsigned long, void *, int, int); const char * (*name)(struct vm_area_struct *); int (*set_policy)(struct vm_area_struct *, struct mempolicy *); struct mempolicy * (*get_policy)(struct vm_area_struct *, unsigned long); int (*migrate)(struct vm_area_struct *, const nodemask_t *, const nodemask_t *, unsigned long); int (*remap_pages)(struct vm_area_struct *, unsigned long, unsigned long, unsigned long); } ;
2119 struct scatterlist { unsigned long sg_magic; unsigned long page_link; unsigned int offset; unsigned int length; dma_addr_t dma_address; unsigned int dma_length; } ;
17 struct sg_table { struct scatterlist *sgl; unsigned int nents; unsigned int orig_nents; } ;
38 typedef s32 dma_cookie_t;
1164 struct dma_attrs { unsigned long flags[1U]; } ;
70 enum dma_data_direction { DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2, DMA_NONE = 3 } ;
77 struct dma_map_ops { void * (*alloc)(struct device *, size_t , dma_addr_t *, gfp_t , struct dma_attrs *); void (*free)(struct device *, size_t , void *, dma_addr_t , struct dma_attrs *); int (*mmap)(struct device *, struct vm_area_struct *, void *, dma_addr_t , size_t , struct dma_attrs *); int (*get_sgtable)(struct device *, struct sg_table *, void *, dma_addr_t , size_t , struct dma_attrs *); dma_addr_t (*map_page)(struct device *, struct page *, unsigned long, size_t , enum dma_data_direction , struct dma_attrs *); void (*unmap_page)(struct device *, dma_addr_t , size_t , enum dma_data_direction , struct dma_attrs *); int (*map_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction , struct dma_attrs *); void (*unmap_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction , struct dma_attrs *); void (*sync_single_for_cpu)(struct device *, dma_addr_t , size_t , enum dma_data_direction ); void (*sync_single_for_device)(struct device *, dma_addr_t , size_t , enum dma_data_direction ); void (*sync_sg_for_cpu)(struct device *, struct scatterlist *, int, enum dma_data_direction ); void (*sync_sg_for_device)(struct device *, struct scatterlist *, int, enum dma_data_direction ); int (*mapping_error)(struct device *, dma_addr_t ); int (*dma_supported)(struct device *, u64 ); int (*set_dma_mask)(struct device *, u64 ); int is_phys; } ;
33 struct nf_conntrack { atomic_t use; } ;
136 struct nf_bridge_info { atomic_t use; unsigned int mask; struct net_device *physindev; struct net_device *physoutdev; unsigned long data[4U]; } ;
146 struct sk_buff_head { struct sk_buff *next; struct sk_buff *prev; __u32 qlen; spinlock_t lock; } ;
361 typedef unsigned int sk_buff_data_t;
362 struct __anonstruct_ldv_30927_208 { u32 stamp_us; u32 stamp_jiffies; } ;
362 union __anonunion_ldv_30928_207 { u64 v64; struct __anonstruct_ldv_30927_208 ldv_30927; } ;
362 struct skb_mstamp { union __anonunion_ldv_30928_207 ldv_30928; } ;
415 union __anonunion_ldv_30947_209 { ktime_t tstamp; struct skb_mstamp skb_mstamp; } ;
415 struct sec_path ;
415 struct __anonstruct_ldv_30963_211 { __u16 csum_start; __u16 csum_offset; } ;
415 union __anonunion_ldv_30964_210 { __wsum csum; struct __anonstruct_ldv_30963_211 ldv_30963; } ;
415 union __anonunion_ldv_31004_212 { unsigned int napi_id; dma_cookie_t dma_cookie; } ;
415 union __anonunion_ldv_31010_213 { __u32 mark; __u32 dropcount; __u32 reserved_tailroom; } ;
415 struct sk_buff { struct sk_buff *next; struct sk_buff *prev; union __anonunion_ldv_30947_209 ldv_30947; struct sock *sk; struct net_device *dev; char cb[48U]; unsigned long _skb_refdst; struct sec_path *sp; unsigned int len; unsigned int data_len; __u16 mac_len; __u16 hdr_len; union __anonunion_ldv_30964_210 ldv_30964; __u32 priority; unsigned char ignore_df; unsigned char cloned; unsigned char ip_summed; unsigned char nohdr; unsigned char nfctinfo; unsigned char pkt_type; unsigned char fclone; unsigned char ipvs_property; unsigned char peeked; unsigned char nf_trace; __be16 protocol; void (*destructor)(struct sk_buff *); struct nf_conntrack *nfct; struct nf_bridge_info *nf_bridge; int skb_iif; __u32 hash; __be16 vlan_proto; __u16 vlan_tci; __u16 tc_index; __u16 tc_verd; __u16 queue_mapping; unsigned char ndisc_nodetype; unsigned char pfmemalloc; unsigned char ooo_okay; unsigned char l4_hash; unsigned char sw_hash; unsigned char wifi_acked_valid; unsigned char wifi_acked; unsigned char no_fcs; unsigned char head_frag; unsigned char encapsulation; unsigned char encap_hdr_csum; unsigned char csum_valid; unsigned char csum_complete_sw; union __anonunion_ldv_31004_212 ldv_31004; __u32 secmark; union __anonunion_ldv_31010_213 ldv_31010; __be16 inner_protocol; __u16 inner_transport_header; __u16 inner_network_header; __u16 inner_mac_header; __u16 transport_header; __u16 network_header; __u16 mac_header; sk_buff_data_t tail; sk_buff_data_t end; unsigned char *head; unsigned char *data; unsigned int truesize; atomic_t users; } ;
337 struct nf_logger ;
338 struct netns_nf { struct proc_dir_entry *proc_netfilter; const struct nf_logger *nf_loggers[13U]; struct ctl_table_header *nf_log_dir_header; } ;
17 struct ebt_table ;
18 struct netns_xt { struct list_head tables[13U]; bool notrack_deprecated_warning; struct ebt_table *broute_table; struct ebt_table *frame_filter; struct ebt_table *frame_nat; } ;
18 struct hlist_nulls_node ;
18 struct hlist_nulls_head { struct hlist_nulls_node *first; } ;
20 struct hlist_nulls_node { struct hlist_nulls_node *next; struct hlist_nulls_node **pprev; } ;
32 struct nf_proto_net { struct ctl_table_header *ctl_table_header; struct ctl_table *ctl_table; struct ctl_table_header *ctl_compat_header; struct ctl_table *ctl_compat_table; unsigned int users; } ;
25 struct nf_generic_net { struct nf_proto_net pn; unsigned int timeout; } ;
30 struct nf_tcp_net { struct nf_proto_net pn; unsigned int timeouts[14U]; unsigned int tcp_loose; unsigned int tcp_be_liberal; unsigned int tcp_max_retrans; } ;
44 struct nf_udp_net { struct nf_proto_net pn; unsigned int timeouts[2U]; } ;
49 struct nf_icmp_net { struct nf_proto_net pn; unsigned int timeout; } ;
54 struct nf_ip_net { struct nf_generic_net generic; struct nf_tcp_net tcp; struct nf_udp_net udp; struct nf_icmp_net icmp; struct nf_icmp_net icmpv6; struct ctl_table_header *ctl_table_header; struct ctl_table *ctl_table; } ;
65 struct ct_pcpu { spinlock_t lock; struct hlist_nulls_head unconfirmed; struct hlist_nulls_head dying; struct hlist_nulls_head tmpl; } ;
73 struct ip_conntrack_stat ;
73 struct nf_ct_event_notifier ;
73 struct nf_exp_event_notifier ;
73 struct netns_ct { atomic_t count; unsigned int expect_count; struct delayed_work ecache_dwork; bool ecache_dwork_pending; struct ctl_table_header *sysctl_header; struct ctl_table_header *acct_sysctl_header; struct ctl_table_header *tstamp_sysctl_header; struct ctl_table_header *event_sysctl_header; struct ctl_table_header *helper_sysctl_header; char *slabname; unsigned int sysctl_log_invalid; int sysctl_events; int sysctl_acct; int sysctl_auto_assign_helper; bool auto_assign_helper_warned; int sysctl_tstamp; int sysctl_checksum; unsigned int htable_size; seqcount_t generation; struct kmem_cache *nf_conntrack_cachep; struct hlist_nulls_head *hash; struct hlist_head *expect_hash; struct ct_pcpu *pcpu_lists; struct ip_conntrack_stat *stat; struct nf_ct_event_notifier *nf_conntrack_event_cb; struct nf_exp_event_notifier *nf_expect_event_cb; struct nf_ip_net nf_ct_proto; unsigned int labels_used; u8 label_words; struct hlist_head *nat_bysource; unsigned int nat_htable_size; } ;
115 struct nft_af_info ;
116 struct netns_nftables { struct list_head af_info; struct list_head commit_list; struct nft_af_info *ipv4; struct nft_af_info *ipv6; struct nft_af_info *inet; struct nft_af_info *arp; struct nft_af_info *bridge; unsigned int base_seq; u8 gencursor; } ;
450 struct tasklet_struct { struct tasklet_struct *next; unsigned long state; atomic_t count; void (*func)(unsigned long); unsigned long data; } ;
663 struct flow_cache_percpu { struct hlist_head *hash_table; int hash_count; u32 hash_rnd; int hash_rnd_recalc; struct tasklet_struct flush_tasklet; } ;
16 struct flow_cache { u32 hash_shift; struct flow_cache_percpu *percpu; struct notifier_block hotcpu_notifier; int low_watermark; int high_watermark; struct timer_list rnd_timer; } ;
25 struct xfrm_policy_hash { struct hlist_head *table; unsigned int hmask; } ;
17 struct netns_xfrm { struct list_head state_all; struct hlist_head *state_bydst; struct hlist_head *state_bysrc; struct hlist_head *state_byspi; unsigned int state_hmask; unsigned int state_num; struct work_struct state_hash_work; struct hlist_head state_gc_list; struct work_struct state_gc_work; struct list_head policy_all; struct hlist_head *policy_byidx; unsigned int policy_idx_hmask; struct hlist_head policy_inexact[6U]; struct xfrm_policy_hash policy_bydst[6U]; unsigned int policy_count[6U]; struct work_struct policy_hash_work; struct sock *nlsk; struct sock *nlsk_stash; u32 sysctl_aevent_etime; u32 sysctl_aevent_rseqth; int sysctl_larval_drop; u32 sysctl_acq_expires; struct ctl_table_header *sysctl_hdr; struct dst_ops xfrm4_dst_ops; struct dst_ops xfrm6_dst_ops; spinlock_t xfrm_state_lock; rwlock_t xfrm_policy_lock; struct mutex xfrm_cfg_mutex; struct flow_cache flow_cache_global; atomic_t flow_cache_genid; struct list_head flow_cache_gc_list; spinlock_t flow_cache_gc_lock; struct work_struct flow_cache_gc_work; struct work_struct flow_cache_flush_work; struct mutex flow_flush_sem; } ;
74 struct net_generic ;
75 struct netns_ipvs ;
76 struct net { atomic_t passive; atomic_t count; spinlock_t rules_mod_lock; struct list_head list; struct list_head cleanup_list; struct list_head exit_list; struct user_namespace *user_ns; unsigned int proc_inum; struct proc_dir_entry *proc_net; struct proc_dir_entry *proc_net_stat; struct ctl_table_set sysctls; struct sock *rtnl; struct sock *genl_sock; struct list_head dev_base_head; struct hlist_head *dev_name_head; struct hlist_head *dev_index_head; unsigned int dev_base_seq; int ifindex; unsigned int dev_unreg_count; struct list_head rules_ops; struct net_device *loopback_dev; struct netns_core core; struct netns_mib mib; struct netns_packet packet; struct netns_unix unx; struct netns_ipv4 ipv4; struct netns_ipv6 ipv6; struct netns_ieee802154_lowpan ieee802154_lowpan; struct netns_sctp sctp; struct netns_dccp dccp; struct netns_nf nf; struct netns_xt xt; struct netns_ct ct; struct netns_nftables nft; struct netns_nf_frag nf_frag; struct sock *nfnl; struct sock *nfnl_stash; struct sk_buff_head wext_nlevents; struct net_generic *gen; struct netns_xfrm xfrm; struct netns_ipvs *ipvs; struct sock *diag_nlsk; atomic_t fnhe_genid; } ;
25 struct __anonstruct_i32_229 { u32 ino; u32 gen; u32 parent_ino; u32 parent_gen; } ;
25 struct __anonstruct_udf_230 { u32 block; u16 partref; u16 parent_partref; u32 generation; u32 parent_block; u32 parent_generation; } ;
25 union __anonunion_ldv_34941_228 { struct __anonstruct_i32_229 i32; struct __anonstruct_udf_230 udf; __u32 raw[0U]; } ;
25 struct fid { union __anonunion_ldv_34941_228 ldv_34941; } ;
123 struct export_operations { int (*encode_fh)(struct inode *, __u32 *, int *, struct inode *); struct dentry * (*fh_to_dentry)(struct super_block *, struct fid *, int, int); struct dentry * (*fh_to_parent)(struct super_block *, struct fid *, int, int); int (*get_name)(struct dentry *, char *, struct dentry *); struct dentry * (*get_parent)(struct dentry *); int (*commit_metadata)(struct inode *); } ;
11 typedef void * mempool_alloc_t(gfp_t , void *);
12 typedef void mempool_free_t(void *, void *);
13 struct mempool_s { spinlock_t lock; int min_nr; int curr_nr; void **elements; void *pool_data; mempool_alloc_t *alloc; mempool_free_t *free; wait_queue_head_t wait; } ;
24 typedef struct mempool_s mempool_t;
38 struct ceph_entity_name { __u8 type; __le64 num; } ;
58 struct ceph_entity_addr { __le32 type; __le32 nonce; struct __kernel_sockaddr_storage in_addr; } ;
67 struct ceph_entity_inst { struct ceph_entity_name name; struct ceph_entity_addr addr; } ;
72 struct ceph_msg_connect { __le64 features; __le32 host_type; __le32 global_seq; __le32 connect_seq; __le32 protocol_version; __le32 authorizer_protocol; __le32 authorizer_len; __u8 flags; } ;
106 struct ceph_msg_connect_reply { __u8 tag; __le64 features; __le32 global_seq; __le32 connect_seq; __le32 protocol_version; __le32 authorizer_len; __u8 flags; } ;
140 struct ceph_msg_header { __le64 seq; __le64 tid; __le16 type; __le16 priority; __le16 version; __le32 front_len; __le32 middle_len; __le32 data_len; __le16 data_off; struct ceph_entity_name src; __le32 reserved; __le32 crc; } ;
158 struct ceph_msg_footer { __le32 front_crc; __le32 middle_crc; __le32 data_crc; __u8 flags; } ;
171 struct ceph_fsid { unsigned char fsid[16U]; } ;
28 struct ceph_timespec { __le32 tv_sec; __le32 tv_nsec; } ;
27 struct ceph_buffer { struct kref kref; struct kvec vec; size_t alloc_len; } ;
37 struct ceph_msg ;
38 struct ceph_connection ;
39 struct ceph_auth_handshake ;
39 struct ceph_connection_operations { struct ceph_connection * (*get)(struct ceph_connection *); void (*put)(struct ceph_connection *); void (*dispatch)(struct ceph_connection *, struct ceph_msg *); struct ceph_auth_handshake * (*get_authorizer)(struct ceph_connection *, int *, int); int (*verify_authorizer_reply)(struct ceph_connection *, int); int (*invalidate_authorizer)(struct ceph_connection *); void (*fault)(struct ceph_connection *); void (*peer_reset)(struct ceph_connection *); struct ceph_msg * (*alloc_msg)(struct ceph_connection *, struct ceph_msg_header *, int *); } ;
44 struct ceph_messenger { struct ceph_entity_inst inst; struct ceph_entity_addr my_enc_addr; atomic_t stopping; bool nocrc; u32 global_seq; spinlock_t global_seq_lock; u64 supported_features; u64 required_features; } ;
67 enum ceph_msg_data_type { CEPH_MSG_DATA_NONE = 0, CEPH_MSG_DATA_PAGES = 1, CEPH_MSG_DATA_PAGELIST = 2, CEPH_MSG_DATA_BIO = 3 } ;
91 struct __anonstruct_ldv_36463_250 { struct bio *bio; size_t bio_length; } ;
91 struct __anonstruct_ldv_36468_251 { struct page **pages; size_t length; unsigned int alignment; } ;
91 struct ceph_pagelist ;
91 union __anonunion_ldv_36471_249 { struct __anonstruct_ldv_36463_250 ldv_36463; struct __anonstruct_ldv_36468_251 ldv_36468; struct ceph_pagelist *pagelist; } ;
91 struct ceph_msg_data { struct list_head links; enum ceph_msg_data_type type; union __anonunion_ldv_36471_249 ldv_36471; } ;
110 struct __anonstruct_ldv_36483_253 { struct bio *bio; struct bvec_iter bvec_iter; } ;
110 struct __anonstruct_ldv_36488_254 { unsigned int page_offset; unsigned short page_index; unsigned short page_count; } ;
110 struct __anonstruct_ldv_36492_255 { struct page *page; size_t offset; } ;
110 union __anonunion_ldv_36493_252 { struct __anonstruct_ldv_36483_253 ldv_36483; struct __anonstruct_ldv_36488_254 ldv_36488; struct __anonstruct_ldv_36492_255 ldv_36492; } ;
110 struct ceph_msg_data_cursor { size_t total_resid; struct list_head *data_head; struct ceph_msg_data *data; size_t resid; bool last_piece; bool need_crc; union __anonunion_ldv_36493_252 ldv_36493; } ;
137 struct ceph_msgpool ;
137 struct ceph_msg { struct ceph_msg_header hdr; struct ceph_msg_footer footer; struct kvec front; struct ceph_buffer *middle; size_t data_length; struct list_head data; struct ceph_msg_data_cursor cursor; struct ceph_connection *con; struct list_head list_head; struct kref kref; bool more_to_follow; bool needs_out_seq; int front_alloc_len; unsigned long ack_stamp; struct ceph_msgpool *pool; } ;
164 struct ceph_connection { void *private; const struct ceph_connection_operations *ops; struct ceph_messenger *msgr; atomic_t sock_state; struct socket *sock; struct ceph_entity_addr peer_addr; struct ceph_entity_addr peer_addr_for_me; unsigned long flags; unsigned long state; const char *error_msg; struct ceph_entity_name peer_name; u64 peer_features; u32 connect_seq; u32 peer_global_seq; int auth_retry; void *auth_reply_buf; int auth_reply_buf_len; struct mutex mutex; struct list_head out_queue; struct list_head out_sent; u64 out_seq; u64 in_seq; u64 in_seq_acked; char in_banner[30U]; struct ceph_msg_connect out_connect; struct ceph_msg_connect_reply in_reply; struct ceph_entity_addr actual_peer_addr; struct ceph_msg *out_msg; bool out_msg_done; struct kvec out_kvec[8U]; struct kvec *out_kvec_cur; int out_kvec_left; int out_skip; int out_kvec_bytes; bool out_kvec_is_msg; int out_more; __le64 out_temp_ack; struct ceph_msg_header in_hdr; struct ceph_msg *in_msg; u32 in_front_crc; u32 in_middle_crc; u32 in_data_crc; char in_tag; int in_base_pos; __le64 in_temp_ack; struct delayed_work work; unsigned long delay; } ;
293 struct ceph_msgpool { const char *name; mempool_t *pool; int type; int front_len; } ;
25 struct ceph_client ;
27 struct ceph_auth_client ;
28 struct ceph_monmap { struct ceph_fsid fsid; u32 epoch; u32 num_mon; struct ceph_entity_inst mon_inst[0U]; } ;
23 struct ceph_mon_client ;
58 struct ceph_mon_client { struct ceph_client *client; struct ceph_monmap *monmap; struct mutex mutex; struct delayed_work delayed_work; struct ceph_auth_client *auth; struct ceph_msg *m_auth; struct ceph_msg *m_auth_reply; struct ceph_msg *m_subscribe; struct ceph_msg *m_subscribe_ack; int pending_auth; bool hunting; int cur_mon; unsigned long sub_sent; unsigned long sub_renew_after; struct ceph_connection con; struct rb_root generic_request_tree; int num_generic_requests; u64 last_tid; int want_mdsmap; int want_next_osdmap; u32 have_osdmap; u32 have_mdsmap; struct dentry *debugfs_file; } ;
219 struct crush_rule_step { __u32 op; __s32 arg1; __s32 arg2; } ;
53 struct crush_rule_mask { __u8 ruleset; __u8 type; __u8 min_size; __u8 max_size; } ;
75 struct crush_rule { __u32 len; struct crush_rule_mask mask; struct crush_rule_step steps[0U]; } ;
108 struct crush_bucket { __s32 id; __u16 type; __u8 alg; __u8 hash; __u32 weight; __u32 size; __s32 *items; __u32 perm_x; __u32 perm_n; __u32 *perm; } ;
151 struct crush_map { struct crush_bucket **buckets; struct crush_rule **rules; __s32 max_buckets; __u32 max_rules; __s32 max_devices; __u32 choose_local_tries; __u32 choose_local_fallback_tries; __u32 choose_total_tries; __u32 chooseleaf_descend_once; __u8 chooseleaf_vary_r; } ;
86 struct ceph_osdmap { struct ceph_fsid fsid; u32 epoch; u32 mkfs_epoch; struct ceph_timespec created; struct ceph_timespec modified; u32 flags; u32 max_osd; u8 *osd_state; u32 *osd_weight; struct ceph_entity_addr *osd_addr; struct rb_root pg_temp; struct rb_root primary_temp; u32 *osd_primary_affinity; struct rb_root pg_pools; u32 pool_max; struct crush_map *crush; struct mutex crush_scratch_mutex; int crush_scratch_ary[48U]; } ;
224 struct ceph_authorizer ;
225 struct ceph_auth_handshake { struct ceph_authorizer *authorizer; void *authorizer_buf; size_t authorizer_buf_len; void *authorizer_reply_buf; size_t authorizer_reply_buf_len; } ;
24 struct ceph_auth_client_ops { const char *name; int (*is_authenticated)(struct ceph_auth_client *); int (*should_authenticate)(struct ceph_auth_client *); int (*build_request)(struct ceph_auth_client *, void *, void *); int (*handle_reply)(struct ceph_auth_client *, int, void *, void *); int (*create_authorizer)(struct ceph_auth_client *, int, struct ceph_auth_handshake *); int (*update_authorizer)(struct ceph_auth_client *, int, struct ceph_auth_handshake *); int (*verify_authorizer_reply)(struct ceph_auth_client *, struct ceph_authorizer *, size_t ); void (*destroy_authorizer)(struct ceph_auth_client *, struct ceph_authorizer *); void (*invalidate_authorizer)(struct ceph_auth_client *, int); void (*reset)(struct ceph_auth_client *); void (*destroy)(struct ceph_auth_client *); } ;
70 struct ceph_crypto_key ;
70 struct ceph_auth_client { u32 protocol; void *private; const struct ceph_auth_client_ops *ops; bool negotiating; const char *name; u64 global_id; const struct ceph_crypto_key *key; unsigned int want_keys; struct mutex mutex; } ;
114 struct ceph_pagelist { struct list_head head; void *mapped_tail; size_t length; size_t room; struct list_head free_list; size_t num_pages_free; } ;
76 struct ceph_osd_client ;
194 struct ceph_osd_client { struct ceph_client *client; struct ceph_osdmap *osdmap; struct rw_semaphore map_sem; struct completion map_waiters; u64 last_requested_map; struct mutex request_mutex; struct rb_root osds; struct list_head osd_lru; u64 timeout_tid; u64 last_tid; struct rb_root requests; struct list_head req_lru; struct list_head req_unsent; struct list_head req_notarget; struct list_head req_linger; int num_requests; struct delayed_work timeout_work; struct delayed_work osds_timeout_work; struct dentry *debugfs_file; mempool_t *req_mempool; struct ceph_msgpool msgpool_op; struct ceph_msgpool msgpool_op_reply; spinlock_t event_lock; struct rb_root event_tree; u64 event_count; struct workqueue_struct *notify_wq; } ;
365 struct ceph_options { int flags; struct ceph_fsid fsid; struct ceph_entity_addr my_addr; int mount_timeout; int osd_idle_ttl; int osd_keepalive_timeout; struct ceph_entity_addr *mon_addr; int num_mon; char *name; struct ceph_crypto_key *key; } ;
103 struct ceph_client { struct ceph_fsid fsid; bool have_fsid; void *private; struct ceph_options *options; struct mutex mount_mutex; wait_queue_head_t auth_wq; int auth_err; int (*extra_mon_dispatch)(struct ceph_client *, struct ceph_msg *); u64 supported_features; u64 required_features; struct ceph_messenger msgr; struct ceph_mon_client monc; struct ceph_osd_client osdc; struct dentry *debugfs_dir; struct dentry *debugfs_monmap; struct dentry *debugfs_osdmap; } ;
31 struct ceph_crypto_key { int type; struct ceph_timespec created; int len; void *key; } ;
886 typedef int ldv_func_ret_type;
15 typedef signed char s8;
35 typedef __u32 __be32;
106 typedef __u8 uint8_t;
111 typedef __u64 uint64_t;
63 struct blocking_notifier_head { struct rw_semaphore rwsem; struct notifier_block *head; } ;
12 struct plist_head { struct list_head node_list; } ;
77 union __anonunion_ldv_27330_194 { struct list_head q_node; struct kmem_cache *__rcu_icq_cache; } ;
77 union __anonunion_ldv_27334_195 { struct hlist_node ioc_node; struct callback_head __rcu_head; } ;
77 struct io_cq { struct request_queue *q; struct io_context *ioc; union __anonunion_ldv_27330_194 ldv_27330; union __anonunion_ldv_27334_195 ldv_27334; unsigned int flags; } ;
92 struct io_context { atomic_long_t refcount; atomic_t active_ref; atomic_t nr_tasks; spinlock_t lock; unsigned short ioprio; int nr_batch_requests; unsigned long last_waited; struct radix_tree_root icq_tree; struct io_cq *icq_hint; struct hlist_head icq_list; struct work_struct release_work; } ;
278 struct bio_integrity_payload { struct bio *bip_bio; struct bvec_iter bip_iter; void *bip_buf; bio_end_io_t *bip_end_io; unsigned short bip_slab; unsigned short bip_vcnt; unsigned short bip_max_vcnt; unsigned char bip_owns_buf; struct work_struct bip_work; struct bio_vec *bip_vec; struct bio_vec bip_inline_vecs[0U]; } ;
490 struct bio_list { struct bio *head; struct bio *tail; } ;
611 struct bio_set { struct kmem_cache *bio_slab; unsigned int front_pad; mempool_t *bio_pool; mempool_t *bvec_pool; mempool_t *bio_integrity_pool; mempool_t *bvec_integrity_pool; spinlock_t rescue_lock; struct bio_list rescue_list; struct work_struct rescue_work; struct workqueue_struct *rescue_workqueue; } ;
161 struct in6_addr ;
15 typedef u64 netdev_features_t;
712 struct rtable ;
84 struct pm_qos_request { struct plist_node node; int pm_qos_class; struct delayed_work work; } ;
48 struct pm_qos_flags_request { struct list_head node; s32 flags; } ;
53 enum dev_pm_qos_req_type { DEV_PM_QOS_RESUME_LATENCY = 1, DEV_PM_QOS_LATENCY_TOLERANCE = 2, DEV_PM_QOS_FLAGS = 3 } ;
59 union __anonunion_data_206 { struct plist_node pnode; struct pm_qos_flags_request flr; } ;
59 struct dev_pm_qos_request { enum dev_pm_qos_req_type type; union __anonunion_data_206 data; struct device *dev; } ;
68 enum pm_qos_type { PM_QOS_UNITIALIZED = 0, PM_QOS_MAX = 1, PM_QOS_MIN = 2 } ;
74 struct pm_qos_constraints { struct plist_head list; s32 target_value; s32 default_value; s32 no_constraint_value; enum pm_qos_type type; struct blocking_notifier_head *notifiers; } ;
88 struct pm_qos_flags { struct list_head list; s32 effective_flags; } ;
93 struct dev_pm_qos { struct pm_qos_constraints resume_latency; struct pm_qos_constraints latency_tolerance; struct pm_qos_flags flags; struct dev_pm_qos_request *resume_latency_req; struct dev_pm_qos_request *latency_tolerance_req; struct dev_pm_qos_request *flags_req; } ;
54 struct dql { unsigned int num_queued; unsigned int adj_limit; unsigned int last_obj_cnt; unsigned int limit; unsigned int num_completed; unsigned int prev_ovlimit; unsigned int prev_num_queued; unsigned int prev_last_obj_cnt; unsigned int lowest_slack; unsigned long slack_start_time; unsigned int max_limit; unsigned int min_limit; unsigned int slack_hold_time; } ;
43 struct __anonstruct_sync_serial_settings_207 { unsigned int clock_rate; unsigned int clock_type; unsigned short loopback; } ;
43 typedef struct __anonstruct_sync_serial_settings_207 sync_serial_settings;
50 struct __anonstruct_te1_settings_208 { unsigned int clock_rate; unsigned int clock_type; unsigned short loopback; unsigned int slot_map; } ;
50 typedef struct __anonstruct_te1_settings_208 te1_settings;
55 struct __anonstruct_raw_hdlc_proto_209 { unsigned short encoding; unsigned short parity; } ;
55 typedef struct __anonstruct_raw_hdlc_proto_209 raw_hdlc_proto;
65 struct __anonstruct_fr_proto_210 { unsigned int t391; unsigned int t392; unsigned int n391; unsigned int n392; unsigned int n393; unsigned short lmi; unsigned short dce; } ;
65 typedef struct __anonstruct_fr_proto_210 fr_proto;
69 struct __anonstruct_fr_proto_pvc_211 { unsigned int dlci; } ;
69 typedef struct __anonstruct_fr_proto_pvc_211 fr_proto_pvc;
74 struct __anonstruct_fr_proto_pvc_info_212 { unsigned int dlci; char master[16U]; } ;
74 typedef struct __anonstruct_fr_proto_pvc_info_212 fr_proto_pvc_info;
79 struct __anonstruct_cisco_proto_213 { unsigned int interval; unsigned int timeout; } ;
79 typedef struct __anonstruct_cisco_proto_213 cisco_proto;
117 struct ifmap { unsigned long mem_start; unsigned long mem_end; unsigned short base_addr; unsigned char irq; unsigned char dma; unsigned char port; } ;
177 union __anonunion_ifs_ifsu_214 { raw_hdlc_proto *raw_hdlc; cisco_proto *cisco; fr_proto *fr; fr_proto_pvc *fr_pvc; fr_proto_pvc_info *fr_pvc_info; sync_serial_settings *sync; te1_settings *te1; } ;
177 struct if_settings { unsigned int type; unsigned int size; union __anonunion_ifs_ifsu_214 ifs_ifsu; } ;
195 union __anonunion_ifr_ifrn_215 { char ifrn_name[16U]; } ;
195 union __anonunion_ifr_ifru_216 { struct sockaddr ifru_addr; struct sockaddr ifru_dstaddr; struct sockaddr ifru_broadaddr; struct sockaddr ifru_netmask; struct sockaddr ifru_hwaddr; short ifru_flags; int ifru_ivalue; int ifru_mtu; struct ifmap ifru_map; char ifru_slave[16U]; char ifru_newname[16U]; void *ifru_data; struct if_settings ifru_settings; } ;
195 struct ifreq { union __anonunion_ifr_ifrn_215 ifr_ifrn; union __anonunion_ifr_ifru_216 ifr_ifru; } ;
39 typedef s32 compat_long_t;
44 typedef u32 compat_uptr_t;
276 struct compat_robust_list { compat_uptr_t next; } ;
280 struct compat_robust_list_head { struct compat_robust_list list; compat_long_t futex_offset; compat_uptr_t list_op_pending; } ;
703 struct ethhdr { unsigned char h_dest[6U]; unsigned char h_source[6U]; __be16 h_proto; } ;
34 struct ethtool_cmd { __u32 cmd; __u32 supported; __u32 advertising; __u16 speed; __u8 duplex; __u8 port; __u8 phy_address; __u8 transceiver; __u8 autoneg; __u8 mdio_support; __u32 maxtxpkt; __u32 maxrxpkt; __u16 speed_hi; __u8 eth_tp_mdix; __u8 eth_tp_mdix_ctrl; __u32 lp_advertising; __u32 reserved[2U]; } ;
125 struct ethtool_drvinfo { __u32 cmd; char driver[32U]; char version[32U]; char fw_version[32U]; char bus_info[32U]; char reserved1[32U]; char reserved2[12U]; __u32 n_priv_flags; __u32 n_stats; __u32 testinfo_len; __u32 eedump_len; __u32 regdump_len; } ;
187 struct ethtool_wolinfo { __u32 cmd; __u32 supported; __u32 wolopts; __u8 sopass[6U]; } ;
211 struct ethtool_regs { __u32 cmd; __u32 version; __u32 len; __u8 data[0U]; } ;
233 struct ethtool_eeprom { __u32 cmd; __u32 magic; __u32 offset; __u32 len; __u8 data[0U]; } ;
259 struct ethtool_eee { __u32 cmd; __u32 supported; __u32 advertised; __u32 lp_advertised; __u32 eee_active; __u32 eee_enabled; __u32 tx_lpi_enabled; __u32 tx_lpi_timer; __u32 reserved[2U]; } ;
288 struct ethtool_modinfo { __u32 cmd; __u32 type; __u32 eeprom_len; __u32 reserved[8U]; } ;
305 struct ethtool_coalesce { __u32 cmd; __u32 rx_coalesce_usecs; __u32 rx_max_coalesced_frames; __u32 rx_coalesce_usecs_irq; __u32 rx_max_coalesced_frames_irq; __u32 tx_coalesce_usecs; __u32 tx_max_coalesced_frames; __u32 tx_coalesce_usecs_irq; __u32 tx_max_coalesced_frames_irq; __u32 stats_block_coalesce_usecs; __u32 use_adaptive_rx_coalesce; __u32 use_adaptive_tx_coalesce; __u32 pkt_rate_low; __u32 rx_coalesce_usecs_low; __u32 rx_max_coalesced_frames_low; __u32 tx_coalesce_usecs_low; __u32 tx_max_coalesced_frames_low; __u32 pkt_rate_high; __u32 rx_coalesce_usecs_high; __u32 rx_max_coalesced_frames_high; __u32 tx_coalesce_usecs_high; __u32 tx_max_coalesced_frames_high; __u32 rate_sample_interval; } ;
404 struct ethtool_ringparam { __u32 cmd; __u32 rx_max_pending; __u32 rx_mini_max_pending; __u32 rx_jumbo_max_pending; __u32 tx_max_pending; __u32 rx_pending; __u32 rx_mini_pending; __u32 rx_jumbo_pending; __u32 tx_pending; } ;
441 struct ethtool_channels { __u32 cmd; __u32 max_rx; __u32 max_tx; __u32 max_other; __u32 max_combined; __u32 rx_count; __u32 tx_count; __u32 other_count; __u32 combined_count; } ;
469 struct ethtool_pauseparam { __u32 cmd; __u32 autoneg; __u32 rx_pause; __u32 tx_pause; } ;
568 struct ethtool_test { __u32 cmd; __u32 flags; __u32 reserved; __u32 len; __u64 data[0U]; } ;
600 struct ethtool_stats { __u32 cmd; __u32 n_stats; __u64 data[0U]; } ;
642 struct ethtool_tcpip4_spec { __be32 ip4src; __be32 ip4dst; __be16 psrc; __be16 pdst; __u8 tos; } ;
675 struct ethtool_ah_espip4_spec { __be32 ip4src; __be32 ip4dst; __be32 spi; __u8 tos; } ;
691 struct ethtool_usrip4_spec { __be32 ip4src; __be32 ip4dst; __be32 l4_4_bytes; __u8 tos; __u8 ip_ver; __u8 proto; } ;
711 union ethtool_flow_union { struct ethtool_tcpip4_spec tcp_ip4_spec; struct ethtool_tcpip4_spec udp_ip4_spec; struct ethtool_tcpip4_spec sctp_ip4_spec; struct ethtool_ah_espip4_spec ah_ip4_spec; struct ethtool_ah_espip4_spec esp_ip4_spec; struct ethtool_usrip4_spec usr_ip4_spec; struct ethhdr ether_spec; __u8 hdata[52U]; } ;
722 struct ethtool_flow_ext { __u8 padding[2U]; unsigned char h_dest[6U]; __be16 vlan_etype; __be16 vlan_tci; __be32 data[2U]; } ;
741 struct ethtool_rx_flow_spec { __u32 flow_type; union ethtool_flow_union h_u; struct ethtool_flow_ext h_ext; union ethtool_flow_union m_u; struct ethtool_flow_ext m_ext; __u64 ring_cookie; __u32 location; } ;
767 struct ethtool_rxnfc { __u32 cmd; __u32 flow_type; __u64 data; struct ethtool_rx_flow_spec fs; __u32 rule_cnt; __u32 rule_locs[0U]; } ;
933 struct ethtool_flash { __u32 cmd; __u32 region; char data[128U]; } ;
941 struct ethtool_dump { __u32 cmd; __u32 version; __u32 flag; __u32 len; __u8 data[0U]; } ;
1017 struct ethtool_ts_info { __u32 cmd; __u32 so_timestamping; __s32 phc_index; __u32 tx_types; __u32 tx_reserved[3U]; __u32 rx_filters; __u32 rx_reserved[3U]; } ;
44 enum ethtool_phys_id_state { ETHTOOL_ID_INACTIVE = 0, ETHTOOL_ID_ACTIVE = 1, ETHTOOL_ID_ON = 2, ETHTOOL_ID_OFF = 3 } ;
79 struct ethtool_ops { int (*get_settings)(struct net_device *, struct ethtool_cmd *); int (*set_settings)(struct net_device *, struct ethtool_cmd *); void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *); int (*get_regs_len)(struct net_device *); void (*get_regs)(struct net_device *, struct ethtool_regs *, void *); void (*get_wol)(struct net_device *, struct ethtool_wolinfo *); int (*set_wol)(struct net_device *, struct ethtool_wolinfo *); u32 (*get_msglevel)(struct net_device *); void (*set_msglevel)(struct net_device *, u32 ); int (*nway_reset)(struct net_device *); u32 (*get_link)(struct net_device *); int (*get_eeprom_len)(struct net_device *); int (*get_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); int (*set_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); int (*get_coalesce)(struct net_device *, struct ethtool_coalesce *); int (*set_coalesce)(struct net_device *, struct ethtool_coalesce *); void (*get_ringparam)(struct net_device *, struct ethtool_ringparam *); int (*set_ringparam)(struct net_device *, struct ethtool_ringparam *); void (*get_pauseparam)(struct net_device *, struct ethtool_pauseparam *); int (*set_pauseparam)(struct net_device *, struct ethtool_pauseparam *); void (*self_test)(struct net_device *, struct ethtool_test *, u64 *); void (*get_strings)(struct net_device *, u32 , u8 *); int (*set_phys_id)(struct net_device *, enum ethtool_phys_id_state ); void (*get_ethtool_stats)(struct net_device *, struct ethtool_stats *, u64 *); int (*begin)(struct net_device *); void (*complete)(struct net_device *); u32 (*get_priv_flags)(struct net_device *); int (*set_priv_flags)(struct net_device *, u32 ); int (*get_sset_count)(struct net_device *, int); int (*get_rxnfc)(struct net_device *, struct ethtool_rxnfc *, u32 *); int (*set_rxnfc)(struct net_device *, struct ethtool_rxnfc *); int (*flash_device)(struct net_device *, struct ethtool_flash *); int (*reset)(struct net_device *, u32 *); u32 
(*get_rxfh_key_size)(struct net_device *); u32 (*get_rxfh_indir_size)(struct net_device *); int (*get_rxfh)(struct net_device *, u32 *, u8 *); int (*set_rxfh)(struct net_device *, const u32 *, const u8 *); void (*get_channels)(struct net_device *, struct ethtool_channels *); int (*set_channels)(struct net_device *, struct ethtool_channels *); int (*get_dump_flag)(struct net_device *, struct ethtool_dump *); int (*get_dump_data)(struct net_device *, struct ethtool_dump *, void *); int (*set_dump)(struct net_device *, struct ethtool_dump *); int (*get_ts_info)(struct net_device *, struct ethtool_ts_info *); int (*get_module_info)(struct net_device *, struct ethtool_modinfo *); int (*get_module_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); int (*get_eee)(struct net_device *, struct ethtool_eee *); int (*set_eee)(struct net_device *, struct ethtool_eee *); } ;
261 union __anonunion_in6_u_234 { __u8 u6_addr8[16U]; __be16 u6_addr16[8U]; __be32 u6_addr32[4U]; } ;
261 struct in6_addr { union __anonunion_in6_u_234 in6_u; } ;
40 struct sockaddr_in6 { unsigned short sin6_family; __be16 sin6_port; __be32 sin6_flowinfo; struct in6_addr sin6_addr; __u32 sin6_scope_id; } ;
79 struct icmpv6_mib_device { atomic_long_t mibs[6U]; } ;
89 struct icmpv6msg_mib_device { atomic_long_t mibs[512U]; } ;
110 struct in_addr { __be32 s_addr; } ;
214 struct sockaddr_in { __kernel_sa_family_t sin_family; __be16 sin_port; struct in_addr sin_addr; unsigned char __pad[8U]; } ;
324 struct nlattr ;
400 struct dsa_chip_data { struct device *mii_bus; int sw_addr; char *port_names[12U]; s8 *rtable; } ;
46 struct dsa_platform_data { struct device *netdev; int nr_chips; struct dsa_chip_data *chip; } ;
61 struct dsa_switch ;
61 struct dsa_switch_tree { struct dsa_platform_data *pd; struct net_device *master_netdev; __be16 tag_protocol; s8 cpu_switch; s8 cpu_port; int link_poll_needed; struct work_struct link_poll_work; struct timer_list link_poll_timer; struct dsa_switch *ds[4U]; } ;
94 struct dsa_switch_driver ;
94 struct mii_bus ;
94 struct dsa_switch { struct dsa_switch_tree *dst; int index; struct dsa_chip_data *pd; struct dsa_switch_driver *drv; struct mii_bus *master_mii_bus; u32 dsa_port_mask; u32 phys_port_mask; struct mii_bus *slave_mii_bus; struct net_device *ports[12U]; } ;
146 struct dsa_switch_driver { struct list_head list; __be16 tag_protocol; int priv_size; char * (*probe)(struct mii_bus *, int); int (*setup)(struct dsa_switch *); int (*set_addr)(struct dsa_switch *, u8 *); int (*phy_read)(struct dsa_switch *, int, int); int (*phy_write)(struct dsa_switch *, int, int, u16 ); void (*poll_link)(struct dsa_switch *); void (*get_strings)(struct dsa_switch *, int, uint8_t *); void (*get_ethtool_stats)(struct dsa_switch *, int, uint64_t *); int (*get_sset_count)(struct dsa_switch *); } ;
205 struct ieee_ets { __u8 willing; __u8 ets_cap; __u8 cbs; __u8 tc_tx_bw[8U]; __u8 tc_rx_bw[8U]; __u8 tc_tsa[8U]; __u8 prio_tc[8U]; __u8 tc_reco_bw[8U]; __u8 tc_reco_tsa[8U]; __u8 reco_prio_tc[8U]; } ;
69 struct ieee_maxrate { __u64 tc_maxrate[8U]; } ;
80 struct ieee_pfc { __u8 pfc_cap; __u8 pfc_en; __u8 mbc; __u16 delay; __u64 requests[8U]; __u64 indications[8U]; } ;
100 struct cee_pg { __u8 willing; __u8 error; __u8 pg_en; __u8 tcs_supported; __u8 pg_bw[8U]; __u8 prio_pg[8U]; } ;
123 struct cee_pfc { __u8 willing; __u8 error; __u8 pfc_en; __u8 tcs_supported; } ;
138 struct dcb_app { __u8 selector; __u8 priority; __u16 protocol; } ;
168 struct dcb_peer_app_info { __u8 willing; __u8 error; } ;
40 struct dcbnl_rtnl_ops { int (*ieee_getets)(struct net_device *, struct ieee_ets *); int (*ieee_setets)(struct net_device *, struct ieee_ets *); int (*ieee_getmaxrate)(struct net_device *, struct ieee_maxrate *); int (*ieee_setmaxrate)(struct net_device *, struct ieee_maxrate *); int (*ieee_getpfc)(struct net_device *, struct ieee_pfc *); int (*ieee_setpfc)(struct net_device *, struct ieee_pfc *); int (*ieee_getapp)(struct net_device *, struct dcb_app *); int (*ieee_setapp)(struct net_device *, struct dcb_app *); int (*ieee_delapp)(struct net_device *, struct dcb_app *); int (*ieee_peer_getets)(struct net_device *, struct ieee_ets *); int (*ieee_peer_getpfc)(struct net_device *, struct ieee_pfc *); u8 (*getstate)(struct net_device *); u8 (*setstate)(struct net_device *, u8 ); void (*getpermhwaddr)(struct net_device *, u8 *); void (*setpgtccfgtx)(struct net_device *, int, u8 , u8 , u8 , u8 ); void (*setpgbwgcfgtx)(struct net_device *, int, u8 ); void (*setpgtccfgrx)(struct net_device *, int, u8 , u8 , u8 , u8 ); void (*setpgbwgcfgrx)(struct net_device *, int, u8 ); void (*getpgtccfgtx)(struct net_device *, int, u8 *, u8 *, u8 *, u8 *); void (*getpgbwgcfgtx)(struct net_device *, int, u8 *); void (*getpgtccfgrx)(struct net_device *, int, u8 *, u8 *, u8 *, u8 *); void (*getpgbwgcfgrx)(struct net_device *, int, u8 *); void (*setpfccfg)(struct net_device *, int, u8 ); void (*getpfccfg)(struct net_device *, int, u8 *); u8 (*setall)(struct net_device *); u8 (*getcap)(struct net_device *, int, u8 *); int (*getnumtcs)(struct net_device *, int, u8 *); int (*setnumtcs)(struct net_device *, int, u8 ); u8 (*getpfcstate)(struct net_device *); void (*setpfcstate)(struct net_device *, u8 ); void (*getbcncfg)(struct net_device *, int, u32 *); void (*setbcncfg)(struct net_device *, int, u32 ); void (*getbcnrp)(struct net_device *, int, u8 *); void (*setbcnrp)(struct net_device *, int, u8 ); int (*setapp)(struct net_device *, u8 , u16 , u8 ); int (*getapp)(struct net_device *, u8 , 
u16 ); u8 (*getfeatcfg)(struct net_device *, int, u8 *); u8 (*setfeatcfg)(struct net_device *, int, u8 ); u8 (*getdcbx)(struct net_device *); u8 (*setdcbx)(struct net_device *, u8 ); int (*peer_getappinfo)(struct net_device *, struct dcb_peer_app_info *, u16 *); int (*peer_getapptable)(struct net_device *, struct dcb_app *); int (*cee_peer_getpg)(struct net_device *, struct cee_pg *); int (*cee_peer_getpfc)(struct net_device *, struct cee_pfc *); } ;
102 struct taskstats { __u16 version; __u32 ac_exitcode; __u8 ac_flag; __u8 ac_nice; __u64 cpu_count; __u64 cpu_delay_total; __u64 blkio_count; __u64 blkio_delay_total; __u64 swapin_count; __u64 swapin_delay_total; __u64 cpu_run_real_total; __u64 cpu_run_virtual_total; char ac_comm[32U]; __u8 ac_sched; __u8 ac_pad[3U]; __u32 ac_uid; __u32 ac_gid; __u32 ac_pid; __u32 ac_ppid; __u32 ac_btime; __u64 ac_etime; __u64 ac_utime; __u64 ac_stime; __u64 ac_minflt; __u64 ac_majflt; __u64 coremem; __u64 virtmem; __u64 hiwater_rss; __u64 hiwater_vm; __u64 read_char; __u64 write_char; __u64 read_syscalls; __u64 write_syscalls; __u64 read_bytes; __u64 write_bytes; __u64 cancelled_write_bytes; __u64 nvcsw; __u64 nivcsw; __u64 ac_utimescaled; __u64 ac_stimescaled; __u64 cpu_scaled_run_real_total; __u64 freepages_count; __u64 freepages_delay_total; } ;
58 struct percpu_ref ;
54 typedef void percpu_ref_func_t(struct percpu_ref *);
55 struct percpu_ref { atomic_t count; unsigned long pcpu_count_ptr; percpu_ref_func_t *release; percpu_ref_func_t *confirm_kill; struct callback_head rcu; } ;
227 struct cgroup_root ;
228 struct cgroup_subsys ;
229 struct cgroup ;
58 struct cgroup_subsys_state { struct cgroup *cgroup; struct cgroup_subsys *ss; struct percpu_ref refcnt; struct cgroup_subsys_state *parent; struct list_head sibling; struct list_head children; int id; unsigned int flags; u64 serial_nr; struct callback_head callback_head; struct work_struct destroy_work; } ;
167 struct cgroup { struct cgroup_subsys_state self; unsigned long flags; int id; int populated_cnt; struct kernfs_node *kn; struct kernfs_node *populated_kn; unsigned int subtree_control; unsigned int child_subsys_mask; struct cgroup_subsys_state *subsys[12U]; struct cgroup_root *root; struct list_head cset_links; struct list_head e_csets[12U]; struct list_head release_list; struct list_head pidlists; struct mutex pidlist_mutex; wait_queue_head_t offline_waitq; } ;
260 struct cgroup_root { struct kernfs_root *kf_root; unsigned int subsys_mask; int hierarchy_id; struct cgroup cgrp; atomic_t nr_cgrps; struct list_head root_list; unsigned int flags; struct idr cgroup_idr; char release_agent_path[4096U]; char name[64U]; } ;
299 struct css_set { atomic_t refcount; struct hlist_node hlist; struct list_head tasks; struct list_head mg_tasks; struct list_head cgrp_links; struct cgroup *dfl_cgrp; struct cgroup_subsys_state *subsys[12U]; struct list_head mg_preload_node; struct list_head mg_node; struct cgroup *mg_src_cgrp; struct css_set *mg_dst_cset; struct list_head e_cset_node[12U]; struct callback_head callback_head; } ;
382 struct cftype { char name[64U]; int private; umode_t mode; size_t max_write_len; unsigned int flags; struct cgroup_subsys *ss; struct list_head node; struct kernfs_ops *kf_ops; u64 (*read_u64)(struct cgroup_subsys_state *, struct cftype *); s64 (*read_s64)(struct cgroup_subsys_state *, struct cftype *); int (*seq_show)(struct seq_file *, void *); void * (*seq_start)(struct seq_file *, loff_t *); void * (*seq_next)(struct seq_file *, void *, loff_t *); void (*seq_stop)(struct seq_file *, void *); int (*write_u64)(struct cgroup_subsys_state *, struct cftype *, u64 ); int (*write_s64)(struct cgroup_subsys_state *, struct cftype *, s64 ); ssize_t (*write)(struct kernfs_open_file *, char *, size_t , loff_t ); struct lock_class_key lockdep_key; } ;
600 struct cgroup_taskset ;
608 struct cgroup_subsys { struct cgroup_subsys_state * (*css_alloc)(struct cgroup_subsys_state *); int (*css_online)(struct cgroup_subsys_state *); void (*css_offline)(struct cgroup_subsys_state *); void (*css_free)(struct cgroup_subsys_state *); void (*css_reset)(struct cgroup_subsys_state *); int (*can_attach)(struct cgroup_subsys_state *, struct cgroup_taskset *); void (*cancel_attach)(struct cgroup_subsys_state *, struct cgroup_taskset *); void (*attach)(struct cgroup_subsys_state *, struct cgroup_taskset *); void (*fork)(struct task_struct *); void (*exit)(struct cgroup_subsys_state *, struct cgroup_subsys_state *, struct task_struct *); void (*bind)(struct cgroup_subsys_state *); int disabled; int early_init; bool broken_hierarchy; bool warned_broken_hierarchy; int id; const char *name; struct cgroup_root *root; struct idr css_idr; struct list_head cfts; struct cftype *dfl_cftypes; struct cftype *legacy_cftypes; unsigned int depends_on; } ;
924 struct netprio_map { struct callback_head rcu; u32 priomap_len; u32 priomap[]; } ;
103 struct xfrm_policy ;
104 struct xfrm_state ;
124 struct request_sock ;
41 struct nlmsghdr { __u32 nlmsg_len; __u16 nlmsg_type; __u16 nlmsg_flags; __u32 nlmsg_seq; __u32 nlmsg_pid; } ;
145 struct nlattr { __u16 nla_len; __u16 nla_type; } ;
104 struct netlink_callback { struct sk_buff *skb; const struct nlmsghdr *nlh; int (*dump)(struct sk_buff *, struct netlink_callback *); int (*done)(struct netlink_callback *); void *data; struct module *module; u16 family; u16 min_dump_alloc; unsigned int prev_seq; unsigned int seq; long args[6U]; } ;
180 struct ndmsg { __u8 ndm_family; __u8 ndm_pad1; __u16 ndm_pad2; __s32 ndm_ifindex; __u16 ndm_state; __u8 ndm_flags; __u8 ndm_type; } ;
39 struct rtnl_link_stats64 { __u64 rx_packets; __u64 tx_packets; __u64 rx_bytes; __u64 tx_bytes; __u64 rx_errors; __u64 tx_errors; __u64 rx_dropped; __u64 tx_dropped; __u64 multicast; __u64 collisions; __u64 rx_length_errors; __u64 rx_over_errors; __u64 rx_crc_errors; __u64 rx_frame_errors; __u64 rx_fifo_errors; __u64 rx_missed_errors; __u64 tx_aborted_errors; __u64 tx_carrier_errors; __u64 tx_fifo_errors; __u64 tx_heartbeat_errors; __u64 tx_window_errors; __u64 rx_compressed; __u64 tx_compressed; } ;
553 struct ifla_vf_info { __u32 vf; __u8 mac[32U]; __u32 vlan; __u32 qos; __u32 spoofchk; __u32 linkstate; __u32 min_tx_rate; __u32 max_tx_rate; } ;
28 struct netpoll_info ;
29 struct phy_device ;
30 struct wireless_dev ;
61 enum netdev_tx { __NETDEV_TX_MIN = -2147483648, NETDEV_TX_OK = 0, NETDEV_TX_BUSY = 16, NETDEV_TX_LOCKED = 32 } ;
106 typedef enum netdev_tx netdev_tx_t;
125 struct net_device_stats { unsigned long rx_packets; unsigned long tx_packets; unsigned long rx_bytes; unsigned long tx_bytes; unsigned long rx_errors; unsigned long tx_errors; unsigned long rx_dropped; unsigned long tx_dropped; unsigned long multicast; unsigned long collisions; unsigned long rx_length_errors; unsigned long rx_over_errors; unsigned long rx_crc_errors; unsigned long rx_frame_errors; unsigned long rx_fifo_errors; unsigned long rx_missed_errors; unsigned long tx_aborted_errors; unsigned long tx_carrier_errors; unsigned long tx_fifo_errors; unsigned long tx_heartbeat_errors; unsigned long tx_window_errors; unsigned long rx_compressed; unsigned long tx_compressed; } ;
186 struct neigh_parms ;
207 struct netdev_hw_addr_list { struct list_head list; int count; } ;
212 struct hh_cache { u16 hh_len; u16 __pad; seqlock_t hh_lock; unsigned long hh_data[16U]; } ;
241 struct header_ops { int (*create)(struct sk_buff *, struct net_device *, unsigned short, const void *, const void *, unsigned int); int (*parse)(const struct sk_buff *, unsigned char *); int (*rebuild)(struct sk_buff *); int (*cache)(const struct neighbour *, struct hh_cache *, __be16 ); void (*cache_update)(struct hh_cache *, const struct net_device *, const unsigned char *); } ;
292 struct napi_struct { struct list_head poll_list; unsigned long state; int weight; unsigned int gro_count; int (*poll)(struct napi_struct *, int); spinlock_t poll_lock; int poll_owner; struct net_device *dev; struct sk_buff *gro_list; struct sk_buff *skb; struct list_head dev_list; struct hlist_node napi_hash_node; unsigned int napi_id; } ;
336 enum rx_handler_result { RX_HANDLER_CONSUMED = 0, RX_HANDLER_ANOTHER = 1, RX_HANDLER_EXACT = 2, RX_HANDLER_PASS = 3 } ;
384 typedef enum rx_handler_result rx_handler_result_t;
385 typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **);
522 struct Qdisc ;
522 struct netdev_queue { struct net_device *dev; struct Qdisc *qdisc; struct Qdisc *qdisc_sleeping; struct kobject kobj; int numa_node; spinlock_t _xmit_lock; int xmit_lock_owner; unsigned long trans_start; unsigned long trans_timeout; unsigned long state; struct dql dql; } ;
591 struct rps_map { unsigned int len; struct callback_head rcu; u16 cpus[0U]; } ;
603 struct rps_dev_flow { u16 cpu; u16 filter; unsigned int last_qtail; } ;
615 struct rps_dev_flow_table { unsigned int mask; struct callback_head rcu; struct rps_dev_flow flows[0U]; } ;
666 struct netdev_rx_queue { struct rps_map *rps_map; struct rps_dev_flow_table *rps_flow_table; struct kobject kobj; struct net_device *dev; } ;
689 struct xps_map { unsigned int len; unsigned int alloc_len; struct callback_head rcu; u16 queues[0U]; } ;
702 struct xps_dev_maps { struct callback_head rcu; struct xps_map *cpu_map[0U]; } ;
713 struct netdev_tc_txq { u16 count; u16 offset; } ;
724 struct netdev_fcoe_hbainfo { char manufacturer[64U]; char serial_number[64U]; char hardware_version[64U]; char driver_version[64U]; char optionrom_version[64U]; char firmware_version[64U]; char model[256U]; char model_description[256U]; } ;
740 struct netdev_phys_port_id { unsigned char id[32U]; unsigned char id_len; } ;
753 struct net_device_ops { int (*ndo_init)(struct net_device *); void (*ndo_uninit)(struct net_device *); int (*ndo_open)(struct net_device *); int (*ndo_stop)(struct net_device *); netdev_tx_t (*ndo_start_xmit)(struct sk_buff *, struct net_device *); u16 (*ndo_select_queue)(struct net_device *, struct sk_buff *, void *, u16 (*)(struct net_device *, struct sk_buff *)); void (*ndo_change_rx_flags)(struct net_device *, int); void (*ndo_set_rx_mode)(struct net_device *); int (*ndo_set_mac_address)(struct net_device *, void *); int (*ndo_validate_addr)(struct net_device *); int (*ndo_do_ioctl)(struct net_device *, struct ifreq *, int); int (*ndo_set_config)(struct net_device *, struct ifmap *); int (*ndo_change_mtu)(struct net_device *, int); int (*ndo_neigh_setup)(struct net_device *, struct neigh_parms *); void (*ndo_tx_timeout)(struct net_device *); struct rtnl_link_stats64 * (*ndo_get_stats64)(struct net_device *, struct rtnl_link_stats64 *); struct net_device_stats * (*ndo_get_stats)(struct net_device *); int (*ndo_vlan_rx_add_vid)(struct net_device *, __be16 , u16 ); int (*ndo_vlan_rx_kill_vid)(struct net_device *, __be16 , u16 ); void (*ndo_poll_controller)(struct net_device *); int (*ndo_netpoll_setup)(struct net_device *, struct netpoll_info *); void (*ndo_netpoll_cleanup)(struct net_device *); int (*ndo_busy_poll)(struct napi_struct *); int (*ndo_set_vf_mac)(struct net_device *, int, u8 *); int (*ndo_set_vf_vlan)(struct net_device *, int, u16 , u8 ); int (*ndo_set_vf_rate)(struct net_device *, int, int, int); int (*ndo_set_vf_spoofchk)(struct net_device *, int, bool ); int (*ndo_get_vf_config)(struct net_device *, int, struct ifla_vf_info *); int (*ndo_set_vf_link_state)(struct net_device *, int, int); int (*ndo_set_vf_port)(struct net_device *, int, struct nlattr **); int (*ndo_get_vf_port)(struct net_device *, int, struct sk_buff *); int (*ndo_setup_tc)(struct net_device *, u8 ); int (*ndo_fcoe_enable)(struct net_device *); int (*ndo_fcoe_disable)(struct 
net_device *); int (*ndo_fcoe_ddp_setup)(struct net_device *, u16 , struct scatterlist *, unsigned int); int (*ndo_fcoe_ddp_done)(struct net_device *, u16 ); int (*ndo_fcoe_ddp_target)(struct net_device *, u16 , struct scatterlist *, unsigned int); int (*ndo_fcoe_get_hbainfo)(struct net_device *, struct netdev_fcoe_hbainfo *); int (*ndo_fcoe_get_wwn)(struct net_device *, u64 *, int); int (*ndo_rx_flow_steer)(struct net_device *, const struct sk_buff *, u16 , u32 ); int (*ndo_add_slave)(struct net_device *, struct net_device *); int (*ndo_del_slave)(struct net_device *, struct net_device *); netdev_features_t (*ndo_fix_features)(struct net_device *, netdev_features_t ); int (*ndo_set_features)(struct net_device *, netdev_features_t ); int (*ndo_neigh_construct)(struct neighbour *); void (*ndo_neigh_destroy)(struct neighbour *); int (*ndo_fdb_add)(struct ndmsg *, struct nlattr **, struct net_device *, const unsigned char *, u16 ); int (*ndo_fdb_del)(struct ndmsg *, struct nlattr **, struct net_device *, const unsigned char *); int (*ndo_fdb_dump)(struct sk_buff *, struct netlink_callback *, struct net_device *, struct net_device *, int); int (*ndo_bridge_setlink)(struct net_device *, struct nlmsghdr *); int (*ndo_bridge_getlink)(struct sk_buff *, u32 , u32 , struct net_device *, u32 ); int (*ndo_bridge_dellink)(struct net_device *, struct nlmsghdr *); int (*ndo_change_carrier)(struct net_device *, bool ); int (*ndo_get_phys_port_id)(struct net_device *, struct netdev_phys_port_id *); void (*ndo_add_vxlan_port)(struct net_device *, sa_family_t , __be16 ); void (*ndo_del_vxlan_port)(struct net_device *, sa_family_t , __be16 ); void * (*ndo_dfwd_add_station)(struct net_device *, struct net_device *); void (*ndo_dfwd_del_station)(struct net_device *, void *); netdev_tx_t (*ndo_dfwd_start_xmit)(struct sk_buff *, struct net_device *, void *); int (*ndo_get_lock_subclass)(struct net_device *); } ;
1189 struct __anonstruct_adj_list_243 { struct list_head upper; struct list_head lower; } ;
1189 struct __anonstruct_all_adj_list_244 { struct list_head upper; struct list_head lower; } ;
1189 struct iw_handler_def ;
1189 struct iw_public_data ;
1189 struct forwarding_accel_ops ;
1189 struct vlan_info ;
1189 struct tipc_bearer ;
1189 struct in_device ;
1189 struct dn_dev ;
1189 struct inet6_dev ;
1189 struct cpu_rmap ;
1189 struct pcpu_lstats ;
1189 struct pcpu_sw_netstats ;
1189 struct pcpu_dstats ;
1189 struct pcpu_vstats ;
1189 union __anonunion_ldv_40273_245 { void *ml_priv; struct pcpu_lstats *lstats; struct pcpu_sw_netstats *tstats; struct pcpu_dstats *dstats; struct pcpu_vstats *vstats; } ;
1189 struct garp_port ;
1189 struct mrp_port ;
1189 struct rtnl_link_ops ;
1189 struct net_device { char name[16U]; struct hlist_node name_hlist; char *ifalias; unsigned long mem_end; unsigned long mem_start; unsigned long base_addr; int irq; unsigned long state; struct list_head dev_list; struct list_head napi_list; struct list_head unreg_list; struct list_head close_list; struct __anonstruct_adj_list_243 adj_list; struct __anonstruct_all_adj_list_244 all_adj_list; netdev_features_t features; netdev_features_t hw_features; netdev_features_t wanted_features; netdev_features_t vlan_features; netdev_features_t hw_enc_features; netdev_features_t mpls_features; int ifindex; int iflink; struct net_device_stats stats; atomic_long_t rx_dropped; atomic_long_t tx_dropped; atomic_t carrier_changes; const struct iw_handler_def *wireless_handlers; struct iw_public_data *wireless_data; const struct net_device_ops *netdev_ops; const struct ethtool_ops *ethtool_ops; const struct forwarding_accel_ops *fwd_ops; const struct header_ops *header_ops; unsigned int flags; unsigned int priv_flags; unsigned short gflags; unsigned short padded; unsigned char operstate; unsigned char link_mode; unsigned char if_port; unsigned char dma; unsigned int mtu; unsigned short type; unsigned short hard_header_len; unsigned short needed_headroom; unsigned short needed_tailroom; unsigned char perm_addr[32U]; unsigned char addr_assign_type; unsigned char addr_len; unsigned short neigh_priv_len; unsigned short dev_id; unsigned short dev_port; spinlock_t addr_list_lock; struct netdev_hw_addr_list uc; struct netdev_hw_addr_list mc; struct netdev_hw_addr_list dev_addrs; struct kset *queues_kset; unsigned char name_assign_type; bool uc_promisc; unsigned int promiscuity; unsigned int allmulti; struct vlan_info *vlan_info; struct dsa_switch_tree *dsa_ptr; struct tipc_bearer *tipc_ptr; void *atalk_ptr; struct in_device *ip_ptr; struct dn_dev *dn_ptr; struct inet6_dev *ip6_ptr; void *ax25_ptr; struct wireless_dev *ieee80211_ptr; unsigned long last_rx; unsigned char *dev_addr; struct 
netdev_rx_queue *_rx; unsigned int num_rx_queues; unsigned int real_num_rx_queues; rx_handler_func_t *rx_handler; void *rx_handler_data; struct netdev_queue *ingress_queue; unsigned char broadcast[32U]; struct netdev_queue *_tx; unsigned int num_tx_queues; unsigned int real_num_tx_queues; struct Qdisc *qdisc; unsigned long tx_queue_len; spinlock_t tx_global_lock; struct xps_dev_maps *xps_maps; struct cpu_rmap *rx_cpu_rmap; unsigned long trans_start; int watchdog_timeo; struct timer_list watchdog_timer; int *pcpu_refcnt; struct list_head todo_list; struct hlist_node index_hlist; struct list_head link_watch_list; unsigned char reg_state; bool dismantle; unsigned short rtnl_link_state; void (*destructor)(struct net_device *); struct netpoll_info *npinfo; struct net *nd_net; union __anonunion_ldv_40273_245 ldv_40273; struct garp_port *garp_port; struct mrp_port *mrp_port; struct device dev; const struct attribute_group *sysfs_groups[4U]; const struct attribute_group *sysfs_rx_queue_group; const struct rtnl_link_ops *rtnl_link_ops; unsigned int gso_max_size; u16 gso_max_segs; const struct dcbnl_rtnl_ops *dcbnl_ops; u8 num_tc; struct netdev_tc_txq tc_to_txq[16U]; u8 prio_tc_map[16U]; unsigned int fcoe_ddp_xid; struct netprio_map *priomap; struct phy_device *phydev; struct lock_class_key *qdisc_tx_busylock; int group; struct pm_qos_request pm_qos_req; } ;
1929 struct pcpu_sw_netstats { u64 rx_packets; u64 rx_bytes; u64 tx_packets; u64 tx_bytes; struct u64_stats_sync syncp; } ;
548 struct res_counter { unsigned long long usage; unsigned long long max_usage; unsigned long long limit; unsigned long long soft_limit; unsigned long long failcnt; spinlock_t lock; struct res_counter *parent; } ;
222 struct kioctx ;
30 typedef int kiocb_cancel_fn(struct kiocb *);
31 union __anonunion_ki_obj_246 { void *user; struct task_struct *tsk; } ;
31 struct eventfd_ctx ;
31 struct kiocb { struct file *ki_filp; struct kioctx *ki_ctx; kiocb_cancel_fn *ki_cancel; void *private; union __anonunion_ki_obj_246 ki_obj; __u64 ki_user_data; loff_t ki_pos; size_t ki_nbytes; struct list_head ki_list; struct eventfd_ctx *ki_eventfd; } ;
100 struct sock_filter { __u16 code; __u8 jt; __u8 jf; __u32 k; } ;
49 struct bpf_insn { __u8 code; unsigned char dst_reg; unsigned char src_reg; __s16 off; __s32 imm; } ;
316 struct sock_fprog_kern { u16 len; struct sock_filter *filter; } ;
322 union __anonunion_ldv_42065_247 { struct sock_filter insns[0U]; struct bpf_insn insnsi[0U]; struct work_struct work; } ;
322 struct bpf_prog { unsigned char jited; unsigned int len; struct sock_fprog_kern *orig_prog; unsigned int (*bpf_func)(const struct sk_buff *, const struct bpf_insn *); union __anonunion_ldv_42065_247 ldv_42065; } ;
339 struct sk_filter { atomic_t refcnt; struct callback_head rcu; struct bpf_prog *prog; } ;
32 struct poll_table_struct { void (*_qproc)(struct file *, wait_queue_head_t *, struct poll_table_struct *); unsigned long _key; } ;
114 struct nla_policy { u16 type; u16 len; } ;
25 struct rtnl_link_ops { struct list_head list; const char *kind; size_t priv_size; void (*setup)(struct net_device *); int maxtype; const struct nla_policy *policy; int (*validate)(struct nlattr **, struct nlattr **); int (*newlink)(struct net *, struct net_device *, struct nlattr **, struct nlattr **); int (*changelink)(struct net_device *, struct nlattr **, struct nlattr **); void (*dellink)(struct net_device *, struct list_head *); size_t (*get_size)(const struct net_device *); int (*fill_info)(struct sk_buff *, const struct net_device *); size_t (*get_xstats_size)(const struct net_device *); int (*fill_xstats)(struct sk_buff *, const struct net_device *); unsigned int (*get_num_tx_queues)(); unsigned int (*get_num_rx_queues)(); int slave_maxtype; const struct nla_policy *slave_policy; int (*slave_validate)(struct nlattr **, struct nlattr **); int (*slave_changelink)(struct net_device *, struct net_device *, struct nlattr **, struct nlattr **); size_t (*get_slave_size)(const struct net_device *, const struct net_device *); int (*fill_slave_info)(struct sk_buff *, const struct net_device *, const struct net_device *); } ;
168 struct neigh_table ;
168 struct neigh_parms { struct net *net; struct net_device *dev; struct neigh_parms *next; int (*neigh_setup)(struct neighbour *); void (*neigh_cleanup)(struct neighbour *); struct neigh_table *tbl; void *sysctl_table; int dead; atomic_t refcnt; struct callback_head callback_head; int reachable_time; int data[12U]; unsigned long data_state[1U]; } ;
111 struct neigh_statistics { unsigned long allocs; unsigned long destroys; unsigned long hash_grows; unsigned long res_failed; unsigned long lookups; unsigned long hits; unsigned long rcv_probes_mcast; unsigned long rcv_probes_ucast; unsigned long periodic_gc_runs; unsigned long forced_gc_runs; unsigned long unres_discards; } ;
130 struct neigh_ops ;
130 struct neighbour { struct neighbour *next; struct neigh_table *tbl; struct neigh_parms *parms; unsigned long confirmed; unsigned long updated; rwlock_t lock; atomic_t refcnt; struct sk_buff_head arp_queue; unsigned int arp_queue_len_bytes; struct timer_list timer; unsigned long used; atomic_t probes; __u8 flags; __u8 nud_state; __u8 type; __u8 dead; seqlock_t ha_lock; unsigned char ha[32U]; struct hh_cache hh; int (*output)(struct neighbour *, struct sk_buff *); const struct neigh_ops *ops; struct callback_head rcu; struct net_device *dev; u8 primary_key[0U]; } ;
159 struct neigh_ops { int family; void (*solicit)(struct neighbour *, struct sk_buff *); void (*error_report)(struct neighbour *, struct sk_buff *); int (*output)(struct neighbour *, struct sk_buff *); int (*connected_output)(struct neighbour *, struct sk_buff *); } ;
167 struct pneigh_entry { struct pneigh_entry *next; struct net *net; struct net_device *dev; u8 flags; u8 key[0U]; } ;
177 struct neigh_hash_table { struct neighbour **hash_buckets; unsigned int hash_shift; __u32 hash_rnd[4U]; struct callback_head rcu; } ;
190 struct neigh_table { struct neigh_table *next; int family; int entry_size; int key_len; __u32 (*hash)(const void *, const struct net_device *, __u32 *); int (*constructor)(struct neighbour *); int (*pconstructor)(struct pneigh_entry *); void (*pdestructor)(struct pneigh_entry *); void (*proxy_redo)(struct sk_buff *); char *id; struct neigh_parms parms; int gc_interval; int gc_thresh1; int gc_thresh2; int gc_thresh3; unsigned long last_flush; struct delayed_work gc_work; struct timer_list proxy_timer; struct sk_buff_head proxy_queue; atomic_t entries; rwlock_t lock; unsigned long last_rand; struct neigh_statistics *stats; struct neigh_hash_table *nht; struct pneigh_entry **phash_buckets; } ;
461 struct dn_route ;
461 union __anonunion_ldv_43574_252 { struct dst_entry *next; struct rtable *rt_next; struct rt6_info *rt6_next; struct dn_route *dn_next; } ;
461 struct dst_entry { struct callback_head callback_head; struct dst_entry *child; struct net_device *dev; struct dst_ops *ops; unsigned long _metrics; unsigned long expires; struct dst_entry *path; struct dst_entry *from; struct xfrm_state *xfrm; int (*input)(struct sk_buff *); int (*output)(struct sock *, struct sk_buff *); unsigned short flags; unsigned short pending_confirm; short error; short obsolete; unsigned short header_len; unsigned short trailer_len; __u32 tclassid; long __pad_to_align_refcnt[2U]; atomic_t __refcnt; int __use; unsigned long lastuse; union __anonunion_ldv_43574_252 ldv_43574; } ;
124 struct __anonstruct_socket_lock_t_253 { spinlock_t slock; int owned; wait_queue_head_t wq; struct lockdep_map dep_map; } ;
124 typedef struct __anonstruct_socket_lock_t_253 socket_lock_t;
124 struct proto ;
130 typedef __u32 __portpair;
131 typedef __u64 __addrpair;
132 struct __anonstruct_ldv_43846_255 { __be32 skc_daddr; __be32 skc_rcv_saddr; } ;
132 union __anonunion_ldv_43847_254 { __addrpair skc_addrpair; struct __anonstruct_ldv_43846_255 ldv_43846; } ;
132 union __anonunion_ldv_43851_256 { unsigned int skc_hash; __u16 skc_u16hashes[2U]; } ;
132 struct __anonstruct_ldv_43857_258 { __be16 skc_dport; __u16 skc_num; } ;
132 union __anonunion_ldv_43858_257 { __portpair skc_portpair; struct __anonstruct_ldv_43857_258 ldv_43857; } ;
132 union __anonunion_ldv_43868_259 { struct hlist_node skc_bind_node; struct hlist_nulls_node skc_portaddr_node; } ;
132 union __anonunion_ldv_43877_260 { struct hlist_node skc_node; struct hlist_nulls_node skc_nulls_node; } ;
132 struct sock_common { union __anonunion_ldv_43847_254 ldv_43847; union __anonunion_ldv_43851_256 ldv_43851; union __anonunion_ldv_43858_257 ldv_43858; unsigned short skc_family; volatile unsigned char skc_state; unsigned char skc_reuse; unsigned char skc_reuseport; unsigned char skc_ipv6only; int skc_bound_dev_if; union __anonunion_ldv_43868_259 ldv_43868; struct proto *skc_prot; struct net *skc_net; struct in6_addr skc_v6_daddr; struct in6_addr skc_v6_rcv_saddr; int skc_dontcopy_begin[0U]; union __anonunion_ldv_43877_260 ldv_43877; int skc_tx_queue_mapping; atomic_t skc_refcnt; int skc_dontcopy_end[0U]; } ;
218 struct cg_proto ;
219 struct __anonstruct_sk_backlog_261 { atomic_t rmem_alloc; int len; struct sk_buff *head; struct sk_buff *tail; } ;
219 struct sock { struct sock_common __sk_common; socket_lock_t sk_lock; struct sk_buff_head sk_receive_queue; struct __anonstruct_sk_backlog_261 sk_backlog; int sk_forward_alloc; __u32 sk_rxhash; __u32 sk_txhash; unsigned int sk_napi_id; unsigned int sk_ll_usec; atomic_t sk_drops; int sk_rcvbuf; struct sk_filter *sk_filter; struct socket_wq *sk_wq; struct xfrm_policy *sk_policy[2U]; unsigned long sk_flags; struct dst_entry *sk_rx_dst; struct dst_entry *sk_dst_cache; spinlock_t sk_dst_lock; atomic_t sk_wmem_alloc; atomic_t sk_omem_alloc; int sk_sndbuf; struct sk_buff_head sk_write_queue; unsigned char sk_shutdown; unsigned char sk_no_check_tx; unsigned char sk_no_check_rx; unsigned char sk_userlocks; unsigned char sk_protocol; unsigned short sk_type; int sk_wmem_queued; gfp_t sk_allocation; u32 sk_pacing_rate; u32 sk_max_pacing_rate; netdev_features_t sk_route_caps; netdev_features_t sk_route_nocaps; int sk_gso_type; unsigned int sk_gso_max_size; u16 sk_gso_max_segs; int sk_rcvlowat; unsigned long sk_lingertime; struct sk_buff_head sk_error_queue; struct proto *sk_prot_creator; rwlock_t sk_callback_lock; int sk_err; int sk_err_soft; unsigned short sk_ack_backlog; unsigned short sk_max_ack_backlog; __u32 sk_priority; __u32 sk_cgrp_prioidx; struct pid *sk_peer_pid; const struct cred *sk_peer_cred; long sk_rcvtimeo; long sk_sndtimeo; void *sk_protinfo; struct timer_list sk_timer; ktime_t sk_stamp; u16 sk_tsflags; u32 sk_tskey; struct socket *sk_socket; void *sk_user_data; struct page_frag sk_frag; struct sk_buff *sk_send_head; __s32 sk_peek_off; int sk_write_pending; void *sk_security; __u32 sk_mark; u32 sk_classid; struct cg_proto *sk_cgrp; void (*sk_state_change)(struct sock *); void (*sk_data_ready)(struct sock *); void (*sk_write_space)(struct sock *); void (*sk_error_report)(struct sock *); int (*sk_backlog_rcv)(struct sock *, struct sk_buff *); void (*sk_destruct)(struct sock *); } ;
919 struct request_sock_ops ;
920 struct timewait_sock_ops ;
921 struct inet_hashinfo ;
922 struct raw_hashinfo ;
937 struct udp_table ;
937 union __anonunion_h_262 { struct inet_hashinfo *hashinfo; struct udp_table *udp_table; struct raw_hashinfo *raw_hash; } ;
937 struct proto { void (*close)(struct sock *, long); int (*connect)(struct sock *, struct sockaddr *, int); int (*disconnect)(struct sock *, int); struct sock * (*accept)(struct sock *, int, int *); int (*ioctl)(struct sock *, int, unsigned long); int (*init)(struct sock *); void (*destroy)(struct sock *); void (*shutdown)(struct sock *, int); int (*setsockopt)(struct sock *, int, int, char *, unsigned int); int (*getsockopt)(struct sock *, int, int, char *, int *); int (*compat_setsockopt)(struct sock *, int, int, char *, unsigned int); int (*compat_getsockopt)(struct sock *, int, int, char *, int *); int (*compat_ioctl)(struct sock *, unsigned int, unsigned long); int (*sendmsg)(struct kiocb *, struct sock *, struct msghdr *, size_t ); int (*recvmsg)(struct kiocb *, struct sock *, struct msghdr *, size_t , int, int, int *); int (*sendpage)(struct sock *, struct page *, int, size_t , int); int (*bind)(struct sock *, struct sockaddr *, int); int (*backlog_rcv)(struct sock *, struct sk_buff *); void (*release_cb)(struct sock *); void (*hash)(struct sock *); void (*unhash)(struct sock *); void (*rehash)(struct sock *); int (*get_port)(struct sock *, unsigned short); void (*clear_sk)(struct sock *, int); unsigned int inuse_idx; bool (*stream_memory_free)(const struct sock *); void (*enter_memory_pressure)(struct sock *); atomic_long_t *memory_allocated; struct percpu_counter *sockets_allocated; int *memory_pressure; long *sysctl_mem; int *sysctl_wmem; int *sysctl_rmem; int max_header; bool no_autobind; struct kmem_cache *slab; unsigned int obj_size; int slab_flags; struct percpu_counter *orphan_count; struct request_sock_ops *rsk_prot; struct timewait_sock_ops *twsk_prot; union __anonunion_h_262 h; struct module *owner; char name[32U]; struct list_head node; int (*init_cgroup)(struct mem_cgroup *, struct cgroup_subsys *); void (*destroy_cgroup)(struct mem_cgroup *); struct cg_proto * (*proto_cgroup)(struct mem_cgroup *); } ;
1061 struct cg_proto { struct res_counter memory_allocated; struct percpu_counter sockets_allocated; int memory_pressure; long sysctl_mem[3U]; unsigned long flags; struct mem_cgroup *memcg; } ;
169 struct request_sock_ops { int family; int obj_size; struct kmem_cache *slab; char *slab_name; int (*rtx_syn_ack)(struct sock *, struct request_sock *); void (*send_ack)(struct sock *, struct sk_buff *, struct request_sock *); void (*send_reset)(struct sock *, struct sk_buff *); void (*destructor)(struct request_sock *); void (*syn_ack_timeout)(struct sock *, struct request_sock *); } ;
47 struct request_sock { struct sock_common __req_common; struct request_sock *dl_next; u16 mss; u8 num_retrans; unsigned char cookie_ts; unsigned char num_timeout; u32 window_clamp; u32 rcv_wnd; u32 ts_recent; unsigned long expires; const struct request_sock_ops *rsk_ops; struct sock *sk; u32 secid; u32 peer_secid; } ;
373 struct timewait_sock_ops { struct kmem_cache *twsk_slab; char *twsk_slab_name; unsigned int twsk_obj_size; int (*twsk_unique)(struct sock *, struct sock *, void *); void (*twsk_destructor)(struct sock *); } ;
167 struct ipv6_devconf { __s32 forwarding; __s32 hop_limit; __s32 mtu6; __s32 accept_ra; __s32 accept_redirects; __s32 autoconf; __s32 dad_transmits; __s32 rtr_solicits; __s32 rtr_solicit_interval; __s32 rtr_solicit_delay; __s32 force_mld_version; __s32 mldv1_unsolicited_report_interval; __s32 mldv2_unsolicited_report_interval; __s32 use_tempaddr; __s32 temp_valid_lft; __s32 temp_prefered_lft; __s32 regen_max_retry; __s32 max_desync_factor; __s32 max_addresses; __s32 accept_ra_defrtr; __s32 accept_ra_pinfo; __s32 accept_ra_rtr_pref; __s32 rtr_probe_interval; __s32 accept_ra_rt_info_max_plen; __s32 proxy_ndp; __s32 accept_source_route; __s32 accept_ra_from_local; __s32 optimistic_dad; __s32 mc_forwarding; __s32 disable_ipv6; __s32 accept_dad; __s32 force_tllao; __s32 ndisc_notify; __s32 suppress_frag_ndisc; void *sysctl; } ;
101 struct ip6_sf_list { struct ip6_sf_list *sf_next; struct in6_addr sf_addr; unsigned long sf_count[2U]; unsigned char sf_gsresp; unsigned char sf_oldin; unsigned char sf_crcount; } ;
110 struct ifmcaddr6 { struct in6_addr mca_addr; struct inet6_dev *idev; struct ifmcaddr6 *next; struct ip6_sf_list *mca_sources; struct ip6_sf_list *mca_tomb; unsigned int mca_sfmode; unsigned char mca_crcount; unsigned long mca_sfcount[2U]; struct timer_list mca_timer; unsigned int mca_flags; int mca_users; atomic_t mca_refcnt; spinlock_t mca_lock; unsigned long mca_cstamp; unsigned long mca_tstamp; } ;
142 struct ifacaddr6 { struct in6_addr aca_addr; struct inet6_dev *aca_idev; struct rt6_info *aca_rt; struct ifacaddr6 *aca_next; int aca_users; atomic_t aca_refcnt; spinlock_t aca_lock; unsigned long aca_cstamp; unsigned long aca_tstamp; } ;
154 struct ipv6_devstat { struct proc_dir_entry *proc_dir_entry; struct ipstats_mib *ipv6; struct icmpv6_mib_device *icmpv6dev; struct icmpv6msg_mib_device *icmpv6msgdev; } ;
165 struct inet6_dev { struct net_device *dev; struct list_head addr_list; struct ifmcaddr6 *mc_list; struct ifmcaddr6 *mc_tomb; spinlock_t mc_lock; unsigned char mc_qrv; unsigned char mc_gq_running; unsigned char mc_ifc_count; unsigned char mc_dad_count; unsigned long mc_v1_seen; unsigned long mc_qi; unsigned long mc_qri; unsigned long mc_maxdelay; struct timer_list mc_gq_timer; struct timer_list mc_ifc_timer; struct timer_list mc_dad_timer; struct ifacaddr6 *ac_list; rwlock_t lock; atomic_t refcnt; __u32 if_flags; int dead; u8 rndid[8U]; struct timer_list regen_timer; struct list_head tempaddr_list; struct in6_addr token; struct neigh_parms *nd_parms; struct ipv6_devconf cnf; struct ipv6_devstat stats; struct timer_list rs_timer; __u8 rs_probes; __u8 addr_gen_mode; unsigned long tstamp; struct callback_head rcu; } ;
917 union __anonunion_ldv_48246_277 { __be32 a4; __be32 a6[4U]; } ;
917 struct inetpeer_addr_base { union __anonunion_ldv_48246_277 ldv_48246; } ;
24 struct inetpeer_addr { struct inetpeer_addr_base addr; __u16 family; } ;
29 union __anonunion_ldv_48261_278 { struct list_head gc_list; struct callback_head gc_rcu; } ;
29 struct __anonstruct_ldv_48265_280 { atomic_t rid; } ;
29 union __anonunion_ldv_48268_279 { struct __anonstruct_ldv_48265_280 ldv_48265; struct callback_head rcu; struct inet_peer *gc_next; } ;
29 struct inet_peer { struct inet_peer *avl_left; struct inet_peer *avl_right; struct inetpeer_addr daddr; __u32 avl_height; u32 metrics[15U]; u32 rate_tokens; unsigned long rate_last; union __anonunion_ldv_48261_278 ldv_48261; union __anonunion_ldv_48268_279 ldv_48268; __u32 dtime; atomic_t refcnt; } ;
60 struct inet_peer_base { struct inet_peer *root; seqlock_t lock; u32 flush_seq; int total; } ;
50 struct rtable { struct dst_entry dst; int rt_genid; unsigned int rt_flags; __u16 rt_type; __u8 rt_is_input; __u8 rt_uses_gateway; int rt_iif; __be32 rt_gateway; u32 rt_pmtu; struct list_head rt_uncached; } ;
315 struct inet_ehash_bucket { struct hlist_nulls_head chain; } ;
95 struct inet_bind_hashbucket { spinlock_t lock; struct hlist_head chain; } ;
103 struct inet_listen_hashbucket { spinlock_t lock; struct hlist_nulls_head head; } ;
115 struct inet_hashinfo { struct inet_ehash_bucket *ehash; spinlock_t *ehash_locks; unsigned int ehash_mask; unsigned int ehash_locks_mask; struct inet_bind_hashbucket *bhash; unsigned int bhash_size; struct kmem_cache *bind_bucket_cachep; struct inet_listen_hashbucket listening_hash[32U]; atomic_t bsockets; } ;
14 struct ceph_pagelist_cursor { struct ceph_pagelist *pl; struct list_head *page_lru; size_t room; } ;
104 struct ceph_mon_request_header { __le64 have_version; __le16 session_mon; __le64 session_mon_tid; } ;
177 struct ceph_mon_statfs { struct ceph_mon_request_header monhdr; struct ceph_fsid fsid; } ;
182 struct ceph_statfs { __le64 kb; __le64 kb_used; __le64 kb_avail; __le64 num_objects; } ;
187 struct ceph_mon_statfs_reply { struct ceph_fsid fsid; __le64 version; struct ceph_statfs st; } ;
195 struct ceph_mon_poolop { struct ceph_mon_request_header monhdr; struct ceph_fsid fsid; __le32 pool; __le32 op; __le64 auid; __le64 snapid; __le32 name_len; } ;
205 struct ceph_mon_poolop_reply { struct ceph_mon_request_header monhdr; struct ceph_fsid fsid; __le32 reply_code; __le32 epoch; char has_data; char data[0U]; } ;
233 struct ceph_mon_subscribe_item { __le64 have_version; __le64 have; __u8 onetime; } ;
240 struct ceph_mon_subscribe_ack { __le32 duration; struct ceph_fsid fsid; } ;
24 struct ceph_mon_generic_request ;
41 struct ceph_mon_generic_request { struct kref kref; u64 tid; struct rb_node node; int result; void *buf; int buf_len; struct completion completion; struct ceph_msg *request; struct ceph_msg *reply; } ;
112 struct ceph_eversion { __le32 epoch; __le64 version; } ;
375 struct __anonstruct_extent_205 { __le64 offset; __le64 length; __le64 truncate_size; __le32 truncate_seq; } ;
375 struct __anonstruct_xattr_206 { __le32 name_len; __le32 value_len; __u8 cmp_op; __u8 cmp_mode; } ;
375 struct __anonstruct_cls_207 { __u8 class_len; __u8 method_len; __u8 argc; __le32 indata_len; } ;
375 struct __anonstruct_pgls_208 { __le64 cookie; __le64 count; } ;
375 struct __anonstruct_snap_209 { __le64 snapid; } ;
375 struct __anonstruct_watch_210 { __le64 cookie; __le64 ver; __u8 flag; } ;
375 struct __anonstruct_clonerange_211 { __le64 offset; __le64 length; __le64 src_offset; } ;
375 struct __anonstruct_alloc_hint_212 { __le64 expected_object_size; __le64 expected_write_size; } ;
375 union __anonunion_ldv_29384_204 { struct __anonstruct_extent_205 extent; struct __anonstruct_xattr_206 xattr; struct __anonstruct_cls_207 cls; struct __anonstruct_pgls_208 pgls; struct __anonstruct_snap_209 snap; struct __anonstruct_watch_210 watch; struct __anonstruct_clonerange_211 clonerange; struct __anonstruct_alloc_hint_212 alloc_hint; } ;
375 struct ceph_osd_op { __le16 op; __le32 flags; union __anonunion_ldv_29384_204 ldv_29384; __le32 payload_len; } ;
434 struct ceph_file_layout { __le32 fl_stripe_unit; __le32 fl_stripe_count; __le32 fl_object_size; __le32 fl_cas_hash; __le32 fl_object_stripe_unit; __le32 fl_unused; __le32 fl_pg_pool; } ;
12 struct ceph_vino { u64 ino; u64 snap; } ;
200 struct ceph_pg { uint64_t pool; uint32_t seed; } ;
26 struct ceph_pg_pool_info { struct rb_node node; s64 id; u8 type; u8 size; u8 crush_ruleset; u8 object_hash; u32 pg_num; u32 pgp_num; int pg_num_mask; int pgp_num_mask; s64 read_tier; s64 write_tier; u64 flags; char *name; } ;
56 struct ceph_object_locator { s64 pool; } ;
59 struct ceph_object_id { char name[100U]; int name_len; } ;
74 struct ceph_snap_context ;
75 struct ceph_osd_request ;
27 struct ceph_osd { atomic_t o_ref; struct ceph_osd_client *o_osdc; int o_osd; int o_incarnation; struct rb_node o_node; struct ceph_connection o_con; struct list_head o_requests; struct list_head o_linger_requests; struct list_head o_osd_lru; struct ceph_auth_handshake o_auth; unsigned long lru_ttl; int o_marked_for_keepalive; struct list_head o_keepalive_item; } ;
44 enum ceph_osd_data_type { CEPH_OSD_DATA_TYPE_NONE = 0, CEPH_OSD_DATA_TYPE_PAGES = 1, CEPH_OSD_DATA_TYPE_PAGELIST = 2, CEPH_OSD_DATA_TYPE_BIO = 3 } ;
51 struct __anonstruct_ldv_31142_233 { struct page **pages; u64 length; u32 alignment; bool pages_from_pool; bool own_pages; } ;
51 struct __anonstruct_ldv_31147_234 { struct bio *bio; size_t bio_length; } ;
51 union __anonunion_ldv_31148_232 { struct __anonstruct_ldv_31142_233 ldv_31142; struct ceph_pagelist *pagelist; struct __anonstruct_ldv_31147_234 ldv_31147; } ;
51 struct ceph_osd_data { enum ceph_osd_data_type type; union __anonunion_ldv_31148_232 ldv_31148; } ;
75 struct __anonstruct_extent_236 { u64 offset; u64 length; u64 truncate_size; u32 truncate_seq; struct ceph_osd_data osd_data; } ;
75 struct __anonstruct_cls_237 { const char *class_name; const char *method_name; struct ceph_osd_data request_info; struct ceph_osd_data request_data; struct ceph_osd_data response_data; __u8 class_len; __u8 method_len; __u8 argc; } ;
75 struct __anonstruct_watch_238 { u64 cookie; u64 ver; u32 prot_ver; u32 timeout; __u8 flag; } ;
75 struct __anonstruct_alloc_hint_239 { u64 expected_object_size; u64 expected_write_size; } ;
75 union __anonunion_ldv_31183_235 { struct ceph_osd_data raw_data_in; struct __anonstruct_extent_236 extent; struct __anonstruct_cls_237 cls; struct __anonstruct_watch_238 watch; struct __anonstruct_alloc_hint_239 alloc_hint; } ;
75 struct ceph_osd_req_op { u16 op; u32 flags; u32 payload_len; union __anonunion_ldv_31183_235 ldv_31183; } ;
112 struct ceph_osd_request { u64 r_tid; struct rb_node r_node; struct list_head r_req_lru_item; struct list_head r_osd_item; struct list_head r_linger_item; struct list_head r_linger_osd_item; struct ceph_osd *r_osd; struct ceph_pg r_pgid; int r_pg_osds[16U]; int r_num_pg_osds; struct ceph_msg *r_request; struct ceph_msg *r_reply; int r_flags; u32 r_sent; unsigned int r_num_ops; struct ceph_osd_req_op r_ops[3U]; __le32 *r_request_osdmap_epoch; __le32 *r_request_flags; __le64 *r_request_pool; void *r_request_pgid; __le32 *r_request_attempts; bool r_paused; struct ceph_eversion *r_request_reassert_version; int r_result; int r_reply_op_len[3U]; s32 r_reply_op_result[3U]; int r_got_reply; int r_linger; struct ceph_osd_client *r_osdc; struct kref r_kref; bool r_mempool; struct completion r_completion; struct completion r_safe_completion; void (*r_callback)(struct ceph_osd_request *, struct ceph_msg *); void (*r_unsafe_callback)(struct ceph_osd_request *, bool ); struct ceph_eversion r_reassert_version; struct list_head r_unsafe_item; struct inode *r_inode; void *r_priv; struct ceph_object_locator r_base_oloc; struct ceph_object_id r_base_oid; struct ceph_object_locator r_target_oloc; struct ceph_object_id r_target_oid; u64 r_snapid; unsigned long r_stamp; struct ceph_snap_context *r_snapc; } ;
171 struct ceph_request_redirect { struct ceph_object_locator oloc; } ;
175 struct ceph_osd_event { u64 cookie; int one_shot; struct ceph_osd_client *osdc; void (*cb)(u64 , u64 , u8 , void *); void *data; struct rb_node node; struct list_head osd_node; struct kref kref; } ;
186 struct ceph_osd_event_work { struct work_struct work; struct ceph_osd_event *event; u64 ver; u64 notify_id; u8 opcode; } ;
137 struct ceph_snap_context { atomic_t nref; u64 seq; u32 num_snaps; u64 snaps[]; } ;
29 typedef long long __s64;
126 struct crush_bucket_uniform { struct crush_bucket h; __u32 item_weight; } ;
131 struct crush_bucket_list { struct crush_bucket h; __u32 *item_weights; __u32 *sum_weights; } ;
137 struct crush_bucket_tree { struct crush_bucket h; __u8 num_nodes; __u32 *node_weights; } ;
145 struct crush_bucket_straw { struct crush_bucket h; __u32 *item_weights; __u32 *straws; } ;
71 struct __anonstruct_pg_temp_228 { int len; int osds[]; } ;
71 struct __anonstruct_primary_temp_229 { int osd; } ;
71 union __anonunion_ldv_30308_227 { struct __anonstruct_pg_temp_228 pg_temp; struct __anonstruct_primary_temp_229 primary_temp; } ;
71 struct ceph_pg_mapping { struct rb_node node; struct ceph_pg pgid; union __anonunion_ldv_30308_227 ldv_30308; } ;
114 struct ceph_none_authorizer { char buf[128U]; int buf_len; char reply_buf[0U]; } ;
19 struct ceph_auth_none_info { bool starting; bool built_authorizer; struct ceph_none_authorizer au; } ;
109 struct crypto_ablkcipher ;
110 struct crypto_async_request ;
111 struct crypto_aead ;
112 struct crypto_blkcipher ;
113 struct crypto_hash ;
114 struct crypto_rng ;
115 struct crypto_tfm ;
116 struct crypto_type ;
117 struct aead_givcrypt_request ;
118 struct skcipher_givcrypt_request ;
129 struct crypto_async_request { struct list_head list; void (*complete)(struct crypto_async_request *, int); void *data; struct crypto_tfm *tfm; u32 flags; } ;
138 struct ablkcipher_request { struct crypto_async_request base; unsigned int nbytes; void *info; struct scatterlist *src; struct scatterlist *dst; void *__ctx[]; } ;
151 struct aead_request { struct crypto_async_request base; unsigned int assoclen; unsigned int cryptlen; u8 *iv; struct scatterlist *assoc; struct scatterlist *src; struct scatterlist *dst; void *__ctx[]; } ;
177 struct blkcipher_desc { struct crypto_blkcipher *tfm; void *info; u32 flags; } ;
191 struct hash_desc { struct crypto_hash *tfm; u32 flags; } ;
196 struct ablkcipher_alg { int (*setkey)(struct crypto_ablkcipher *, const u8 *, unsigned int); int (*encrypt)(struct ablkcipher_request *); int (*decrypt)(struct ablkcipher_request *); int (*givencrypt)(struct skcipher_givcrypt_request *); int (*givdecrypt)(struct skcipher_givcrypt_request *); const char *geniv; unsigned int min_keysize; unsigned int max_keysize; unsigned int ivsize; } ;
215 struct aead_alg { int (*setkey)(struct crypto_aead *, const u8 *, unsigned int); int (*setauthsize)(struct crypto_aead *, unsigned int); int (*encrypt)(struct aead_request *); int (*decrypt)(struct aead_request *); int (*givencrypt)(struct aead_givcrypt_request *); int (*givdecrypt)(struct aead_givcrypt_request *); const char *geniv; unsigned int ivsize; unsigned int maxauthsize; } ;
230 struct blkcipher_alg { int (*setkey)(struct crypto_tfm *, const u8 *, unsigned int); int (*encrypt)(struct blkcipher_desc *, struct scatterlist *, struct scatterlist *, unsigned int); int (*decrypt)(struct blkcipher_desc *, struct scatterlist *, struct scatterlist *, unsigned int); const char *geniv; unsigned int min_keysize; unsigned int max_keysize; unsigned int ivsize; } ;
247 struct cipher_alg { unsigned int cia_min_keysize; unsigned int cia_max_keysize; int (*cia_setkey)(struct crypto_tfm *, const u8 *, unsigned int); void (*cia_encrypt)(struct crypto_tfm *, u8 *, const u8 *); void (*cia_decrypt)(struct crypto_tfm *, u8 *, const u8 *); } ;
256 struct compress_alg { int (*coa_compress)(struct crypto_tfm *, const u8 *, unsigned int, u8 *, unsigned int *); int (*coa_decompress)(struct crypto_tfm *, const u8 *, unsigned int, u8 *, unsigned int *); } ;
262 struct rng_alg { int (*rng_make_random)(struct crypto_rng *, u8 *, unsigned int); int (*rng_reset)(struct crypto_rng *, u8 *, unsigned int); unsigned int seedsize; } ;
271 union __anonunion_cra_u_152 { struct ablkcipher_alg ablkcipher; struct aead_alg aead; struct blkcipher_alg blkcipher; struct cipher_alg cipher; struct compress_alg compress; struct rng_alg rng; } ;
271 struct crypto_alg { struct list_head cra_list; struct list_head cra_users; u32 cra_flags; unsigned int cra_blocksize; unsigned int cra_ctxsize; unsigned int cra_alignmask; int cra_priority; atomic_t cra_refcnt; char cra_name[64U]; char cra_driver_name[64U]; const struct crypto_type *cra_type; union __anonunion_cra_u_152 cra_u; int (*cra_init)(struct crypto_tfm *); void (*cra_exit)(struct crypto_tfm *); void (*cra_destroy)(struct crypto_alg *); struct module *cra_module; } ;
325 struct ablkcipher_tfm { int (*setkey)(struct crypto_ablkcipher *, const u8 *, unsigned int); int (*encrypt)(struct ablkcipher_request *); int (*decrypt)(struct ablkcipher_request *); int (*givencrypt)(struct skcipher_givcrypt_request *); int (*givdecrypt)(struct skcipher_givcrypt_request *); struct crypto_ablkcipher *base; unsigned int ivsize; unsigned int reqsize; } ;
345 struct aead_tfm { int (*setkey)(struct crypto_aead *, const u8 *, unsigned int); int (*encrypt)(struct aead_request *); int (*decrypt)(struct aead_request *); int (*givencrypt)(struct aead_givcrypt_request *); int (*givdecrypt)(struct aead_givcrypt_request *); struct crypto_aead *base; unsigned int ivsize; unsigned int authsize; unsigned int reqsize; } ;
360 struct blkcipher_tfm { void *iv; int (*setkey)(struct crypto_tfm *, const u8 *, unsigned int); int (*encrypt)(struct blkcipher_desc *, struct scatterlist *, struct scatterlist *, unsigned int); int (*decrypt)(struct blkcipher_desc *, struct scatterlist *, struct scatterlist *, unsigned int); } ;
369 struct cipher_tfm { int (*cit_setkey)(struct crypto_tfm *, const u8 *, unsigned int); void (*cit_encrypt_one)(struct crypto_tfm *, u8 *, const u8 *); void (*cit_decrypt_one)(struct crypto_tfm *, u8 *, const u8 *); } ;
377 struct hash_tfm { int (*init)(struct hash_desc *); int (*update)(struct hash_desc *, struct scatterlist *, unsigned int); int (*final)(struct hash_desc *, u8 *); int (*digest)(struct hash_desc *, struct scatterlist *, unsigned int, u8 *); int (*setkey)(struct crypto_hash *, const u8 *, unsigned int); unsigned int digestsize; } ;
389 struct compress_tfm { int (*cot_compress)(struct crypto_tfm *, const u8 *, unsigned int, u8 *, unsigned int *); int (*cot_decompress)(struct crypto_tfm *, const u8 *, unsigned int, u8 *, unsigned int *); } ;
396 struct rng_tfm { int (*rng_gen_random)(struct crypto_rng *, u8 *, unsigned int); int (*rng_reset)(struct crypto_rng *, u8 *, unsigned int); } ;
404 union __anonunion_crt_u_153 { struct ablkcipher_tfm ablkcipher; struct aead_tfm aead; struct blkcipher_tfm blkcipher; struct cipher_tfm cipher; struct hash_tfm hash; struct compress_tfm compress; struct rng_tfm rng; } ;
404 struct crypto_tfm { u32 crt_flags; union __anonunion_crt_u_153 crt_u; void (*exit)(struct crypto_tfm *); struct crypto_alg *__crt_alg; void *__crt_ctx[]; } ;
433 struct crypto_ablkcipher { struct crypto_tfm base; } ;
437 struct crypto_aead { struct crypto_tfm base; } ;
441 struct crypto_blkcipher { struct crypto_tfm base; } ;
453 struct crypto_hash { struct crypto_tfm base; } ;
457 struct crypto_rng { struct crypto_tfm base; } ;
357 struct key_construction { struct key *key; struct key *authkey; } ;
28 struct key_preparsed_payload { char *description; void *type_data[2U]; void *payload[2U]; const void *data; size_t datalen; size_t quotalen; time_t expiry; bool trusted; } ;
53 struct key_type { const char *name; size_t def_datalen; unsigned int def_lookup_type; int (*vet_description)(const char *); int (*preparse)(struct key_preparsed_payload *); void (*free_preparse)(struct key_preparsed_payload *); int (*instantiate)(struct key *, struct key_preparsed_payload *); int (*update)(struct key *, struct key_preparsed_payload *); int (*match)(const struct key *, const void *); void (*revoke)(struct key *); void (*destroy)(struct key *); void (*describe)(const struct key *, struct seq_file *); long int (*read)(const struct key *, char *, size_t ); int (*request_key)(struct key_construction *, const char *, void *); struct list_head link; struct lock_class_key lock_class; } ;
50 struct ceph_x_ticket_blob { __u8 struct_v; __le64 secret_id; __le32 blob_len; char blob[]; } ;
15 struct ceph_x_request_header { __le16 op; } ;
21 struct ceph_x_reply_header { __le16 op; __le32 result; } ;
26 struct ceph_x_server_challenge { __u8 struct_v; __le64 server_challenge; } ;
35 struct ceph_x_authenticate { __u8 struct_v; __le64 client_challenge; __le64 key; } ;
41 struct ceph_x_service_ticket_request { __u8 struct_v; __le32 keys; } ;
47 struct ceph_x_challenge_blob { __le64 server_challenge; __le64 client_challenge; } ;
52 struct ceph_x_authorize_a { __u8 struct_v; __le64 global_id; __le32 service_id; struct ceph_x_ticket_blob ticket_blob; } ;
68 struct ceph_x_authorize_b { __u8 struct_v; __le64 nonce; } ;
73 struct ceph_x_authorize_reply { __u8 struct_v; __le64 nonce_plus_one; } ;
78 struct ceph_x_encrypt_header { __u8 struct_v; __le64 magic; } ;
89 struct ceph_x_ticket_handler { struct rb_node node; unsigned int service; struct ceph_crypto_key session_key; struct ceph_timespec validity; u64 secret_id; struct ceph_buffer *ticket_blob; unsigned long renew_after; unsigned long expires; } ;
26 struct ceph_x_authorizer { struct ceph_buffer *buf; unsigned int service; u64 nonce; u64 secret_id; char reply_buf[128U]; } ;
35 struct ceph_x_info { struct ceph_crypto_key secret; bool starting; u64 server_challenge; unsigned int have_keys; struct rb_root ticket_handlers; struct ceph_x_authorizer auth_authorizer; } ;
75 typedef int pao_T__;
80 typedef int pao_T_____0;
23 struct nameidata { struct path path; struct qstr last; struct path root; struct inode *inode; unsigned int flags; unsigned int seq; unsigned int m_seq; int last_type; unsigned int depth; char *saved_names[9U]; } ;
1 void * __builtin_memcpy(void *, const void *, unsigned long);
1 long int __builtin_expect(long exp, long c);
142 int printk(const char *, ...);
45 int __dynamic_pr_debug(struct _ddebug *, const char *, ...);
403 int sscanf(const char *, const char *, ...);
87 void __bad_percpu_size();
10 extern struct task_struct *current_task;
12 struct task_struct * get_current();
24 void INIT_LIST_HEAD(struct list_head *list);
34 void * __memcpy(void *, const void *, size_t );
60 int memcmp(const void *, const void *, size_t );
64 int strcmp(const char *, const char *);
89 char * strsep(char **, const char *);
119 char * kstrndup(const char *, size_t , gfp_t );
23 void * ERR_PTR(long error);
28 long int PTR_ERR(const void *ptr);
33 bool IS_ERR(const void *ptr);
37 void atomic_set(atomic_t *v, int i);
120 void __mutex_init(struct mutex *, const char *, struct lock_class_key *);
174 int mutex_trylock(struct mutex *);
177 int ldv_mutex_trylock_6(struct mutex *ldv_func_arg1);
179 void mutex_unlock(struct mutex *);
182 void ldv_mutex_unlock_1(struct mutex *ldv_func_arg1);
186 void ldv_mutex_unlock_4(struct mutex *ldv_func_arg1);
190 void ldv_mutex_unlock_7(struct mutex *ldv_func_arg1);
194 void ldv_mutex_unlock_9(struct mutex *ldv_func_arg1);
7 void mutex_lock(struct mutex *);
10 void ldv_mutex_lock_2(struct mutex *ldv_func_arg1);
14 void ldv_mutex_lock_3(struct mutex *ldv_func_arg1);
18 void ldv_mutex_lock_5(struct mutex *ldv_func_arg1);
22 void ldv_mutex_lock_8(struct mutex *ldv_func_arg1);
35 void ldv_mutex_lock_i_mutex_of_inode(struct mutex *lock);
39 void ldv_mutex_unlock_i_mutex_of_inode(struct mutex *lock);
43 void ldv_mutex_lock_lock(struct mutex *lock);
47 void ldv_mutex_unlock_lock(struct mutex *lock);
51 void ldv_mutex_lock_mount_mutex_of_ceph_client(struct mutex *lock);
55 void ldv_mutex_unlock_mount_mutex_of_ceph_client(struct mutex *lock);
83 void ldv_mutex_lock_mutex_of_device(struct mutex *lock);
84 int ldv_mutex_trylock_mutex_of_device(struct mutex *lock);
87 void ldv_mutex_unlock_mutex_of_device(struct mutex *lock);
15 const char * ceph_file_part(const char *s, int len);
77 extern volatile unsigned long jiffies;
69 void __init_waitqueue_head(wait_queue_head_t *, const char *, struct lock_class_key *);
825 long int prepare_to_wait_event(wait_queue_head_t *, wait_queue_t *, int);
826 void finish_wait(wait_queue_head_t *, wait_queue_t *);
75 void * __vmalloc(unsigned long, gfp_t , pgprot_t );
79 void vfree(const void *);
229 void key_put(struct key *);
247 struct key * request_key(struct key_type *, const char *, const char *);
377 long int schedule_timeout(long);
18 extern const unsigned char _ctype[];
6 struct key_type key_type_ceph;
28 int match_token(char *, const struct match_token *, substring_t *);
29 int match_int(substring_t *, int *);
144 void kfree(const void *);
315 void * __kmalloc(size_t , gfp_t );
445 void * kmalloc(size_t size, gfp_t flags);
559 void * kmalloc_array(size_t n, size_t size, gfp_t flags);
572 void * kcalloc(size_t n, size_t size, gfp_t flags);
637 void * kzalloc(size_t size, gfp_t flags);
360 int is_vmalloc_addr(const void *x);
7 extern struct net init_net;
18 int ceph_fsid_compare(const struct ceph_fsid *a, const struct ceph_fsid *b);
247 int ceph_parse_ips(const char *c, const char *end, struct ceph_entity_addr *addr, int max_count, int *count);
252 int ceph_msgr_init();
253 void ceph_msgr_exit();
256 void ceph_messenger_init(struct ceph_messenger *msgr, struct ceph_entity_addr *myaddr, u64 supported_features, u64 required_features, bool nocrc);
91 int ceph_monmap_contains(struct ceph_monmap *m, struct ceph_entity_addr *addr);
94 int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl);
95 void ceph_monc_stop(struct ceph_mon_client *monc);
116 int ceph_monc_open_session(struct ceph_mon_client *monc);
232 int ceph_osdc_setup();
233 void ceph_osdc_cleanup();
235 int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client);
237 void ceph_osdc_stop(struct ceph_osd_client *osdc);
182 bool libceph_compatible(void *data);
184 const char * ceph_msg_type_name(int type);
185 int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid);
186 void * ceph_kvmalloc(size_t size, gfp_t flags);
187 void ceph_kvfree(const void *ptr);
189 struct ceph_options * ceph_parse_options(char *options, const char *dev_name___0, const char *dev_name_end, int (*parse_extra_token)(char *, void *), void *private);
193 void ceph_destroy_options(struct ceph_options *opt);
194 int ceph_compare_options(struct ceph_options *new_opt, struct ceph_client *client);
196 struct ceph_client * ceph_create_client(struct ceph_options *opt, void *private, u64 supported_features, u64 required_features);
200 u64 ceph_client_id(struct ceph_client *client);
201 void ceph_destroy_client(struct ceph_client *client);
202 int __ceph_open_session(struct ceph_client *client, unsigned long started);
204 int ceph_open_session(struct ceph_client *client);
27 int ceph_debugfs_init();
28 void ceph_debugfs_cleanup();
30 void ceph_debugfs_client_cleanup(struct ceph_client *client);
17 void ceph_crypto_key_destroy(struct ceph_crypto_key *key);
23 int ceph_crypto_key_clone(struct ceph_crypto_key *dst, const struct ceph_crypto_key *src);
27 int ceph_crypto_key_unarmor(struct ceph_crypto_key *key, const char *inkey);
44 int ceph_crypto_init();
45 void ceph_crypto_shutdown();
127 const char __kstrtab_libceph_compatible[19U] = { 'l', 'i', 'b', 'c', 'e', 'p', 'h', '_', 'c', 'o', 'm', 'p', 'a', 't', 'i', 'b', 'l', 'e', '\x0' };
127 const struct kernel_symbol __ksymtab_libceph_compatible;
127 const struct kernel_symbol __ksymtab_libceph_compatible = { (unsigned long)(&libceph_compatible), (const char *)(&__kstrtab_libceph_compatible) };
140 const char __kstrtab_ceph_file_part[15U] = { 'c', 'e', 'p', 'h', '_', 'f', 'i', 'l', 'e', '_', 'p', 'a', 'r', 't', '\x0' };
140 const struct kernel_symbol __ksymtab_ceph_file_part;
140 const struct kernel_symbol __ksymtab_ceph_file_part = { (unsigned long)(&ceph_file_part), (const char *)(&__kstrtab_ceph_file_part) };
174 const char __kstrtab_ceph_msg_type_name[19U] = { 'c', 'e', 'p', 'h', '_', 'm', 's', 'g', '_', 't', 'y', 'p', 'e', '_', 'n', 'a', 'm', 'e', '\x0' };
174 const struct kernel_symbol __ksymtab_ceph_msg_type_name;
174 const struct kernel_symbol __ksymtab_ceph_msg_type_name = { (unsigned long)(&ceph_msg_type_name), (const char *)(&__kstrtab_ceph_msg_type_name) };
192 const char __kstrtab_ceph_check_fsid[16U] = { 'c', 'e', 'p', 'h', '_', 'c', 'h', 'e', 'c', 'k', '_', 'f', 's', 'i', 'd', '\x0' };
192 const struct kernel_symbol __ksymtab_ceph_check_fsid;
192 const struct kernel_symbol __ksymtab_ceph_check_fsid = { (unsigned long)(&ceph_check_fsid), (const char *)(&__kstrtab_ceph_check_fsid) };
194 int strcmp_null(const char *s1, const char *s2);
254 const char __kstrtab_ceph_compare_options[21U] = { 'c', 'e', 'p', 'h', '_', 'c', 'o', 'm', 'p', 'a', 'r', 'e', '_', 'o', 'p', 't', 'i', 'o', 'n', 's', '\x0' };
254 const struct kernel_symbol __ksymtab_ceph_compare_options;
254 const struct kernel_symbol __ksymtab_ceph_compare_options = { (unsigned long)(&ceph_compare_options), (const char *)(&__kstrtab_ceph_compare_options) };
276 int parse_fsid(const char *str, struct ceph_fsid *fsid);
330 struct match_token opt_tokens[14U] = { { 0, "osdtimeout=%d" }, { 1, "osdkeepalive=%d" }, { 2, "mount_timeout=%d" }, { 3, "osd_idle_ttl=%d" }, { 5, "fsid=%s" }, { 6, "name=%s" }, { 7, "secret=%s" }, { 8, "key=%s" }, { 9, "ip=%s" }, { 11, "share" }, { 12, "noshare" }, { 13, "crc" }, { 14, "nocrc" }, { -1, (const char *)0 } };
360 const char __kstrtab_ceph_destroy_options[21U] = { 'c', 'e', 'p', 'h', '_', 'd', 'e', 's', 't', 'r', 'o', 'y', '_', 'o', 'p', 't', 'i', 'o', 'n', 's', '\x0' };
360 const struct kernel_symbol __ksymtab_ceph_destroy_options;
360 const struct kernel_symbol __ksymtab_ceph_destroy_options = { (unsigned long)(&ceph_destroy_options), (const char *)(&__kstrtab_ceph_destroy_options) };
363 int get_secret(struct ceph_crypto_key *dst, const char *name);
554 const char __kstrtab_ceph_parse_options[19U] = { 'c', 'e', 'p', 'h', '_', 'p', 'a', 'r', 's', 'e', '_', 'o', 'p', 't', 'i', 'o', 'n', 's', '\x0' };
554 const struct kernel_symbol __ksymtab_ceph_parse_options;
554 const struct kernel_symbol __ksymtab_ceph_parse_options = { (unsigned long)(&ceph_parse_options), (const char *)(&__kstrtab_ceph_parse_options) };
560 const char __kstrtab_ceph_client_id[15U] = { 'c', 'e', 'p', 'h', '_', 'c', 'l', 'i', 'e', 'n', 't', '_', 'i', 'd', '\x0' };
560 const struct kernel_symbol __ksymtab_ceph_client_id;
560 const struct kernel_symbol __ksymtab_ceph_client_id = { (unsigned long)(&ceph_client_id), (const char *)(&__kstrtab_ceph_client_id) };
614 const char __kstrtab_ceph_create_client[19U] = { 'c', 'e', 'p', 'h', '_', 'c', 'r', 'e', 'a', 't', 'e', '_', 'c', 'l', 'i', 'e', 'n', 't', '\x0' };
614 const struct kernel_symbol __ksymtab_ceph_create_client;
614 const struct kernel_symbol __ksymtab_ceph_create_client = { (unsigned long)(&ceph_create_client), (const char *)(&__kstrtab_ceph_create_client) };
634 const char __kstrtab_ceph_destroy_client[20U] = { 'c', 'e', 'p', 'h', '_', 'd', 'e', 's', 't', 'r', 'o', 'y', '_', 'c', 'l', 'i', 'e', 'n', 't', '\x0' };
634 const struct kernel_symbol __ksymtab_ceph_destroy_client;
634 const struct kernel_symbol __ksymtab_ceph_destroy_client = { (unsigned long)(&ceph_destroy_client), (const char *)(&__kstrtab_ceph_destroy_client) };
639 int have_mon_and_osd_map(struct ceph_client *client);
676 const char __kstrtab___ceph_open_session[20U] = { '_', '_', 'c', 'e', 'p', 'h', '_', 'o', 'p', 'e', 'n', '_', 's', 'e', 's', 's', 'i', 'o', 'n', '\x0' };
676 const struct kernel_symbol __ksymtab___ceph_open_session;
676 const struct kernel_symbol __ksymtab___ceph_open_session = { (unsigned long)(&__ceph_open_session), (const char *)(&__kstrtab___ceph_open_session) };
692 const char __kstrtab_ceph_open_session[18U] = { 'c', 'e', 'p', 'h', '_', 'o', 'p', 'e', 'n', '_', 's', 'e', 's', 's', 'i', 'o', 'n', '\x0' };
692 const struct kernel_symbol __ksymtab_ceph_open_session;
692 const struct kernel_symbol __ksymtab_ceph_open_session = { (unsigned long)(&ceph_open_session), (const char *)(&__kstrtab_ceph_open_session) };
695 int init_ceph_lib();
730 void exit_ceph_lib();
764 void ldv_check_final_state();
773 void ldv_initialize();
776 void ldv_handler_precall();
779 int nondet_int();
782 int LDV_IN_INTERRUPT = 0;
785 void ldv_main0_sequence_infinite_withcheck_stateful();
72 void set_bit(long nr, volatile unsigned long *addr);
110 void clear_bit(long nr, volatile unsigned long *addr);
204 int test_and_set_bit(long nr, volatile unsigned long *addr);
250 int test_and_clear_bit(long nr, volatile unsigned long *addr);
308 int constant_test_bit(long nr, const volatile unsigned long *addr);
314 int variable_test_bit(long nr, const volatile unsigned long *addr);
46 __u16 __fswab16(__u16 val);
414 void print_hex_dump(const char *, const char *, int, int, int, const void *, size_t , bool );
165 void __might_sleep(const char *, int, int);
391 int snprintf(char *, size_t , const char *, ...);
47 void __list_add(struct list_head *, struct list_head *, struct list_head *);
74 void list_add_tail(struct list_head *new, struct list_head *head);
111 void __list_del_entry(struct list_head *);
142 void list_del_init(struct list_head *entry);
164 void list_move_tail(struct list_head *list, struct list_head *head);
176 int list_is_last(const struct list_head *list, const struct list_head *head);
186 int list_empty(const struct list_head *head);
273 void __list_splice(const struct list_head *list, struct list_head *prev, struct list_head *next);
318 void list_splice_init(struct list_head *list, struct list_head *head);
23 unsigned long int __phys_addr(unsigned long);
55 void * memset(void *, int, size_t );
61 size_t strlen(const char *);
114 void * memchr(const void *, int, __kernel_size_t );
66 void warn_slowpath_fmt(const char *, const int, const char *, ...);
71 void warn_slowpath_null(const char *, const int);
13 void __xchg_wrong_size();
17 void __xadd_wrong_size();
25 int atomic_read(const atomic_t *v);
79 int atomic_sub_and_test(int i, atomic_t *v);
90 void atomic_inc(atomic_t *v);
155 int atomic_add_return(int i, atomic_t *v);
180 int atomic_xchg(atomic_t *v, int new);
279 void lockdep_init_map(struct lockdep_map *, const char *, struct lock_class_key *, int);
177 int ldv_mutex_trylock_24(struct mutex *ldv_func_arg1);
182 void ldv_mutex_unlock_19(struct mutex *ldv_func_arg1);
186 void ldv_mutex_unlock_22(struct mutex *ldv_func_arg1);
190 void ldv_mutex_unlock_25(struct mutex *ldv_func_arg1);
194 void ldv_mutex_unlock_27(struct mutex *ldv_func_arg1);
198 void ldv_mutex_unlock_29(struct mutex *ldv_func_arg1);
202 void ldv_mutex_unlock_30(struct mutex *ldv_func_arg1);
206 void ldv_mutex_unlock_32(struct mutex *ldv_func_arg1);
210 void ldv_mutex_unlock_34(struct mutex *ldv_func_arg1);
214 void ldv_mutex_unlock_37(struct mutex *ldv_func_arg1);
218 void ldv_mutex_unlock_39(struct mutex *ldv_func_arg1);
222 void ldv_mutex_unlock_40(struct mutex *ldv_func_arg1);
226 void ldv_mutex_unlock_42(struct mutex *ldv_func_arg1);
230 void ldv_mutex_unlock_44(struct mutex *ldv_func_arg1);
234 void ldv_mutex_unlock_46(struct mutex *ldv_func_arg1);
238 void ldv_mutex_unlock_47(struct mutex *ldv_func_arg1);
10 void ldv_mutex_lock_20(struct mutex *ldv_func_arg1);
14 void ldv_mutex_lock_21(struct mutex *ldv_func_arg1);
18 void ldv_mutex_lock_23(struct mutex *ldv_func_arg1);
22 void ldv_mutex_lock_26(struct mutex *ldv_func_arg1);
26 void ldv_mutex_lock_28(struct mutex *ldv_func_arg1);
30 void ldv_mutex_lock_31(struct mutex *ldv_func_arg1);
34 void ldv_mutex_lock_33(struct mutex *ldv_func_arg1);
38 void ldv_mutex_lock_35(struct mutex *ldv_func_arg1);
42 void ldv_mutex_lock_36(struct mutex *ldv_func_arg1);
46 void ldv_mutex_lock_38(struct mutex *ldv_func_arg1);
50 void ldv_mutex_lock_41(struct mutex *ldv_func_arg1);
54 void ldv_mutex_lock_43(struct mutex *ldv_func_arg1);
58 void ldv_mutex_lock_45(struct mutex *ldv_func_arg1);
62 void ldv_mutex_lock_48(struct mutex *ldv_func_arg1);
107 void ldv_mutex_lock_mutex_of_ceph_connection(struct mutex *lock);
111 void ldv_mutex_unlock_mutex_of_ceph_connection(struct mutex *lock);
6 u32 crc32c(u32 , const void *, unsigned int);
93 void __raw_spin_lock_init(raw_spinlock_t *, const char *, struct lock_class_key *);
22 void _raw_spin_lock(raw_spinlock_t *);
39 void _raw_spin_unlock(raw_spinlock_t *);
290 raw_spinlock_t * spinlock_check(spinlock_t *lock);
301 void spin_lock(spinlock_t *lock);
341 void spin_unlock(spinlock_t *lock);
8 void dump_page(struct page *, const char *);
94 void init_timer_key(struct timer_list *, unsigned int, const char *, struct lock_class_key *);
260 unsigned long int round_jiffies_relative(unsigned long);
20 void delayed_work_timer_fn(unsigned long);
180 void __init_work(struct work_struct *, int);
364 struct workqueue_struct * __alloc_workqueue_key(const char *, unsigned int, int, struct lock_class_key *, const char *, ...);
424 void destroy_workqueue(struct workqueue_struct *);
433 bool queue_delayed_work_on(int, struct workqueue_struct *, struct delayed_work *, unsigned long);
438 void flush_workqueue(struct workqueue_struct *);
450 bool cancel_delayed_work(struct delayed_work *);
485 bool queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *dwork, unsigned long delay);
26 extern unsigned long empty_zero_page[512U];
356 int PageTail(const struct page *page);
416 struct page * compound_head_by_tail(struct page *tail);
431 struct page * compound_head(struct page *page);
453 int page_count(struct page *page);
498 bool __get_page_tail(struct page *);
500 void get_page(struct page *page);
556 void put_page(struct page *);
912 void * lowmem_page_address(const struct page *page);
56 void * kmap(struct page *page);
62 void kunmap(struct page *page);
55 int in4_pton(const char *, int, u8 *, int, const char **);
56 int in6_pton(const char *, int, u8 *, int, const char **);
16 void get_random_bytes(void *, int);
213 int sock_create_kern(int, int, int, struct socket **);
215 void sock_release(struct socket *);
265 int kernel_sendmsg(struct socket *, struct msghdr *, struct kvec *, size_t , size_t );
267 int kernel_recvmsg(struct socket *, struct msghdr *, struct kvec *, size_t , size_t , int);
283 int kernel_sendpage(struct socket *, struct page *, int, size_t , int);
115 struct kmem_cache * kmem_cache_create(const char *, size_t , size_t , unsigned long, void (*)(void *));
123 void kmem_cache_destroy(struct kmem_cache *);
125 void kmem_cache_free(struct kmem_cache *, void *);
316 void * kmem_cache_alloc(struct kmem_cache *, gfp_t );
32 void kref_init(struct kref *kref);
41 void kref_get(struct kref *kref);
68 int kref_sub(struct kref *kref, unsigned int count, void (*release)(struct kref *));
97 int kref_put(struct kref *kref, void (*release)(struct kref *));
627 void * kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags);
207 void bvec_iter_advance(struct bio_vec *bv, struct bvec_iter *iter, unsigned int bytes);
234 void bio_advance_iter(struct bio *bio, struct bvec_iter *iter, unsigned int bytes);
29 int dns_query(const char *, const char *, size_t , const char *, char **, time_t *);
780 int sk_stream_min_wspace(const struct sock *sk);
785 int sk_stream_wspace(const struct sock *sk);
1141 bool sk_stream_memory_free(const struct sock *sk);
1150 bool sk_stream_is_writeable(const struct sock *sk);
67 u64 ceph_sanitize_features(u64 features);
57 const char * ceph_entity_type_name(int type);
22 struct ceph_buffer * ceph_buffer_new(size_t len, gfp_t gfp);
23 void ceph_buffer_release(struct kref *kref);
31 void ceph_buffer_put(struct ceph_buffer *b);
77 bool ceph_msg_data_type_valid(enum ceph_msg_data_type type);
246 const char * ceph_pr_addr(const struct __kernel_sockaddr_storage *ss);
254 void ceph_msgr_flush();
262 void ceph_con_init(struct ceph_connection *con, void *private, const struct ceph_connection_operations *ops, struct ceph_messenger *msgr);
265 void ceph_con_open(struct ceph_connection *con, __u8 entity_type, __u64 entity_num, struct ceph_entity_addr *addr);
268 bool ceph_con_opened(struct ceph_connection *con);
269 void ceph_con_close(struct ceph_connection *con);
270 void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg);
272 void ceph_msg_revoke(struct ceph_msg *msg);
273 void ceph_msg_revoke_incoming(struct ceph_msg *msg);
275 void ceph_con_keepalive(struct ceph_connection *con);
277 void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages, size_t length, size_t alignment);
279 void ceph_msg_data_add_pagelist(struct ceph_msg *msg, struct ceph_pagelist *pagelist);
282 void ceph_msg_data_add_bio(struct ceph_msg *msg, struct bio *bio, size_t length);
286 struct ceph_msg * ceph_msg_new(int type, int front_len, gfp_t flags, bool can_fail);
289 struct ceph_msg * ceph_msg_get(struct ceph_msg *msg);
290 void ceph_msg_put(struct ceph_msg *msg);
292 void ceph_msg_dump(struct ceph_msg *msg);
24 void ceph_msgpool_put(struct ceph_msgpool *pool, struct ceph_msg *msg);
153 void ceph_encode_addr(struct ceph_entity_addr *a);
158 void ceph_decode_addr(struct ceph_entity_addr *a);
31 int ceph_pagelist_release(struct ceph_pagelist *pl);
170 int calc_pages_for(u64 off, u64 len);
185 bool con_flag_valid(unsigned long con_flag);
199 void con_flag_clear(struct ceph_connection *con, unsigned long con_flag);
206 void con_flag_set(struct ceph_connection *con, unsigned long con_flag);
213 bool con_flag_test(struct ceph_connection *con, unsigned long con_flag);
220 bool con_flag_test_and_clear(struct ceph_connection *con, unsigned long con_flag);
228 bool con_flag_test_and_set(struct ceph_connection *con, unsigned long con_flag);
238 struct kmem_cache *ceph_msg_cache = 0;
239 struct kmem_cache *ceph_msg_data_cache = 0;
242 char tag_msg = 7;
243 char tag_ack = 8;
244 char tag_keepalive = 9;
247 struct lock_class_key socket_class = { };
256 void queue_con(struct ceph_connection *con);
257 void cancel_con(struct ceph_connection *con);
258 void con_work(struct work_struct *work);
259 void con_fault(struct ceph_connection *con);
270 char addr_str[32U][64U] = { };
271 atomic_t addr_str_seq = { 0 };
273 struct page *zero_page = 0;
303 const char __kstrtab_ceph_pr_addr[13U] = { 'c', 'e', 'p', 'h', '_', 'p', 'r', '_', 'a', 'd', 'd', 'r', '\x0' };
303 const struct kernel_symbol __ksymtab_ceph_pr_addr;
303 const struct kernel_symbol __ksymtab_ceph_pr_addr = { (unsigned long)(&ceph_pr_addr), (const char *)(&__kstrtab_ceph_pr_addr) };
305 void encode_my_addr(struct ceph_messenger *msgr);
314 struct workqueue_struct *ceph_msgr_wq = 0;
316 int ceph_msgr_slab_init();
340 void ceph_msgr_slab_exit();
351 void _ceph_msgr_exit();
384 const char __kstrtab_ceph_msgr_init[15U] = { 'c', 'e', 'p', 'h', '_', 'm', 's', 'g', 'r', '_', 'i', 'n', 'i', 't', '\x0' };
384 const struct kernel_symbol __ksymtab_ceph_msgr_init;
384 const struct kernel_symbol __ksymtab_ceph_msgr_init = { (unsigned long)(&ceph_msgr_init), (const char *)(&__kstrtab_ceph_msgr_init) };
392 const char __kstrtab_ceph_msgr_exit[15U] = { 'c', 'e', 'p', 'h', '_', 'm', 's', 'g', 'r', '_', 'e', 'x', 'i', 't', '\x0' };
392 const struct kernel_symbol __ksymtab_ceph_msgr_exit;
392 const struct kernel_symbol __ksymtab_ceph_msgr_exit = { (unsigned long)(&ceph_msgr_exit), (const char *)(&__kstrtab_ceph_msgr_exit) };
398 const char __kstrtab_ceph_msgr_flush[16U] = { 'c', 'e', 'p', 'h', '_', 'm', 's', 'g', 'r', '_', 'f', 'l', 'u', 's', 'h', '\x0' };
398 const struct kernel_symbol __ksymtab_ceph_msgr_flush;
398 const struct kernel_symbol __ksymtab_ceph_msgr_flush = { (unsigned long)(&ceph_msgr_flush), (const char *)(&__kstrtab_ceph_msgr_flush) };
402 void con_sock_state_init(struct ceph_connection *con);
413 void con_sock_state_connecting(struct ceph_connection *con);
424 void con_sock_state_connected(struct ceph_connection *con);
435 void con_sock_state_closing(struct ceph_connection *con);
448 void con_sock_state_closed(struct ceph_connection *con);
467 void ceph_sock_data_ready(struct sock *sk);
482 void ceph_sock_write_space(struct sock *sk);
505 void ceph_sock_state_change(struct sock *sk);
534 void set_sock_callbacks(struct socket *sock, struct ceph_connection *con);
552 int ceph_tcp_connect(struct ceph_connection *con);
592 int ceph_tcp_recvmsg(struct socket *sock, void *buf, size_t len);
604 int ceph_tcp_recvpage(struct socket *sock, struct page *page, int page_offset___0, size_t length);
624 int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov, size_t kvlen, size_t len, int more);
641 int __ceph_tcp_sendpage(struct socket *sock, struct page *page, int offset, size_t size, bool more);
654 int ceph_tcp_sendpage(struct socket *sock, struct page *page, int offset, size_t size, bool more);
676 int con_close_socket(struct ceph_connection *con);
703 void ceph_msg_remove(struct ceph_msg *msg);
712 void ceph_msg_remove_list(struct list_head *head);
721 void reset_connection(struct ceph_connection *con);
768 const char __kstrtab_ceph_con_close[15U] = { 'c', 'e', 'p', 'h', '_', 'c', 'o', 'n', '_', 'c', 'l', 'o', 's', 'e', '\x0' };
768 const struct kernel_symbol __ksymtab_ceph_con_close;
768 const struct kernel_symbol __ksymtab_ceph_con_close = { (unsigned long)(&ceph_con_close), (const char *)(&__kstrtab_ceph_con_close) };
791 const char __kstrtab_ceph_con_open[14U] = { 'c', 'e', 'p', 'h', '_', 'c', 'o', 'n', '_', 'o', 'p', 'e', 'n', '\x0' };
791 const struct kernel_symbol __ksymtab_ceph_con_open;
791 const struct kernel_symbol __ksymtab_ceph_con_open = { (unsigned long)(&ceph_con_open), (const char *)(&__kstrtab_ceph_con_open) };
823 const char __kstrtab_ceph_con_init[14U] = { 'c', 'e', 'p', 'h', '_', 'c', 'o', 'n', '_', 'i', 'n', 'i', 't', '\x0' };
823 const struct kernel_symbol __ksymtab_ceph_con_init;
823 const struct kernel_symbol __ksymtab_ceph_con_init = { (unsigned long)(&ceph_con_init), (const char *)(&__kstrtab_ceph_con_init) };
830 u32 get_global_seq(struct ceph_messenger *msgr, u32 gt);
842 void con_out_kvec_reset(struct ceph_connection *con);
849 void con_out_kvec_add(struct ceph_connection *con, size_t size, void *data);
870 void ceph_msg_data_bio_cursor_init(struct ceph_msg_data_cursor *cursor, size_t length);
888 struct page * ceph_msg_data_bio_next(struct ceph_msg_data_cursor *cursor, size_t *page_offset___0, size_t *length);
915 bool ceph_msg_data_bio_advance(struct ceph_msg_data_cursor *cursor, size_t bytes);
966 void ceph_msg_data_pages_cursor_init(struct ceph_msg_data_cursor *cursor, size_t length);
988 struct page * ceph_msg_data_pages_next(struct ceph_msg_data_cursor *cursor, size_t *page_offset___0, size_t *length);
1007 bool ceph_msg_data_pages_advance(struct ceph_msg_data_cursor *cursor, size_t bytes);
1038 void ceph_msg_data_pagelist_cursor_init(struct ceph_msg_data_cursor *cursor, size_t length);
1063 struct page * ceph_msg_data_pagelist_next(struct ceph_msg_data_cursor *cursor, size_t *page_offset___0, size_t *length);
1087 bool ceph_msg_data_pagelist_advance(struct ceph_msg_data_cursor *cursor, size_t bytes);
1129 void __ceph_msg_data_cursor_init(struct ceph_msg_data_cursor *cursor);
1153 void ceph_msg_data_cursor_init(struct ceph_msg *msg, size_t length);
1175 struct page * ceph_msg_data_next(struct ceph_msg_data_cursor *cursor, size_t *page_offset___0, size_t *length, bool *last_piece);
1211 bool ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor, size_t bytes);
1248 void prepare_message_data(struct ceph_msg *msg, u32 data_len);
1262 void prepare_write_message_footer(struct ceph_connection *con);
1282 void prepare_write_message(struct ceph_connection *con);
1368 void prepare_write_ack(struct ceph_connection *con);
1389 void prepare_write_seq(struct ceph_connection *con);
1407 void prepare_write_keepalive(struct ceph_connection *con);
1419 struct ceph_auth_handshake * get_connect_authorizer(struct ceph_connection *con, int *auth_proto);
1448 void prepare_write_banner(struct ceph_connection *con);
1458 int prepare_write_connect(struct ceph_connection *con);
1516 int write_partial_kvec(struct ceph_connection *con);
1553 u32 ceph_crc32c_page(u32 crc, struct page *page, unsigned int page_offset___0, unsigned int length);
1573 int write_partial_message_data(struct ceph_connection *con);
1633 int write_partial_skip(struct ceph_connection *con);
1653 void prepare_read_banner(struct ceph_connection *con);
1659 void prepare_read_connect(struct ceph_connection *con);
1665 void prepare_read_ack(struct ceph_connection *con);
1671 void prepare_read_seq(struct ceph_connection *con);
1678 void prepare_read_tag(struct ceph_connection *con);
1688 int prepare_read_message(struct ceph_connection *con);
1698 int read_partial(struct ceph_connection *con, int end, int size, void *object);
1716 int read_partial_banner(struct ceph_connection *con);
1747 int read_partial_connect(struct ceph_connection *con);
1779 int verify_hello(struct ceph_connection *con);
1790 bool addr_is_blank(struct __kernel_sockaddr_storage *ss);
1805 int addr_port(struct __kernel_sockaddr_storage *ss);
1816 void addr_set_port(struct __kernel_sockaddr_storage *ss, int p);
1831 int ceph_pton(const char *str, size_t len, struct __kernel_sockaddr_storage *ss, char delim, const char **ipend);
1856 int ceph_dns_resolve_name(const char *name, size_t namelen, struct __kernel_sockaddr_storage *ss, char delim, const char **ipend);
1911 int ceph_parse_server_name(const char *name, size_t namelen, struct __kernel_sockaddr_storage *ss, char delim, const char **ipend);
1999 const char __kstrtab_ceph_parse_ips[15U] = { 'c', 'e', 'p', 'h', '_', 'p', 'a', 'r', 's', 'e', '_', 'i', 'p', 's', '\x0' };
1999 const struct kernel_symbol __ksymtab_ceph_parse_ips;
1999 const struct kernel_symbol __ksymtab_ceph_parse_ips = { (unsigned long)(&ceph_parse_ips), (const char *)(&__kstrtab_ceph_parse_ips) };
2001 int process_banner(struct ceph_connection *con);
2047 int process_connect(struct ceph_connection *con);
2219 int read_partial_ack(struct ceph_connection *con);
2230 void process_ack(struct ceph_connection *con);
2251 int read_partial_message_section(struct ceph_connection *con, struct kvec *section, unsigned int sec_len, u32 *crc);
2274 int read_partial_msg_data(struct ceph_connection *con);
2315 int ceph_con_in_msg_alloc(struct ceph_connection *con, int *skip);
2317 int read_partial_message(struct ceph_connection *con);
2474 void process_message(struct ceph_connection *con);
2509 int try_write(struct ceph_connection *con);
2602 int try_read(struct ceph_connection *con);
2747 int queue_con_delay(struct ceph_connection *con, unsigned long delay);
2777 bool con_sock_closed(struct ceph_connection *con);
2806 bool con_backoff(struct ceph_connection *con);
2826 void con_fault_finish(struct ceph_connection *con);
2987 const char __kstrtab_ceph_messenger_init[20U] = { 'c', 'e', 'p', 'h', '_', 'm', 'e', 's', 's', 'e', 'n', 'g', 'e', 'r', '_', 'i', 'n', 'i', 't', '\x0' };
2987 const struct kernel_symbol __ksymtab_ceph_messenger_init;
2987 const struct kernel_symbol __ksymtab_ceph_messenger_init = { (unsigned long)(&ceph_messenger_init), (const char *)(&__kstrtab_ceph_messenger_init) };
2989 void clear_standby(struct ceph_connection *con);
3041 const char __kstrtab_ceph_con_send[14U] = { 'c', 'e', 'p', 'h', '_', 'c', 'o', 'n', '_', 's', 'e', 'n', 'd', '\x0' };
3041 const struct kernel_symbol __ksymtab_ceph_con_send;
3041 const struct kernel_symbol __ksymtab_ceph_con_send = { (unsigned long)(&ceph_con_send), (const char *)(&__kstrtab_ceph_con_send) };
3131 const char __kstrtab_ceph_con_keepalive[19U] = { 'c', 'e', 'p', 'h', '_', 'c', 'o', 'n', '_', 'k', 'e', 'e', 'p', 'a', 'l', 'i', 'v', 'e', '\x0' };
3131 const struct kernel_symbol __ksymtab_ceph_con_keepalive;
3131 const struct kernel_symbol __ksymtab_ceph_con_keepalive = { (unsigned long)(&ceph_con_keepalive), (const char *)(&__kstrtab_ceph_con_keepalive) };
3133 struct ceph_msg_data * ceph_msg_data_create(enum ceph_msg_data_type type);
3148 void ceph_msg_data_destroy(struct ceph_msg_data *data);
3178 const char __kstrtab_ceph_msg_data_add_pages[24U] = { 'c', 'e', 'p', 'h', '_', 'm', 's', 'g', '_', 'd', 'a', 't', 'a', '_', 'a', 'd', 'd', '_', 'p', 'a', 'g', 'e', 's', '\x0' };
3178 const struct kernel_symbol __ksymtab_ceph_msg_data_add_pages;
3178 const struct kernel_symbol __ksymtab_ceph_msg_data_add_pages = { (unsigned long)(&ceph_msg_data_add_pages), (const char *)(&__kstrtab_ceph_msg_data_add_pages) };
3195 const char __kstrtab_ceph_msg_data_add_pagelist[27U] = { 'c', 'e', 'p', 'h', '_', 'm', 's', 'g', '_', 'd', 'a', 't', 'a', '_', 'a', 'd', 'd', '_', 'p', 'a', 'g', 'e', 'l', 'i', 's', 't', '\x0' };
3195 const struct kernel_symbol __ksymtab_ceph_msg_data_add_pagelist;
3195 const struct kernel_symbol __ksymtab_ceph_msg_data_add_pagelist = { (unsigned long)(&ceph_msg_data_add_pagelist), (const char *)(&__kstrtab_ceph_msg_data_add_pagelist) };
3213 const char __kstrtab_ceph_msg_data_add_bio[22U] = { 'c', 'e', 'p', 'h', '_', 'm', 's', 'g', '_', 'd', 'a', 't', 'a', '_', 'a', 'd', 'd', '_', 'b', 'i', 'o', '\x0' };
3213 const struct kernel_symbol __ksymtab_ceph_msg_data_add_bio;
3213 const struct kernel_symbol __ksymtab_ceph_msg_data_add_bio = { (unsigned long)(&ceph_msg_data_add_bio), (const char *)(&__kstrtab_ceph_msg_data_add_bio) };
3266 const char __kstrtab_ceph_msg_new[13U] = { 'c', 'e', 'p', 'h', '_', 'm', 's', 'g', '_', 'n', 'e', 'w', '\x0' };
3266 const struct kernel_symbol __ksymtab_ceph_msg_new;
3266 const struct kernel_symbol __ksymtab_ceph_msg_new = { (unsigned long)(&ceph_msg_new), (const char *)(&__kstrtab_ceph_msg_new) };
3275 int ceph_alloc_middle(struct ceph_connection *con, struct ceph_msg *msg);
3358 void ceph_msg_free(struct ceph_msg *m);
3365 void ceph_msg_release(struct kref *kref);
3404 const char __kstrtab_ceph_msg_get[13U] = { 'c', 'e', 'p', 'h', '_', 'm', 's', 'g', '_', 'g', 'e', 't', '\x0' };
3404 const struct kernel_symbol __ksymtab_ceph_msg_get;
3404 const struct kernel_symbol __ksymtab_ceph_msg_get = { (unsigned long)(&ceph_msg_get), (const char *)(&__kstrtab_ceph_msg_get) };
3412 const char __kstrtab_ceph_msg_put[13U] = { 'c', 'e', 'p', 'h', '_', 'm', 's', 'g', '_', 'p', 'u', 't', '\x0' };
3412 const struct kernel_symbol __ksymtab_ceph_msg_put;
3412 const struct kernel_symbol __ksymtab_ceph_msg_put = { (unsigned long)(&ceph_msg_put), (const char *)(&__kstrtab_ceph_msg_put) };
3433 const char __kstrtab_ceph_msg_dump[14U] = { 'c', 'e', 'p', 'h', '_', 'm', 's', 'g', '_', 'd', 'u', 'm', 'p', '\x0' };
3433 const struct kernel_symbol __ksymtab_ceph_msg_dump;
3433 const struct kernel_symbol __ksymtab_ceph_msg_dump = { (unsigned long)(&ceph_msg_dump), (const char *)(&__kstrtab_ceph_msg_dump) };
3472 void ldv_main1_sequence_infinite_withcheck_stateful();
178 void ldv_mutex_unlock_80(struct mutex *ldv_func_arg1);
10 void ldv_mutex_lock_79(struct mutex *ldv_func_arg1);
26 mempool_t * mempool_create(int, mempool_alloc_t *, mempool_free_t *, void *);
33 void mempool_destroy(mempool_t *);
34 void * mempool_alloc(mempool_t *, gfp_t );
35 void mempool_free(void *, mempool_t *);
18 int ceph_msgpool_init(struct ceph_msgpool *pool, int type, int front_len, int size, bool blocking, const char *name);
21 void ceph_msgpool_destroy(struct ceph_msgpool *pool);
22 struct ceph_msg * ceph_msgpool_get(struct ceph_msgpool *pool, int front_len);
89 void * msgpool_alloc(gfp_t gfp_mask, void *arg);
104 void msgpool_free(void *element, void *arg);
55 __u32 __le32_to_cpup(const __le32 *p);
178 void ldv_mutex_unlock_84(struct mutex *ldv_func_arg1);
182 void ldv_mutex_unlock_85(struct mutex *ldv_func_arg1);
10 void ldv_mutex_lock_83(struct mutex *ldv_func_arg1);
14 void ldv_mutex_lock_86(struct mutex *ldv_func_arg1);
36 int ceph_decode_buffer(struct ceph_buffer **b, void **p, void *end);
12 u32 get_unaligned_le32(const void *p);
23 u32 ceph_decode_32(void **p);
41 void ceph_decode_copy(void **p, void *pv, size_t n);
50 int ceph_has_room(void **p, void *end, size_t n);
110 const char __kstrtab_ceph_buffer_new[16U] = { 'c', 'e', 'p', 'h', '_', 'b', 'u', 'f', 'f', 'e', 'r', '_', 'n', 'e', 'w', '\x0' };
110 const struct kernel_symbol __ksymtab_ceph_buffer_new;
110 const struct kernel_symbol __ksymtab_ceph_buffer_new = { (unsigned long)(&ceph_buffer_new), (const char *)(&__kstrtab_ceph_buffer_new) };
120 const char __kstrtab_ceph_buffer_release[20U] = { 'c', 'e', 'p', 'h', '_', 'b', 'u', 'f', 'f', 'e', 'r', '_', 'r', 'e', 'l', 'e', 'a', 's', 'e', '\x0' };
120 const struct kernel_symbol __ksymtab_ceph_buffer_release;
120 const struct kernel_symbol __ksymtab_ceph_buffer_release = { (unsigned long)(&ceph_buffer_release), (const char *)(&__kstrtab_ceph_buffer_release) };
112 void list_del(struct list_head *);
178 void ldv_mutex_unlock_92(struct mutex *ldv_func_arg1);
182 void ldv_mutex_unlock_93(struct mutex *ldv_func_arg1);
10 void ldv_mutex_lock_91(struct mutex *ldv_func_arg1);
14 void ldv_mutex_lock_94(struct mutex *ldv_func_arg1);
371 void __free_pages(struct page *, unsigned int);
231 struct page * __page_cache_alloc(gfp_t );
33 int ceph_pagelist_append(struct ceph_pagelist *pl, const void *buf, size_t len);
35 int ceph_pagelist_reserve(struct ceph_pagelist *pl, size_t space);
37 int ceph_pagelist_free_reserve(struct ceph_pagelist *pl);
39 void ceph_pagelist_set_cursor(struct ceph_pagelist *pl, struct ceph_pagelist_cursor *c);
42 int ceph_pagelist_truncate(struct ceph_pagelist *pl, struct ceph_pagelist_cursor *c);
86 void ceph_pagelist_unmap_tail(struct ceph_pagelist *pl);
107 const char __kstrtab_ceph_pagelist_release[22U] = { 'c', 'e', 'p', 'h', '_', 'p', 'a', 'g', 'e', 'l', 'i', 's', 't', '_', 'r', 'e', 'l', 'e', 'a', 's', 'e', '\x0' };
107 const struct kernel_symbol __ksymtab_ceph_pagelist_release;
107 const struct kernel_symbol __ksymtab_ceph_pagelist_release = { (unsigned long)(&ceph_pagelist_release), (const char *)(&__kstrtab_ceph_pagelist_release) };
109 int ceph_pagelist_addpage(struct ceph_pagelist *pl);
151 const char __kstrtab_ceph_pagelist_append[21U] = { 'c', 'e', 'p', 'h', '_', 'p', 'a', 'g', 'e', 'l', 'i', 's', 't', '_', 'a', 'p', 'p', 'e', 'n', 'd', '\x0' };
151 const struct kernel_symbol __ksymtab_ceph_pagelist_append;
151 const struct kernel_symbol __ksymtab_ceph_pagelist_append = { (unsigned long)(&ceph_pagelist_append), (const char *)(&__kstrtab_ceph_pagelist_append) };
173 const char __kstrtab_ceph_pagelist_reserve[22U] = { 'c', 'e', 'p', 'h', '_', 'p', 'a', 'g', 'e', 'l', 'i', 's', 't', '_', 'r', 'e', 's', 'e', 'r', 'v', 'e', '\x0' };
173 const struct kernel_symbol __ksymtab_ceph_pagelist_reserve;
173 const struct kernel_symbol __ksymtab_ceph_pagelist_reserve = { (unsigned long)(&ceph_pagelist_reserve), (const char *)(&__kstrtab_ceph_pagelist_reserve) };
188 const char __kstrtab_ceph_pagelist_free_reserve[27U] = { 'c', 'e', 'p', 'h', '_', 'p', 'a', 'g', 'e', 'l', 'i', 's', 't', '_', 'f', 'r', 'e', 'e', '_', 'r', 'e', 's', 'e', 'r', 'v', 'e', '\x0' };
188 const struct kernel_symbol __ksymtab_ceph_pagelist_free_reserve;
188 const struct kernel_symbol __ksymtab_ceph_pagelist_free_reserve = { (unsigned long)(&ceph_pagelist_free_reserve), (const char *)(&__kstrtab_ceph_pagelist_free_reserve) };
198 const char __kstrtab_ceph_pagelist_set_cursor[25U] = { 'c', 'e', 'p', 'h', '_', 'p', 'a', 'g', 'e', 'l', 'i', 's', 't', '_', 's', 'e', 't', '_', 'c', 'u', 'r', 's', 'o', 'r', '\x0' };
198 const struct kernel_symbol __ksymtab_ceph_pagelist_set_cursor;
198 const struct kernel_symbol __ksymtab_ceph_pagelist_set_cursor = { (unsigned long)(&ceph_pagelist_set_cursor), (const char *)(&__kstrtab_ceph_pagelist_set_cursor) };
226 const char __kstrtab_ceph_pagelist_truncate[23U] = { 'c', 'e', 'p', 'h', '_', 'p', 'a', 'g', 'e', 'l', 'i', 's', 't', '_', 't', 'r', 'u', 'n', 'c', 'a', 't', 'e', '\x0' };
226 const struct kernel_symbol __ksymtab_ceph_pagelist_truncate;
226 const struct kernel_symbol __ksymtab_ceph_pagelist_truncate = { (unsigned long)(&ceph_pagelist_truncate), (const char *)(&__kstrtab_ceph_pagelist_truncate) };
47 __u64 __le64_to_cpup(const __le64 *p);
63 __u16 __le16_to_cpup(const __le16 *p);
178 void ldv_mutex_unlock_100(struct mutex *ldv_func_arg1);
182 void ldv_mutex_unlock_101(struct mutex *ldv_func_arg1);
186 void ldv_mutex_unlock_104(struct mutex *ldv_func_arg1);
190 void ldv_mutex_unlock_106(struct mutex *ldv_func_arg1);
194 void ldv_mutex_unlock_108(struct mutex *ldv_func_arg1);
198 void ldv_mutex_unlock_110(struct mutex *ldv_func_arg1);
202 void ldv_mutex_unlock_112(struct mutex *ldv_func_arg1);
206 void ldv_mutex_unlock_114(struct mutex *ldv_func_arg1);
210 void ldv_mutex_unlock_116(struct mutex *ldv_func_arg1);
214 void ldv_mutex_unlock_118(struct mutex *ldv_func_arg1);
218 void ldv_mutex_unlock_119(struct mutex *ldv_func_arg1);
222 void ldv_mutex_unlock_121(struct mutex *ldv_func_arg1);
226 void ldv_mutex_unlock_122(struct mutex *ldv_func_arg1);
230 void ldv_mutex_unlock_125(struct mutex *ldv_func_arg1);
234 void ldv_mutex_unlock_127(struct mutex *ldv_func_arg1);
238 void ldv_mutex_unlock_129(struct mutex *ldv_func_arg1);
242 void ldv_mutex_unlock_131(struct mutex *ldv_func_arg1);
246 void ldv_mutex_unlock_133(struct mutex *ldv_func_arg1);
250 void ldv_mutex_unlock_134(struct mutex *ldv_func_arg1);
254 void ldv_mutex_unlock_136(struct mutex *ldv_func_arg1);
258 void ldv_mutex_unlock_138(struct mutex *ldv_func_arg1);
262 void ldv_mutex_unlock_140(struct mutex *ldv_func_arg1);
266 void ldv_mutex_unlock_142(struct mutex *ldv_func_arg1);
270 void ldv_mutex_unlock_144(struct mutex *ldv_func_arg1);
10 void ldv_mutex_lock_99(struct mutex *ldv_func_arg1);
14 void ldv_mutex_lock_102(struct mutex *ldv_func_arg1);
18 void ldv_mutex_lock_103(struct mutex *ldv_func_arg1);
22 void ldv_mutex_lock_105(struct mutex *ldv_func_arg1);
26 void ldv_mutex_lock_107(struct mutex *ldv_func_arg1);
30 void ldv_mutex_lock_109(struct mutex *ldv_func_arg1);
34 void ldv_mutex_lock_111(struct mutex *ldv_func_arg1);
38 void ldv_mutex_lock_113(struct mutex *ldv_func_arg1);
42 void ldv_mutex_lock_115(struct mutex *ldv_func_arg1);
46 void ldv_mutex_lock_117(struct mutex *ldv_func_arg1);
50 void ldv_mutex_lock_120(struct mutex *ldv_func_arg1);
54 void ldv_mutex_lock_123(struct mutex *ldv_func_arg1);
58 void ldv_mutex_lock_124(struct mutex *ldv_func_arg1);
62 void ldv_mutex_lock_126(struct mutex *ldv_func_arg1);
66 void ldv_mutex_lock_128(struct mutex *ldv_func_arg1);
70 void ldv_mutex_lock_130(struct mutex *ldv_func_arg1);
74 void ldv_mutex_lock_132(struct mutex *ldv_func_arg1);
78 void ldv_mutex_lock_135(struct mutex *ldv_func_arg1);
82 void ldv_mutex_lock_137(struct mutex *ldv_func_arg1);
86 void ldv_mutex_lock_139(struct mutex *ldv_func_arg1);
90 void ldv_mutex_lock_141(struct mutex *ldv_func_arg1);
94 void ldv_mutex_lock_143(struct mutex *ldv_func_arg1);
147 void ldv_mutex_lock_mutex_of_ceph_mon_client(struct mutex *lock);
151 void ldv_mutex_unlock_mutex_of_ceph_mon_client(struct mutex *lock);
146 void __wake_up(wait_queue_head_t *, unsigned int, int, void *);
73 void init_completion(struct completion *x);
93 int wait_for_completion_interruptible(struct completion *);
106 void complete(struct completion *);
107 void complete_all(struct completion *);
355 extern struct workqueue_struct *system_wq;
451 bool cancel_delayed_work_sync(struct delayed_work *);
558 bool schedule_delayed_work(struct delayed_work *dwork, unsigned long delay);
61 void rb_insert_color(struct rb_node *, struct rb_root *);
62 void rb_erase(struct rb_node *, struct rb_root *);
66 struct rb_node * rb_next(const struct rb_node *);
68 struct rb_node * rb_first(const struct rb_root *);
79 void rb_link_node(struct rb_node *node, struct rb_node *parent, struct rb_node **rb_link);
90 struct ceph_monmap * ceph_monmap_decode(void *p, void *end);
103 int ceph_monc_got_mdsmap(struct ceph_mon_client *monc, u32 got);
104 int ceph_monc_got_osdmap(struct ceph_mon_client *monc, u32 got);
106 void ceph_monc_request_next_osdmap(struct ceph_mon_client *monc);
107 int ceph_monc_wait_osdmap(struct ceph_mon_client *monc, u32 epoch, unsigned long timeout);
110 int ceph_monc_do_statfs(struct ceph_mon_client *monc, struct ceph_statfs *buf);
113 int ceph_monc_do_get_version(struct ceph_mon_client *monc, const char *what, u64 *newest);
118 int ceph_monc_validate_auth(struct ceph_mon_client *monc);
120 int ceph_monc_create_snapid(struct ceph_mon_client *monc, u32 pool, u64 *snapid);
123 int ceph_monc_delete_snapid(struct ceph_mon_client *monc, u32 pool, u64 snapid);
7 u16 get_unaligned_le16(const void *p);
17 u64 get_unaligned_le64(const void *p);
42 void put_unaligned_le32(u32 val, void *p);
47 void put_unaligned_le64(u64 val, void *p);
17 u64 ceph_decode_64(void **p);
29 u16 ceph_decode_16(void **p);
168 void ceph_encode_64(void **p, u64 v);
173 void ceph_encode_32(void **p, u32 v);
210 void ceph_encode_string(void **p, void *end, const char *s, u32 len);
85 struct ceph_auth_client * ceph_auth_init(const char *name, const struct ceph_crypto_key *key);
87 void ceph_auth_destroy(struct ceph_auth_client *ac);
89 void ceph_auth_reset(struct ceph_auth_client *ac);
91 int ceph_auth_build_hello(struct ceph_auth_client *ac, void *buf, size_t len);
93 int ceph_handle_auth_reply(struct ceph_auth_client *ac, void *buf, size_t len, void *reply_buf, size_t reply_len);
98 int ceph_build_auth(struct ceph_auth_client *ac, void *msg_buf, size_t msg_len);
101 int ceph_auth_is_authenticated(struct ceph_auth_client *ac);
241 void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg);
29 int ceph_debugfs_client_init(struct ceph_client *client);
113 const struct ceph_connection_operations mon_con_ops;
115 int __validate_auth(struct ceph_mon_client *monc);
184 void __send_prepared_auth_request(struct ceph_mon_client *monc, int len);
197 void __close_session(struct ceph_mon_client *monc);
213 int __open_session(struct ceph_mon_client *monc);
243 bool __sub_expired(struct ceph_mon_client *monc);
251 void __schedule_delayed(struct ceph_mon_client *monc);
266 void __send_subscribe(struct ceph_mon_client *monc);
318 void handle_subscribe_ack(struct ceph_mon_client *monc, struct ceph_msg *msg);
355 const char __kstrtab_ceph_monc_got_mdsmap[21U] = { 'c', 'e', 'p', 'h', '_', 'm', 'o', 'n', 'c', '_', 'g', 'o', 't', '_', 'm', 'd', 's', 'm', 'a', 'p', '\x0' };
355 const struct kernel_symbol __ksymtab_ceph_monc_got_mdsmap;
355 const struct kernel_symbol __ksymtab_ceph_monc_got_mdsmap = { (unsigned long)(&ceph_monc_got_mdsmap), (const char *)(&__kstrtab_ceph_monc_got_mdsmap) };
379 const char __kstrtab_ceph_monc_request_next_osdmap[30U] = { 'c', 'e', 'p', 'h', '_', 'm', 'o', 'n', 'c', '_', 'r', 'e', 'q', 'u', 'e', 's', 't', '_', 'n', 'e', 'x', 't', '_', 'o', 's', 'd', 'm', 'a', 'p', '\x0' };
379 const struct kernel_symbol __ksymtab_ceph_monc_request_next_osdmap;
379 const struct kernel_symbol __ksymtab_ceph_monc_request_next_osdmap = { (unsigned long)(&ceph_monc_request_next_osdmap), (const char *)(&__kstrtab_ceph_monc_request_next_osdmap) };
405 const char __kstrtab_ceph_monc_wait_osdmap[22U] = { 'c', 'e', 'p', 'h', '_', 'm', 'o', 'n', 'c', '_', 'w', 'a', 'i', 't', '_', 'o', 's', 'd', 'm', 'a', 'p', '\x0' };
405 const struct kernel_symbol __ksymtab_ceph_monc_wait_osdmap;
405 const struct kernel_symbol __ksymtab_ceph_monc_wait_osdmap = { (unsigned long)(&ceph_monc_wait_osdmap), (const char *)(&__kstrtab_ceph_monc_wait_osdmap) };
418 const char __kstrtab_ceph_monc_open_session[23U] = { 'c', 'e', 'p', 'h', '_', 'm', 'o', 'n', 'c', '_', 'o', 'p', 'e', 'n', '_', 's', 'e', 's', 's', 'i', 'o', 'n', '\x0' };
418 const struct kernel_symbol __ksymtab_ceph_monc_open_session;
418 const struct kernel_symbol __ksymtab_ceph_monc_open_session = { (unsigned long)(&ceph_monc_open_session), (const char *)(&__kstrtab_ceph_monc_open_session) };
424 bool have_debugfs_info(struct ceph_mon_client *monc);
435 void ceph_monc_handle_map(struct ceph_mon_client *monc, struct ceph_msg *msg);
495 struct ceph_mon_generic_request * __lookup_generic_req(struct ceph_mon_client *monc, u64 tid);
513 void __insert_generic_request(struct ceph_mon_client *monc, struct ceph_mon_generic_request *new);
535 void release_generic_request(struct kref *kref);
548 void put_generic_request(struct ceph_mon_generic_request *req);
553 void get_generic_request(struct ceph_mon_generic_request *req);
558 struct ceph_msg * get_generic_reply(struct ceph_connection *con, struct ceph_msg_header *hdr, int *skip);
587 int __do_generic_request(struct ceph_mon_client *monc, u64 tid, struct ceph_mon_generic_request *req);
611 int do_generic_request(struct ceph_mon_client *monc, struct ceph_mon_generic_request *req);
626 void handle_statfs_reply(struct ceph_mon_client *monc, struct ceph_msg *msg);
697 const char __kstrtab_ceph_monc_do_statfs[20U] = { 'c', 'e', 'p', 'h', '_', 'm', 'o', 'n', 'c', '_', 'd', 'o', '_', 's', 't', 'a', 't', 'f', 's', '\x0' };
697 const struct kernel_symbol __ksymtab_ceph_monc_do_statfs;
697 const struct kernel_symbol __ksymtab_ceph_monc_do_statfs = { (unsigned long)(&ceph_monc_do_statfs), (const char *)(&__kstrtab_ceph_monc_do_statfs) };
699 void handle_get_version_reply(struct ceph_mon_client *monc, struct ceph_msg *msg);
787 const char __kstrtab_ceph_monc_do_get_version[25U] = { 'c', 'e', 'p', 'h', '_', 'm', 'o', 'n', 'c', '_', 'd', 'o', '_', 'g', 'e', 't', '_', 'v', 'e', 'r', 's', 'i', 'o', 'n', '\x0' };
787 const struct kernel_symbol __ksymtab_ceph_monc_do_get_version;
787 const struct kernel_symbol __ksymtab_ceph_monc_do_get_version = { (unsigned long)(&ceph_monc_do_get_version), (const char *)(&__kstrtab_ceph_monc_do_get_version) };
792 int get_poolop_reply_buf(const char *src, size_t src_len, char *dst, size_t dst_len);
808 void handle_poolop_reply(struct ceph_mon_client *monc, struct ceph_msg *msg);
847 int do_poolop(struct ceph_mon_client *monc, u32 op, u32 pool, u64 snapid, char *buf, int len);
901 const char __kstrtab_ceph_monc_create_snapid[24U] = { 'c', 'e', 'p', 'h', '_', 'm', 'o', 'n', 'c', '_', 'c', 'r', 'e', 'a', 't', 'e', '_', 's', 'n', 'a', 'p', 'i', 'd', '\x0' };
901 const struct kernel_symbol __ksymtab_ceph_monc_create_snapid;
901 const struct kernel_symbol __ksymtab_ceph_monc_create_snapid = { (unsigned long)(&ceph_monc_create_snapid), (const char *)(&__kstrtab_ceph_monc_create_snapid) };
914 void __resend_generic_request(struct ceph_mon_client *monc);
932 void delayed_work(struct work_struct *work);
958 int build_initial_monmap(struct ceph_mon_client *monc);
1062 const char __kstrtab_ceph_monc_init[15U] = { 'c', 'e', 'p', 'h', '_', 'm', 'o', 'n', 'c', '_', 'i', 'n', 'i', 't', '\x0' };
1062 const struct kernel_symbol __ksymtab_ceph_monc_init;
1062 const struct kernel_symbol __ksymtab_ceph_monc_init = { (unsigned long)(&ceph_monc_init), (const char *)(&__kstrtab_ceph_monc_init) };
1091 const char __kstrtab_ceph_monc_stop[15U] = { 'c', 'e', 'p', 'h', '_', 'm', 'o', 'n', 'c', '_', 's', 't', 'o', 'p', '\x0' };
1091 const struct kernel_symbol __ksymtab_ceph_monc_stop;
1091 const struct kernel_symbol __ksymtab_ceph_monc_stop = { (unsigned long)(&ceph_monc_stop), (const char *)(&__kstrtab_ceph_monc_stop) };
1093 void handle_auth_reply(struct ceph_mon_client *monc, struct ceph_msg *msg);
1165 const char __kstrtab_ceph_monc_validate_auth[24U] = { 'c', 'e', 'p', 'h', '_', 'm', 'o', 'n', 'c', '_', 'v', 'a', 'l', 'i', 'd', 'a', 't', 'e', '_', 'a', 'u', 't', 'h', '\x0' };
1165 const struct kernel_symbol __ksymtab_ceph_monc_validate_auth;
1165 const struct kernel_symbol __ksymtab_ceph_monc_validate_auth = { (unsigned long)(&ceph_monc_validate_auth), (const char *)(&__kstrtab_ceph_monc_validate_auth) };
1170 void dispatch(struct ceph_connection *con, struct ceph_msg *msg);
1222 struct ceph_msg * mon_alloc_msg(struct ceph_connection *con, struct ceph_msg_header *hdr, int *skip);
1272 void mon_fault(struct ceph_connection *con);
1307 struct ceph_connection * con_get(struct ceph_connection *con);
1312 void con_put(struct ceph_connection *con);
1316 const struct ceph_connection_operations mon_con_ops = { &con_get, &con_put, &dispatch, 0, 0, 0, &mon_fault, 0, &mon_alloc_msg };
1361 void ldv_main5_sequence_infinite_withcheck_stateful();
60 void list_add(struct list_head *new, struct list_head *head);
153 void list_move(struct list_head *list, struct list_head *head);
292 void list_splice(const struct list_head *list, struct list_head *head);
15 void __cmpxchg_wrong_size();
116 int atomic_dec_and_test(atomic_t *v);
175 int atomic_cmpxchg(atomic_t *v, int old, int new);
194 int __atomic_add_unless(atomic_t *v, int a, int u);
51 int atomic_add_unless(atomic_t *v, int a, int u);
178 void ldv_mutex_unlock_192(struct mutex *ldv_func_arg1);
182 void ldv_mutex_unlock_193(struct mutex *ldv_func_arg1);
186 void ldv_mutex_unlock_196(struct mutex *ldv_func_arg1);
190 void ldv_mutex_unlock_198(struct mutex *ldv_func_arg1);
194 void ldv_mutex_unlock_200(struct mutex *ldv_func_arg1);
198 void ldv_mutex_unlock_202(struct mutex *ldv_func_arg1);
202 void ldv_mutex_unlock_204(struct mutex *ldv_func_arg1);
206 void ldv_mutex_unlock_205(struct mutex *ldv_func_arg1);
210 void ldv_mutex_unlock_206(struct mutex *ldv_func_arg1);
214 void ldv_mutex_unlock_208(struct mutex *ldv_func_arg1);
218 void ldv_mutex_unlock_210(struct mutex *ldv_func_arg1);
222 void ldv_mutex_unlock_212(struct mutex *ldv_func_arg1);
226 void ldv_mutex_unlock_214(struct mutex *ldv_func_arg1);
230 void ldv_mutex_unlock_216(struct mutex *ldv_func_arg1);
234 void ldv_mutex_unlock_218(struct mutex *ldv_func_arg1);
238 void ldv_mutex_unlock_220(struct mutex *ldv_func_arg1);
10 void ldv_mutex_lock_191(struct mutex *ldv_func_arg1);
14 void ldv_mutex_lock_194(struct mutex *ldv_func_arg1);
18 void ldv_mutex_lock_195(struct mutex *ldv_func_arg1);
22 void ldv_mutex_lock_197(struct mutex *ldv_func_arg1);
26 void ldv_mutex_lock_199(struct mutex *ldv_func_arg1);
30 void ldv_mutex_lock_201(struct mutex *ldv_func_arg1);
34 void ldv_mutex_lock_203(struct mutex *ldv_func_arg1);
38 void ldv_mutex_lock_207(struct mutex *ldv_func_arg1);
42 void ldv_mutex_lock_209(struct mutex *ldv_func_arg1);
46 void ldv_mutex_lock_211(struct mutex *ldv_func_arg1);
50 void ldv_mutex_lock_213(struct mutex *ldv_func_arg1);
54 void ldv_mutex_lock_215(struct mutex *ldv_func_arg1);
58 void ldv_mutex_lock_217(struct mutex *ldv_func_arg1);
62 void ldv_mutex_lock_219(struct mutex *ldv_func_arg1);
131 void ldv_mutex_lock_request_mutex_of_ceph_osd_client(struct mutex *lock);
135 void ldv_mutex_unlock_request_mutex_of_ceph_osd_client(struct mutex *lock);
84 void __init_rwsem(struct rw_semaphore *, const char *, struct lock_class_key *);
108 void down_read(struct rw_semaphore *);
118 void down_write(struct rw_semaphore *);
128 void up_read(struct rw_semaphore *);
133 void up_write(struct rw_semaphore *);
138 void downgrade_write(struct rw_semaphore *);
91 void wait_for_completion(struct completion *);
431 bool queue_work_on(int, struct workqueue_struct *, struct work_struct *);
471 bool queue_work(struct workqueue_struct *wq, struct work_struct *work);
54 void * mempool_kmalloc(gfp_t , void *);
55 void mempool_kfree(void *, void *);
56 mempool_t * mempool_create_kmalloc_pool(int min_nr, size_t size);
37 void put_unaligned_le16(u16 val, void *p);
332 const char * ceph_osd_op_name(int op);
35 u8 ceph_decode_8(void **p);
143 void ceph_encode_timespec(struct ceph_timespec *tv, const struct timespec *ts);
178 void ceph_encode_16(void **p, u16 v);
183 void ceph_encode_8(void **p, u8 v);
132 void ceph_oid_copy(struct ceph_object_id *dest, struct ceph_object_id *src);
140 int ceph_osd_exists(struct ceph_osdmap *map, int osd);
146 int ceph_osd_is_up(struct ceph_osdmap *map, int osd);
157 bool ceph_osdmap_flag(struct ceph_osdmap *map, int flag);
165 struct ceph_entity_addr * ceph_osd_addr(struct ceph_osdmap *map, int osd);
173 int ceph_decode_pgid(void **p, void *end, struct ceph_pg *pgid);
196 struct ceph_osdmap * ceph_osdmap_decode(void **p, void *end);
197 struct ceph_osdmap * osdmap_apply_incremental(void **p, void *end, struct ceph_osdmap *map, struct ceph_messenger *msgr);
200 void ceph_osdmap_destroy(struct ceph_osdmap *map);
203 int ceph_calc_file_object_mapping(struct ceph_file_layout *layout, u64 off, u64 len, u64 *ono, u64 *oxoff, u64 *oxlen);
208 int ceph_oloc_oid_to_pg(struct ceph_osdmap *osdmap, struct ceph_object_locator *oloc, struct ceph_object_id *oid, struct ceph_pg *pg_out);
213 int ceph_calc_pg_acting(struct ceph_osdmap *osdmap, struct ceph_pg pgid, int *osds, int *primary);
219 struct ceph_pg_pool_info * ceph_pg_pool_by_id(struct ceph_osdmap *map, u64 id);
102 int ceph_auth_create_authorizer(struct ceph_auth_client *ac, int peer_type, struct ceph_auth_handshake *auth);
105 void ceph_auth_destroy_authorizer(struct ceph_auth_client *ac, struct ceph_authorizer *a);
107 int ceph_auth_update_authorizer(struct ceph_auth_client *ac, int peer_type, struct ceph_auth_handshake *a);
110 int ceph_auth_verify_authorizer_reply(struct ceph_auth_client *ac, struct ceph_authorizer *a, size_t len);
113 void ceph_auth_invalidate_authorizer(struct ceph_auth_client *ac, int peer_type);
21 void ceph_pagelist_init(struct ceph_pagelist *pl);
244 void osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which, u16 opcode);
247 void osd_req_op_raw_data_in_pages(struct ceph_osd_request *osd_req, unsigned int which, struct page **pages, u64 length, u32 alignment, bool pages_from_pool, bool own_pages);
253 void osd_req_op_extent_init(struct ceph_osd_request *osd_req, unsigned int which, u16 opcode, u64 offset, u64 length, u64 truncate_size, u32 truncate_seq);
257 void osd_req_op_extent_update(struct ceph_osd_request *osd_req, unsigned int which, u64 length);
260 struct ceph_osd_data * osd_req_op_extent_osd_data(struct ceph_osd_request *osd_req, unsigned int which);
263 struct ceph_osd_data * osd_req_op_cls_response_data(struct ceph_osd_request *osd_req, unsigned int which);
267 void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *osd_req, unsigned int which, struct page **pages, u64 length, u32 alignment, bool pages_from_pool, bool own_pages);
272 void osd_req_op_extent_osd_data_pagelist(struct ceph_osd_request *osd_req, unsigned int which, struct ceph_pagelist *pagelist);
276 void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *osd_req, unsigned int which, struct bio *bio, size_t bio_length);
281 void osd_req_op_cls_request_data_pagelist(struct ceph_osd_request *osd_req, unsigned int which, struct ceph_pagelist *pagelist);
284 void osd_req_op_cls_request_data_pages(struct ceph_osd_request *osd_req, unsigned int which, struct page **pages, u64 length, u32 alignment, bool pages_from_pool, bool own_pages);
289 void osd_req_op_cls_response_data_pages(struct ceph_osd_request *osd_req, unsigned int which, struct page **pages, u64 length, u32 alignment, bool pages_from_pool, bool own_pages);
295 void osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which, u16 opcode, const char *class, const char *method);
298 void osd_req_op_watch_init(struct ceph_osd_request *osd_req, unsigned int which, u16 opcode, u64 cookie, u64 version, int flag);
301 void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req, unsigned int which, u64 expected_object_size, u64 expected_write_size);
306 struct ceph_osd_request * ceph_osdc_alloc_request(struct ceph_osd_client *osdc, struct ceph_snap_context *snapc, unsigned int num_ops, bool use_mempool, gfp_t gfp_flags);
312 void ceph_osdc_build_request(struct ceph_osd_request *req, u64 off, struct ceph_snap_context *snapc, u64 snap_id, struct timespec *mtime);
317 struct ceph_osd_request * ceph_osdc_new_request(struct ceph_osd_client *osdc, struct ceph_file_layout *layout, struct ceph_vino vino, u64 off, u64 *plen, int num_ops, int opcode, int flags, struct ceph_snap_context *snapc, u32 truncate_seq, u64 truncate_size, bool use_mempool);
326 void ceph_osdc_set_request_linger(struct ceph_osd_client *osdc, struct ceph_osd_request *req);
329 void ceph_osdc_get_request(struct ceph_osd_request *req);
330 void ceph_osdc_put_request(struct ceph_osd_request *req);
332 int ceph_osdc_start_request(struct ceph_osd_client *osdc, struct ceph_osd_request *req, bool nofail);
335 void ceph_osdc_cancel_request(struct ceph_osd_request *req);
336 int ceph_osdc_wait_request(struct ceph_osd_client *osdc, struct ceph_osd_request *req);
338 void ceph_osdc_sync(struct ceph_osd_client *osdc);
340 void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc);
342 int ceph_osdc_readpages(struct ceph_osd_client *osdc, struct ceph_vino vino, struct ceph_file_layout *layout, u64 off, u64 *plen, u32 truncate_seq, u64 truncate_size, struct page **pages, int num_pages, int page_align);
350 int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino, struct ceph_file_layout *layout, struct ceph_snap_context *snapc, u64 off, u64 len, u32 truncate_seq, u64 truncate_size, struct timespec *mtime, struct page **pages, int num_pages);
360 int ceph_osdc_create_event(struct ceph_osd_client *osdc, void (*event_cb)(u64 , u64 , u8 , void *), void *data, struct ceph_osd_event **pevent);
363 void ceph_osdc_cancel_event(struct ceph_osd_event *event);
364 void ceph_osdc_put_event(struct ceph_osd_event *event);
162 struct ceph_snap_context * ceph_get_snap_context(struct ceph_snap_context *sc);
164 void ceph_put_snap_context(struct ceph_snap_context *sc);
207 void ceph_release_page_vector(struct page **pages, int num_pages);
105 struct kmem_cache *ceph_osd_request_cache = 0;
107 const struct ceph_connection_operations osd_con_ops;
109 void __send_queued(struct ceph_osd_client *osdc);
110 int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd);
111 void __register_request(struct ceph_osd_client *osdc, struct ceph_osd_request *req);
113 void __unregister_linger_request(struct ceph_osd_client *osdc, struct ceph_osd_request *req);
115 void __send_request(struct ceph_osd_client *osdc, struct ceph_osd_request *req);
143 int calc_layout(struct ceph_file_layout *layout, u64 off, u64 *plen, u64 *objnum, u64 *objoff, u64 *objlen);
165 void ceph_osd_data_init(struct ceph_osd_data *osd_data);
171 void ceph_osd_data_pages_init(struct ceph_osd_data *osd_data, struct page **pages, u64 length, u32 alignment, bool pages_from_pool, bool own_pages);
183 void ceph_osd_data_pagelist_init(struct ceph_osd_data *osd_data, struct ceph_pagelist *pagelist);
191 void ceph_osd_data_bio_init(struct ceph_osd_data *osd_data, struct bio *bio, size_t bio_length);
207 struct ceph_osd_data * osd_req_op_raw_data_in(struct ceph_osd_request *osd_req, unsigned int which);
220 const char __kstrtab_osd_req_op_extent_osd_data[27U] = { 'o', 's', 'd', '_', 'r', 'e', 'q', '_', 'o', 'p', '_', 'e', 'x', 't', 'e', 'n', 't', '_', 'o', 's', 'd', '_', 'd', 'a', 't', 'a', '\x0' };
220 const struct kernel_symbol __ksymtab_osd_req_op_extent_osd_data;
220 const struct kernel_symbol __ksymtab_osd_req_op_extent_osd_data = { (unsigned long)(&osd_req_op_extent_osd_data), (const char *)(&__kstrtab_osd_req_op_extent_osd_data) };
228 const char __kstrtab_osd_req_op_cls_response_data[29U] = { 'o', 's', 'd', '_', 'r', 'e', 'q', '_', 'o', 'p', '_', 'c', 'l', 's', '_', 'r', 'e', 's', 'p', 'o', 'n', 's', 'e', '_', 'd', 'a', 't', 'a', '\x0' };
228 const struct kernel_symbol __ksymtab_osd_req_op_cls_response_data;
228 const struct kernel_symbol __ksymtab_osd_req_op_cls_response_data = { (unsigned long)(&osd_req_op_cls_response_data), (const char *)(&__kstrtab_osd_req_op_cls_response_data) };
241 const char __kstrtab_osd_req_op_raw_data_in_pages[29U] = { 'o', 's', 'd', '_', 'r', 'e', 'q', '_', 'o', 'p', '_', 'r', 'a', 'w', '_', 'd', 'a', 't', 'a', '_', 'i', 'n', '_', 'p', 'a', 'g', 'e', 's', '\x0' };
241 const struct kernel_symbol __ksymtab_osd_req_op_raw_data_in_pages;
241 const struct kernel_symbol __ksymtab_osd_req_op_raw_data_in_pages = { (unsigned long)(&osd_req_op_raw_data_in_pages), (const char *)(&__kstrtab_osd_req_op_raw_data_in_pages) };
254 const char __kstrtab_osd_req_op_extent_osd_data_pages[33U] = { 'o', 's', 'd', '_', 'r', 'e', 'q', '_', 'o', 'p', '_', 'e', 'x', 't', 'e', 'n', 't', '_', 'o', 's', 'd', '_', 'd', 'a', 't', 'a', '_', 'p', 'a', 'g', 'e', 's', '\x0' };
254 const struct kernel_symbol __ksymtab_osd_req_op_extent_osd_data_pages;
254 const struct kernel_symbol __ksymtab_osd_req_op_extent_osd_data_pages = { (unsigned long)(&osd_req_op_extent_osd_data_pages), (const char *)(&__kstrtab_osd_req_op_extent_osd_data_pages) };
264 const char __kstrtab_osd_req_op_extent_osd_data_pagelist[36U] = { 'o', 's', 'd', '_', 'r', 'e', 'q', '_', 'o', 'p', '_', 'e', 'x', 't', 'e', 'n', 't', '_', 'o', 's', 'd', '_', 'd', 'a', 't', 'a', '_', 'p', 'a', 'g', 'e', 'l', 'i', 's', 't', '\x0' };
264 const struct kernel_symbol __ksymtab_osd_req_op_extent_osd_data_pagelist;
264 const struct kernel_symbol __ksymtab_osd_req_op_extent_osd_data_pagelist = { (unsigned long)(&osd_req_op_extent_osd_data_pagelist), (const char *)(&__kstrtab_osd_req_op_extent_osd_data_pagelist) };
275 const char __kstrtab_osd_req_op_extent_osd_data_bio[31U] = { 'o', 's', 'd', '_', 'r', 'e', 'q', '_', 'o', 'p', '_', 'e', 'x', 't', 'e', 'n', 't', '_', 'o', 's', 'd', '_', 'd', 'a', 't', 'a', '_', 'b', 'i', 'o', '\x0' };
275 const struct kernel_symbol __ksymtab_osd_req_op_extent_osd_data_bio;
275 const struct kernel_symbol __ksymtab_osd_req_op_extent_osd_data_bio = { (unsigned long)(&osd_req_op_extent_osd_data_bio), (const char *)(&__kstrtab_osd_req_op_extent_osd_data_bio) };
278 void osd_req_op_cls_request_info_pagelist(struct ceph_osd_request *osd_req, unsigned int which, struct ceph_pagelist *pagelist);
297 const char __kstrtab_osd_req_op_cls_request_data_pagelist[37U] = { 'o', 's', 'd', '_', 'r', 'e', 'q', '_', 'o', 'p', '_', 'c', 'l', 's', '_', 'r', 'e', 'q', 'u', 'e', 's', 't', '_', 'd', 'a', 't', 'a', '_', 'p', 'a', 'g', 'e', 'l', 'i', 's', 't', '\x0' };
297 const struct kernel_symbol __ksymtab_osd_req_op_cls_request_data_pagelist;
297 const struct kernel_symbol __ksymtab_osd_req_op_cls_request_data_pagelist = { (unsigned long)(&osd_req_op_cls_request_data_pagelist), (const char *)(&__kstrtab_osd_req_op_cls_request_data_pagelist) };
309 const char __kstrtab_osd_req_op_cls_request_data_pages[34U] = { 'o', 's', 'd', '_', 'r', 'e', 'q', '_', 'o', 'p', '_', 'c', 'l', 's', '_', 'r', 'e', 'q', 'u', 'e', 's', 't', '_', 'd', 'a', 't', 'a', '_', 'p', 'a', 'g', 'e', 's', '\x0' };
309 const struct kernel_symbol __ksymtab_osd_req_op_cls_request_data_pages;
309 const struct kernel_symbol __ksymtab_osd_req_op_cls_request_data_pages = { (unsigned long)(&osd_req_op_cls_request_data_pages), (const char *)(&__kstrtab_osd_req_op_cls_request_data_pages) };
321 const char __kstrtab_osd_req_op_cls_response_data_pages[35U] = { 'o', 's', 'd', '_', 'r', 'e', 'q', '_', 'o', 'p', '_', 'c', 'l', 's', '_', 'r', 'e', 's', 'p', 'o', 'n', 's', 'e', '_', 'd', 'a', 't', 'a', '_', 'p', 'a', 'g', 'e', 's', '\x0' };
321 const struct kernel_symbol __ksymtab_osd_req_op_cls_response_data_pages;
321 const struct kernel_symbol __ksymtab_osd_req_op_cls_response_data_pages = { (unsigned long)(&osd_req_op_cls_response_data_pages), (const char *)(&__kstrtab_osd_req_op_cls_response_data_pages) };
323 u64 ceph_osd_data_length(struct ceph_osd_data *osd_data);
342 void ceph_osd_data_release(struct ceph_osd_data *osd_data);
354 void osd_req_op_data_release(struct ceph_osd_request *osd_req, unsigned int which);
380 void ceph_osdc_release_request(struct kref *kref);
419 const char __kstrtab_ceph_osdc_get_request[22U] = { 'c', 'e', 'p', 'h', '_', 'o', 's', 'd', 'c', '_', 'g', 'e', 't', '_', 'r', 'e', 'q', 'u', 'e', 's', 't', '\x0' };
419 const struct kernel_symbol __ksymtab_ceph_osdc_get_request;
419 const struct kernel_symbol __ksymtab_ceph_osdc_get_request = { (unsigned long)(&ceph_osdc_get_request), (const char *)(&__kstrtab_ceph_osdc_get_request) };
427 const char __kstrtab_ceph_osdc_put_request[22U] = { 'c', 'e', 'p', 'h', '_', 'o', 's', 'd', 'c', '_', 'p', 'u', 't', '_', 'r', 'e', 'q', 'u', 'e', 's', 't', '\x0' };
427 const struct kernel_symbol __ksymtab_ceph_osdc_put_request;
427 const struct kernel_symbol __ksymtab_ceph_osdc_put_request = { (unsigned long)(&ceph_osdc_put_request), (const char *)(&__kstrtab_ceph_osdc_put_request) };
506 const char __kstrtab_ceph_osdc_alloc_request[24U] = { 'c', 'e', 'p', 'h', '_', 'o', 's', 'd', 'c', '_', 'a', 'l', 'l', 'o', 'c', '_', 'r', 'e', 'q', 'u', 'e', 's', 't', '\x0' };
506 const struct kernel_symbol __ksymtab_ceph_osdc_alloc_request;
506 const struct kernel_symbol __ksymtab_ceph_osdc_alloc_request = { (unsigned long)(&ceph_osdc_alloc_request), (const char *)(&__kstrtab_ceph_osdc_alloc_request) };
508 bool osd_req_opcode_valid(u16 opcode);
584 struct ceph_osd_req_op * _osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which, u16 opcode);
604 const char __kstrtab_osd_req_op_init[16U] = { 'o', 's', 'd', '_', 'r', 'e', 'q', '_', 'o', 'p', '_', 'i', 'n', 'i', 't', '\x0' };
604 const struct kernel_symbol __ksymtab_osd_req_op_init;
604 const struct kernel_symbol __ksymtab_osd_req_op_init = { (unsigned long)(&osd_req_op_init), (const char *)(&__kstrtab_osd_req_op_init) };
627 const char __kstrtab_osd_req_op_extent_init[23U] = { 'o', 's', 'd', '_', 'r', 'e', 'q', '_', 'o', 'p', '_', 'e', 'x', 't', 'e', 'n', 't', '_', 'i', 'n', 'i', 't', '\x0' };
627 const struct kernel_symbol __ksymtab_osd_req_op_extent_init;
627 const struct kernel_symbol __ksymtab_osd_req_op_extent_init = { (unsigned long)(&osd_req_op_extent_init), (const char *)(&__kstrtab_osd_req_op_extent_init) };
646 const char __kstrtab_osd_req_op_extent_update[25U] = { 'o', 's', 'd', '_', 'r', 'e', 'q', '_', 'o', 'p', '_', 'e', 'x', 't', 'e', 'n', 't', '_', 'u', 'p', 'd', 'a', 't', 'e', '\x0' };
646 const struct kernel_symbol __ksymtab_osd_req_op_extent_update;
646 const struct kernel_symbol __ksymtab_osd_req_op_extent_update = { (unsigned long)(&osd_req_op_extent_update), (const char *)(&__kstrtab_osd_req_op_extent_update) };
682 const char __kstrtab_osd_req_op_cls_init[20U] = { 'o', 's', 'd', '_', 'r', 'e', 'q', '_', 'o', 'p', '_', 'c', 'l', 's', '_', 'i', 'n', 'i', 't', '\x0' };
682 const struct kernel_symbol __ksymtab_osd_req_op_cls_init;
682 const struct kernel_symbol __ksymtab_osd_req_op_cls_init = { (unsigned long)(&osd_req_op_cls_init), (const char *)(&__kstrtab_osd_req_op_cls_init) };
697 const char __kstrtab_osd_req_op_watch_init[22U] = { 'o', 's', 'd', '_', 'r', 'e', 'q', '_', 'o', 'p', '_', 'w', 'a', 't', 'c', 'h', '_', 'i', 'n', 'i', 't', '\x0' };
697 const struct kernel_symbol __ksymtab_osd_req_op_watch_init;
697 const struct kernel_symbol __ksymtab_osd_req_op_watch_init = { (unsigned long)(&osd_req_op_watch_init), (const char *)(&__kstrtab_osd_req_op_watch_init) };
717 const char __kstrtab_osd_req_op_alloc_hint_init[27U] = { 'o', 's', 'd', '_', 'r', 'e', 'q', '_', 'o', 'p', '_', 'a', 'l', 'l', 'o', 'c', '_', 'h', 'i', 'n', 't', '_', 'i', 'n', 'i', 't', '\x0' };
717 const struct kernel_symbol __ksymtab_osd_req_op_alloc_hint_init;
717 const struct kernel_symbol __ksymtab_osd_req_op_alloc_hint_init = { (unsigned long)(&osd_req_op_alloc_hint_init), (const char *)(&__kstrtab_osd_req_op_alloc_hint_init) };
719 void ceph_osdc_msg_data_add(struct ceph_msg *msg, struct ceph_osd_data *osd_data);
741 u64 osd_req_encode_op(struct ceph_osd_request *req, struct ceph_osd_op *dst, unsigned int which);
908 const char __kstrtab_ceph_osdc_new_request[22U] = { 'c', 'e', 'p', 'h', '_', 'o', 's', 'd', 'c', '_', 'n', 'e', 'w', '_', 'r', 'e', 'q', 'u', 'e', 's', 't', '\x0' };
908 const struct kernel_symbol __ksymtab_ceph_osdc_new_request;
908 const struct kernel_symbol __ksymtab_ceph_osdc_new_request = { (unsigned long)(&ceph_osdc_new_request), (const char *)(&__kstrtab_ceph_osdc_new_request) };
913 void __insert_request(struct ceph_osd_client *osdc, struct ceph_osd_request *new);
935 struct ceph_osd_request * __lookup_request(struct ceph_osd_client *osdc, u64 tid);
954 struct ceph_osd_request * __lookup_request_ge(struct ceph_osd_client *osdc, u64 tid);
978 void __kick_osd_requests(struct ceph_osd_client *osdc, struct ceph_osd *osd);
1041 void osd_reset(struct ceph_connection *con);
1061 struct ceph_osd * create_osd(struct ceph_osd_client *osdc, int onum);
1084 struct ceph_osd * get_osd(struct ceph_osd *osd);
1096 void put_osd(struct ceph_osd *osd);
1111 void __remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd);
1123 void remove_all_osds(struct ceph_osd_client *osdc);
1135 void __move_osd_to_lru(struct ceph_osd_client *osdc, struct ceph_osd *osd);
1145 void maybe_move_osd_to_lru(struct ceph_osd_client *osdc, struct ceph_osd *osd);
1155 void __remove_osd_from_lru(struct ceph_osd *osd);
1162 void remove_old_osds(struct ceph_osd_client *osdc);
1212 void __insert_osd(struct ceph_osd_client *osdc, struct ceph_osd *new);
1234 struct ceph_osd * __lookup_osd(struct ceph_osd_client *osdc, int o);
1251 void __schedule_osd_timeout(struct ceph_osd_client *osdc);
1257 void __cancel_osd_timeout(struct ceph_osd_client *osdc);
1284 void __unregister_request(struct ceph_osd_client *osdc, struct ceph_osd_request *req);
1320 void __cancel_request(struct ceph_osd_request *req);
1328 void __register_linger_request(struct ceph_osd_client *osdc, struct ceph_osd_request *req);
1372 const char __kstrtab_ceph_osdc_set_request_linger[29U] = { 'c', 'e', 'p', 'h', '_', 'o', 's', 'd', 'c', '_', 's', 'e', 't', '_', 'r', 'e', 'q', 'u', 'e', 's', 't', '_', 'l', 'i', 'n', 'g', 'e', 'r', '\x0' };
1372 const struct kernel_symbol __ksymtab_ceph_osdc_set_request_linger;
1372 const struct kernel_symbol __ksymtab_ceph_osdc_set_request_linger = { (unsigned long)(&ceph_osdc_set_request_linger), (const char *)(&__kstrtab_ceph_osdc_set_request_linger) };
1380 bool __req_should_be_paused(struct ceph_osd_client *osdc, struct ceph_osd_request *req);
1393 int __calc_request_pg(struct ceph_osdmap *osdmap, struct ceph_osd_request *req, struct ceph_pg *pg_out);
1439 int __map_request(struct ceph_osd_client *osdc, struct ceph_osd_request *req, int force_resend);
1567 int __ceph_osdc_start_request(struct ceph_osd_client *osdc, struct ceph_osd_request *req, bool nofail);
1607 void handle_timeout(struct work_struct *work);
1652 void handle_osds_timeout(struct work_struct *work);
1669 int ceph_oloc_decode(void **p, void *end, struct ceph_object_locator *oloc);
1729 int ceph_redirect_decode(void **p, void *end, struct ceph_request_redirect *redir);
1772 void complete_request(struct ceph_osd_request *req);
1781 void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg, struct ceph_connection *con);
1974 void reset_changed_osds(struct ceph_osd_client *osdc);
1997 void kick_requests(struct ceph_osd_client *osdc, bool force_resend, bool force_resend_writes);
2227 void __release_event(struct kref *kref);
2236 void get_event(struct ceph_osd_event *event);
2245 const char __kstrtab_ceph_osdc_put_event[20U] = { 'c', 'e', 'p', 'h', '_', 'o', 's', 'd', 'c', '_', 'p', 'u', 't', '_', 'e', 'v', 'e', 'n', 't', '\x0' };
2245 const struct kernel_symbol __ksymtab_ceph_osdc_put_event;
2245 const struct kernel_symbol __ksymtab_ceph_osdc_put_event = { (unsigned long)(&ceph_osdc_put_event), (const char *)(&__kstrtab_ceph_osdc_put_event) };
2247 void __insert_event(struct ceph_osd_client *osdc, struct ceph_osd_event *new);
2269 struct ceph_osd_event * __find_event(struct ceph_osd_client *osdc, u64 cookie);
2289 void __remove_event(struct ceph_osd_event *event);
2330 const char __kstrtab_ceph_osdc_create_event[23U] = { 'c', 'e', 'p', 'h', '_', 'o', 's', 'd', 'c', '_', 'c', 'r', 'e', 'a', 't', 'e', '_', 'e', 'v', 'e', 'n', 't', '\x0' };
2330 const struct kernel_symbol __ksymtab_ceph_osdc_create_event;
2330 const struct kernel_symbol __ksymtab_ceph_osdc_create_event = { (unsigned long)(&ceph_osdc_create_event), (const char *)(&__kstrtab_ceph_osdc_create_event) };
2342 const char __kstrtab_ceph_osdc_cancel_event[23U] = { 'c', 'e', 'p', 'h', '_', 'o', 's', 'd', 'c', '_', 'c', 'a', 'n', 'c', 'e', 'l', '_', 'e', 'v', 'e', 'n', 't', '\x0' };
2342 const struct kernel_symbol __ksymtab_ceph_osdc_cancel_event;
2342 const struct kernel_symbol __ksymtab_ceph_osdc_cancel_event = { (unsigned long)(&ceph_osdc_cancel_event), (const char *)(&__kstrtab_ceph_osdc_cancel_event) };
2345 void do_event_work(struct work_struct *work);
2365 void handle_watch_notify(struct ceph_osd_client *osdc, struct ceph_msg *msg);
2517 const char __kstrtab_ceph_osdc_build_request[24U] = { 'c', 'e', 'p', 'h', '_', 'o', 's', 'd', 'c', '_', 'b', 'u', 'i', 'l', 'd', '_', 'r', 'e', 'q', 'u', 'e', 's', 't', '\x0' };
2517 const struct kernel_symbol __ksymtab_ceph_osdc_build_request;
2517 const struct kernel_symbol __ksymtab_ceph_osdc_build_request = { (unsigned long)(&ceph_osdc_build_request), (const char *)(&__kstrtab_ceph_osdc_build_request) };
2538 const char __kstrtab_ceph_osdc_start_request[24U] = { 'c', 'e', 'p', 'h', '_', 'o', 's', 'd', 'c', '_', 's', 't', 'a', 'r', 't', '_', 'r', 'e', 'q', 'u', 'e', 's', 't', '\x0' };
2538 const struct kernel_symbol __ksymtab_ceph_osdc_start_request;
2538 const struct kernel_symbol __ksymtab_ceph_osdc_start_request = { (unsigned long)(&ceph_osdc_start_request), (const char *)(&__kstrtab_ceph_osdc_start_request) };
2557 const char __kstrtab_ceph_osdc_cancel_request[25U] = { 'c', 'e', 'p', 'h', '_', 'o', 's', 'd', 'c', '_', 'c', 'a', 'n', 'c', 'e', 'l', '_', 'r', 'e', 'q', 'u', 'e', 's', 't', '\x0' };
2557 const struct kernel_symbol __ksymtab_ceph_osdc_cancel_request;
2557 const struct kernel_symbol __ksymtab_ceph_osdc_cancel_request = { (unsigned long)(&ceph_osdc_cancel_request), (const char *)(&__kstrtab_ceph_osdc_cancel_request) };
2581 const char __kstrtab_ceph_osdc_wait_request[23U] = { 'c', 'e', 'p', 'h', '_', 'o', 's', 'd', 'c', '_', 'w', 'a', 'i', 't', '_', 'r', 'e', 'q', 'u', 'e', 's', 't', '\x0' };
2581 const struct kernel_symbol __ksymtab_ceph_osdc_wait_request;
2581 const struct kernel_symbol __ksymtab_ceph_osdc_wait_request = { (unsigned long)(&ceph_osdc_wait_request), (const char *)(&__kstrtab_ceph_osdc_wait_request) };
2615 const char __kstrtab_ceph_osdc_sync[15U] = { 'c', 'e', 'p', 'h', '_', 'o', 's', 'd', 'c', '_', 's', 'y', 'n', 'c', '\x0' };
2615 const struct kernel_symbol __ksymtab_ceph_osdc_sync;
2615 const struct kernel_symbol __ksymtab_ceph_osdc_sync = { (unsigned long)(&ceph_osdc_sync), (const char *)(&__kstrtab_ceph_osdc_sync) };
2625 const char __kstrtab_ceph_osdc_flush_notifies[25U] = { 'c', 'e', 'p', 'h', '_', 'o', 's', 'd', 'c', '_', 'f', 'l', 'u', 's', 'h', '_', 'n', 'o', 't', 'i', 'f', 'i', 'e', 's', '\x0' };
2625 const struct kernel_symbol __ksymtab_ceph_osdc_flush_notifies;
2625 const struct kernel_symbol __ksymtab_ceph_osdc_flush_notifies = { (unsigned long)(&ceph_osdc_flush_notifies), (const char *)(&__kstrtab_ceph_osdc_flush_notifies) };
2750 const char __kstrtab_ceph_osdc_readpages[20U] = { 'c', 'e', 'p', 'h', '_', 'o', 's', 'd', 'c', '_', 'r', 'e', 'a', 'd', 'p', 'a', 'g', 'e', 's', '\x0' };
2750 const struct kernel_symbol __ksymtab_ceph_osdc_readpages;
2750 const struct kernel_symbol __ksymtab_ceph_osdc_readpages = { (unsigned long)(&ceph_osdc_readpages), (const char *)(&__kstrtab_ceph_osdc_readpages) };
2793 const char __kstrtab_ceph_osdc_writepages[21U] = { 'c', 'e', 'p', 'h', '_', 'o', 's', 'd', 'c', '_', 'w', 'r', 'i', 't', 'e', 'p', 'a', 'g', 'e', 's', '\x0' };
2793 const struct kernel_symbol __ksymtab_ceph_osdc_writepages;
2793 const struct kernel_symbol __ksymtab_ceph_osdc_writepages = { (unsigned long)(&ceph_osdc_writepages), (const char *)(&__kstrtab_ceph_osdc_writepages) };
2805 const char __kstrtab_ceph_osdc_setup[16U] = { 'c', 'e', 'p', 'h', '_', 'o', 's', 'd', 'c', '_', 's', 'e', 't', 'u', 'p', '\x0' };
2805 const struct kernel_symbol __ksymtab_ceph_osdc_setup;
2805 const struct kernel_symbol __ksymtab_ceph_osdc_setup = { (unsigned long)(&ceph_osdc_setup), (const char *)(&__kstrtab_ceph_osdc_setup) };
2813 const char __kstrtab_ceph_osdc_cleanup[18U] = { 'c', 'e', 'p', 'h', '_', 'o', 's', 'd', 'c', '_', 'c', 'l', 'e', 'a', 'n', 'u', 'p', '\x0' };
2813 const struct kernel_symbol __ksymtab_ceph_osdc_cleanup;
2813 const struct kernel_symbol __ksymtab_ceph_osdc_cleanup = { (unsigned long)(&ceph_osdc_cleanup), (const char *)(&__kstrtab_ceph_osdc_cleanup) };
2818 void dispatch___0(struct ceph_connection *con, struct ceph_msg *msg);
2851 struct ceph_msg * get_reply(struct ceph_connection *con, struct ceph_msg_header *hdr, int *skip);
2925 struct ceph_msg * alloc_msg(struct ceph_connection *con, struct ceph_msg_header *hdr, int *skip);
2951 struct ceph_connection * get_osd_con(struct ceph_connection *con);
2959 void put_osd_con(struct ceph_connection *con);
2972 struct ceph_auth_handshake * get_authorizer(struct ceph_connection *con, int *proto, int force_new);
3001 int verify_authorizer_reply(struct ceph_connection *con, int len);
3010 int invalidate_authorizer(struct ceph_connection *con);
3020 const struct ceph_connection_operations osd_con_ops = { &get_osd_con, &put_osd_con, &dispatch___0, &get_authorizer, &verify_authorizer_reply, &invalidate_authorizer, &osd_reset, 0, &alloc_msg };
3068 void ldv_main6_sequence_infinite_withcheck_stateful();
178 void ldv_mutex_unlock_252(struct mutex *ldv_func_arg1);
182 void ldv_mutex_unlock_253(struct mutex *ldv_func_arg1);
186 void ldv_mutex_unlock_256(struct mutex *ldv_func_arg1);
10 void ldv_mutex_lock_251(struct mutex *ldv_func_arg1);
14 void ldv_mutex_lock_254(struct mutex *ldv_func_arg1);
18 void ldv_mutex_lock_255(struct mutex *ldv_func_arg1);
23 void ldv_mutex_lock_crush_scratch_mutex_of_ceph_osdmap(struct mutex *lock);
27 void ldv_mutex_unlock_crush_scratch_mutex_of_ceph_osdmap(struct mutex *lock);
143 void * krealloc(const void *, size_t , gfp_t );
97 int ceph_stable_mod(int x, int b, int bmask);
10 unsigned int ceph_str_hash(int type, const char *s, unsigned int len);
194 void crush_destroy(struct crush_map *map);
44 bool ceph_can_shift_osds(struct ceph_pg_pool_info *pool);
152 int ceph_osd_is_down(struct ceph_osdmap *map, int osd);
162 char * ceph_osdmap_state_str(char *str, int len, int state);
163 u32 ceph_get_primary_affinity(struct ceph_osdmap *map, int osd);
216 int ceph_calc_pg_primary(struct ceph_osdmap *osdmap, struct ceph_pg pgid);
222 const char * ceph_pg_pool_name_by_id(struct ceph_osdmap *map, u64 id);
223 int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name);
11 __u32 crush_hash32_2(int type, __u32 a, __u32 b);
13 int crush_find_rule(const struct crush_map *map, int ruleset, int type, int size);
14 int crush_do_rule(const struct crush_map *map, int ruleno, int x, int *result, int result_max, const __u32 *weight, int weight_max, int *scratch);
112 int calc_bits_of(unsigned int t);
125 void calc_pg_masks(struct ceph_pg_pool_info *pi);
134 int crush_decode_uniform_bucket(void **p, void *end, struct crush_bucket_uniform *b);
145 int crush_decode_list_bucket(void **p, void *end, struct crush_bucket_list *b);
166 int crush_decode_tree_bucket(void **p, void *end, struct crush_bucket_tree *b);
183 int crush_decode_straw_bucket(void **p, void *end, struct crush_bucket_straw *b);
204 int skip_name_map(void **p, void *end);
219 struct crush_map * crush_decode(void *pbyval, void *end);
432 int pgid_cmp(struct ceph_pg l, struct ceph_pg r);
445 int __insert_pg_mapping(struct ceph_pg_mapping *new, struct rb_root *root);
471 struct ceph_pg_mapping * __lookup_pg_mapping(struct rb_root *root, struct ceph_pg pgid);
494 int __remove_pg_mapping(struct rb_root *root, struct ceph_pg pgid);
512 int __insert_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *new);
534 struct ceph_pg_pool_info * __lookup_pg_pool(struct rb_root *root, u64 id);
570 const char __kstrtab_ceph_pg_pool_name_by_id[24U] = { 'c', 'e', 'p', 'h', '_', 'p', 'g', '_', 'p', 'o', 'o', 'l', '_', 'n', 'a', 'm', 'e', '_', 'b', 'y', '_', 'i', 'd', '\x0' };
570 const struct kernel_symbol __ksymtab_ceph_pg_pool_name_by_id;
570 const struct kernel_symbol __ksymtab_ceph_pg_pool_name_by_id = { (unsigned long)(&ceph_pg_pool_name_by_id), (const char *)(&__kstrtab_ceph_pg_pool_name_by_id) };
584 const char __kstrtab_ceph_pg_poolid_by_name[23U] = { 'c', 'e', 'p', 'h', '_', 'p', 'g', '_', 'p', 'o', 'o', 'l', 'i', 'd', '_', 'b', 'y', '_', 'n', 'a', 'm', 'e', '\x0' };
584 const struct kernel_symbol __ksymtab_ceph_pg_poolid_by_name;
584 const struct kernel_symbol __ksymtab_ceph_pg_poolid_by_name = { (unsigned long)(&ceph_pg_poolid_by_name), (const char *)(&__kstrtab_ceph_pg_poolid_by_name) };
586 void __remove_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *pi);
593 int decode_pool(void **p, void *end, struct ceph_pg_pool_info *pi);
674 int decode_pool_names(void **p, void *end, struct ceph_osdmap *map);
745 int osdmap_set_max_osd(struct ceph_osdmap *map, int max);
800 int get_osdmap_client_data_v(void **p, void *end, const char *prefix, u8 *v);
849 int __decode_pools(void **p, void *end, struct ceph_osdmap *map, bool incremental);
888 int decode_pools(void **p, void *end, struct ceph_osdmap *map);
893 int decode_new_pools(void **p, void *end, struct ceph_osdmap *map);
898 int __decode_pg_temp(void **p, void *end, struct ceph_osdmap *map, bool incremental);
949 int decode_pg_temp(void **p, void *end, struct ceph_osdmap *map);
954 int decode_new_pg_temp(void **p, void *end, struct ceph_osdmap *map);
959 int __decode_primary_temp(void **p, void *end, struct ceph_osdmap *map, bool incremental);
1003 int decode_primary_temp(void **p, void *end, struct ceph_osdmap *map);
1008 int decode_new_primary_temp(void **p, void *end, struct ceph_osdmap *map);
1024 int set_primary_affinity(struct ceph_osdmap *map, int osd, u32 aff);
1046 int decode_primary_affinity(void **p, void *end, struct ceph_osdmap *map);
1076 int decode_new_primary_affinity(void **p, void *end, struct ceph_osdmap *map);
1105 int osdmap_decode(void **p, void *end, struct ceph_osdmap *map);
1500 const char __kstrtab_ceph_calc_file_object_mapping[30U] = { 'c', 'e', 'p', 'h', '_', 'c', 'a', 'l', 'c', '_', 'f', 'i', 'l', 'e', '_', 'o', 'b', 'j', 'e', 'c', 't', '_', 'm', 'a', 'p', 'p', 'i', 'n', 'g', '\x0' };
1500 const struct kernel_symbol __ksymtab_ceph_calc_file_object_mapping;
1500 const struct kernel_symbol __ksymtab_ceph_calc_file_object_mapping = { (unsigned long)(&ceph_calc_file_object_mapping), (const char *)(&__kstrtab_ceph_calc_file_object_mapping) };
1526 const char __kstrtab_ceph_oloc_oid_to_pg[20U] = { 'c', 'e', 'p', 'h', '_', 'o', 'l', 'o', 'c', '_', 'o', 'i', 'd', '_', 't', 'o', '_', 'p', 'g', '\x0' };
1526 const struct kernel_symbol __ksymtab_ceph_oloc_oid_to_pg;
1526 const struct kernel_symbol __ksymtab_ceph_oloc_oid_to_pg = { (unsigned long)(&ceph_oloc_oid_to_pg), (const char *)(&__kstrtab_ceph_oloc_oid_to_pg) };
1528 int do_crush(struct ceph_osdmap *map, int ruleno, int x, int *result, int result_max, const __u32 *weight, int weight_max);
1549 int pg_to_raw_osds(struct ceph_osdmap *osdmap, struct ceph_pg_pool_info *pool, struct ceph_pg pgid, u32 pps, int *osds);
1585 int raw_to_up_osds(struct ceph_osdmap *osdmap, struct ceph_pg_pool_info *pool, int *osds, int len, int *primary);
1620 void apply_primary_affinity(struct ceph_osdmap *osdmap, u32 pps, struct ceph_pg_pool_info *pool, int *osds, int len, int *primary);
1693 int apply_temps(struct ceph_osdmap *osdmap, struct ceph_pg_pool_info *pool, struct ceph_pg pgid, int *osds, int len, int *primary);
1808 const char __kstrtab_ceph_calc_pg_primary[21U] = { 'c', 'e', 'p', 'h', '_', 'c', 'a', 'l', 'c', '_', 'p', 'g', '_', 'p', 'r', 'i', 'm', 'a', 'r', 'y', '\x0' };
1808 const struct kernel_symbol __ksymtab_ceph_calc_pg_primary;
1808 const struct kernel_symbol __ksymtab_ceph_calc_pg_primary = { (unsigned long)(&ceph_calc_pg_primary), (const char *)(&__kstrtab_ceph_calc_pg_primary) };
178 void ldv_mutex_unlock_264(struct mutex *ldv_func_arg1);
10 void ldv_mutex_lock_263(struct mutex *ldv_func_arg1);
107 const char * crush_bucket_alg_name(int alg);
187 int crush_get_bucket_item_weight(const struct crush_bucket *b, int p);
188 void crush_destroy_bucket_uniform(struct crush_bucket_uniform *b);
189 void crush_destroy_bucket_list(struct crush_bucket_list *b);
190 void crush_destroy_bucket_tree(struct crush_bucket_tree *b);
191 void crush_destroy_bucket_straw(struct crush_bucket_straw *b);
192 void crush_destroy_bucket(struct crush_bucket *b);
193 void crush_destroy_rule(struct crush_rule *rule);
196 int crush_calc_tree_node(int i);
178 void ldv_mutex_unlock_268(struct mutex *ldv_func_arg1);
10 void ldv_mutex_lock_267(struct mutex *ldv_func_arg1);
12 __u32 crush_hash32_3(int type, __u32 a, __u32 b, __u32 c);
13 __u32 crush_hash32_4(int type, __u32 a, __u32 b, __u32 c, __u32 d);
148 int bucket_perm_choose(struct crush_bucket *bucket, int x, int r);
208 int bucket_uniform_choose(struct crush_bucket_uniform *bucket, int x, int r);
215 int bucket_list_choose(struct crush_bucket_list *bucket, int x, int r);
241 int height(int n);
251 int left(int x);
257 int right(int x);
263 int terminal(int x);
268 int bucket_tree_choose(struct crush_bucket_tree *bucket, int x, int r);
300 int bucket_straw_choose(struct crush_bucket_straw *bucket, int x, int r);
320 int crush_bucket_choose(struct crush_bucket *in, int x, int r);
347 int is_out(const struct crush_map *map, const __u32 *weight, int weight_max, int item, int x);
381 int crush_choose_firstn(const struct crush_map *map, struct crush_bucket *bucket, const __u32 *weight, int weight_max, int x, int numrep, int type, int *out, int outpos, unsigned int tries, unsigned int recurse_tries, unsigned int local_retries, unsigned int local_fallback_retries, int recurse_to_leaf, unsigned int vary_r, int *out2, int parent_r);
556 void crush_choose_indep(const struct crush_map *map, struct crush_bucket *bucket, const __u32 *weight, int weight_max, int x, int left___0, int numrep, int type, int *out, int outpos, unsigned int tries, unsigned int recurse_tries, int recurse_to_leaf, int *out2, int parent_r);
8 const char * crush_hash_name(int type);
10 __u32 crush_hash32(int type, __u32 a);
14 __u32 crush_hash32_5(int type, __u32 a, __u32 b, __u32 c, __u32 d, __u32 e);
103 __u32 crush_hash32_rjenkins1(__u32 a);
114 __u32 crush_hash32_rjenkins1_2(__u32 a, __u32 b);
125 __u32 crush_hash32_rjenkins1_3(__u32 a, __u32 b, __u32 c);
138 __u32 crush_hash32_rjenkins1_4(__u32 a, __u32 b, __u32 c, __u32 d);
152 __u32 crush_hash32_rjenkins1_5(__u32 a, __u32 b, __u32 c, __u32 d, __u32 e);
177 int ldv_mutex_trylock_274(struct mutex *ldv_func_arg1);
182 void ldv_mutex_unlock_272(struct mutex *ldv_func_arg1);
186 void ldv_mutex_unlock_275(struct mutex *ldv_func_arg1);
190 void ldv_mutex_unlock_276(struct mutex *ldv_func_arg1);
194 void ldv_mutex_unlock_279(struct mutex *ldv_func_arg1);
198 void ldv_mutex_unlock_281(struct mutex *ldv_func_arg1);
10 void ldv_mutex_lock_271(struct mutex *ldv_func_arg1);
14 void ldv_mutex_lock_273(struct mutex *ldv_func_arg1);
18 void ldv_mutex_lock_277(struct mutex *ldv_func_arg1);
22 void ldv_mutex_lock_278(struct mutex *ldv_func_arg1);
26 void ldv_mutex_lock_280(struct mutex *ldv_func_arg1);
99 ssize_t seq_read(struct file *, char *, size_t , loff_t *);
100 loff_t seq_lseek(struct file *, loff_t , int);
107 int seq_printf(struct seq_file *, const char *, ...);
140 int single_open(struct file *, int (*)(struct seq_file *, void *), void *);
142 int single_release(struct inode *, struct file *);
49 struct dentry * debugfs_create_file(const char *, umode_t , struct dentry *, void *, const struct file_operations *);
53 struct dentry * debugfs_create_dir(const char *, struct dentry *);
58 void debugfs_remove(struct dentry *);
109 struct dentry *ceph_debugfs_dir = 0;
111 int monmap_show(struct seq_file *s, void *p);
131 int osdmap_show(struct seq_file *s, void *p);
187 int monc_show(struct seq_file *s, void *p);
221 int osdc_show(struct seq_file *s, void *pp);
260 int monmap_show_open(struct inode *inode, struct file *file);
260 const struct file_operations monmap_show_fops = { 0, &seq_lseek, &seq_read, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, &monmap_show_open, 0, &single_release, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
261 int osdmap_show_open(struct inode *inode, struct file *file);
261 const struct file_operations osdmap_show_fops = { 0, &seq_lseek, &seq_read, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, &osdmap_show_open, 0, &single_release, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
262 int monc_show_open(struct inode *inode, struct file *file);
262 const struct file_operations monc_show_fops = { 0, &seq_lseek, &seq_read, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, &monc_show_open, 0, &single_release, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
263 int osdc_show_open(struct inode *inode, struct file *file);
263 const struct file_operations osdc_show_fops = { 0, &seq_lseek, &seq_read, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, &osdc_show_open, 0, &single_release, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
364 const char __kstrtab_ceph_debugfs_init[18U] = { 'c', 'e', 'p', 'h', '_', 'd', 'e', 'b', 'u', 'g', 'f', 's', '_', 'i', 'n', 'i', 't', '\x0' };
364 const struct kernel_symbol __ksymtab_ceph_debugfs_init;
364 const struct kernel_symbol __ksymtab_ceph_debugfs_init = { (unsigned long)(&ceph_debugfs_init), (const char *)(&__kstrtab_ceph_debugfs_init) };
365 const char __kstrtab_ceph_debugfs_cleanup[21U] = { 'c', 'e', 'p', 'h', '_', 'd', 'e', 'b', 'u', 'g', 'f', 's', '_', 'c', 'l', 'e', 'a', 'n', 'u', 'p', '\x0' };
365 const struct kernel_symbol __ksymtab_ceph_debugfs_cleanup;
365 const struct kernel_symbol __ksymtab_ceph_debugfs_cleanup = { (unsigned long)(&ceph_debugfs_cleanup), (const char *)(&__kstrtab_ceph_debugfs_cleanup) };
178 void ldv_mutex_unlock_294(struct mutex *ldv_func_arg1);
182 void ldv_mutex_unlock_295(struct mutex *ldv_func_arg1);
186 void ldv_mutex_unlock_298(struct mutex *ldv_func_arg1);
190 void ldv_mutex_unlock_300(struct mutex *ldv_func_arg1);
194 void ldv_mutex_unlock_302(struct mutex *ldv_func_arg1);
198 void ldv_mutex_unlock_304(struct mutex *ldv_func_arg1);
202 void ldv_mutex_unlock_306(struct mutex *ldv_func_arg1);
206 void ldv_mutex_unlock_308(struct mutex *ldv_func_arg1);
210 void ldv_mutex_unlock_310(struct mutex *ldv_func_arg1);
214 void ldv_mutex_unlock_312(struct mutex *ldv_func_arg1);
218 void ldv_mutex_unlock_314(struct mutex *ldv_func_arg1);
222 void ldv_mutex_unlock_316(struct mutex *ldv_func_arg1);
10 void ldv_mutex_lock_293(struct mutex *ldv_func_arg1);
14 void ldv_mutex_lock_296(struct mutex *ldv_func_arg1);
18 void ldv_mutex_lock_297(struct mutex *ldv_func_arg1);
22 void ldv_mutex_lock_299(struct mutex *ldv_func_arg1);
26 void ldv_mutex_lock_301(struct mutex *ldv_func_arg1);
30 void ldv_mutex_lock_303(struct mutex *ldv_func_arg1);
34 void ldv_mutex_lock_305(struct mutex *ldv_func_arg1);
38 void ldv_mutex_lock_307(struct mutex *ldv_func_arg1);
42 void ldv_mutex_lock_309(struct mutex *ldv_func_arg1);
46 void ldv_mutex_lock_311(struct mutex *ldv_func_arg1);
50 void ldv_mutex_lock_313(struct mutex *ldv_func_arg1);
54 void ldv_mutex_lock_315(struct mutex *ldv_func_arg1);
91 void ldv_mutex_lock_mutex_of_ceph_auth_client(struct mutex *lock);
95 void ldv_mutex_unlock_mutex_of_ceph_auth_client(struct mutex *lock);
188 void ceph_encode_copy(void **p, const void *s, int len);
96 int ceph_entity_name_encode(const char *name, void **p, void *end);
26 int ceph_auth_none_init(struct ceph_auth_client *ac);
48 int ceph_x_init(struct ceph_auth_client *ac);
97 u32 supported_protocols[2U] = { 1U, 2U };
102 int ceph_auth_init_protocol(struct ceph_auth_client *ac, int protocol);
223 int ceph_build_auth_request(struct ceph_auth_client *ac, void *msg_buf, size_t msg_len);
359 const char __kstrtab_ceph_auth_is_authenticated[27U] = { 'c', 'e', 'p', 'h', '_', 'a', 'u', 't', 'h', '_', 'i', 's', '_', 'a', 'u', 't', 'h', 'e', 'n', 't', 'i', 'c', 'a', 't', 'e', 'd', '\x0' };
359 const struct kernel_symbol __ksymtab_ceph_auth_is_authenticated;
359 const struct kernel_symbol __ksymtab_ceph_auth_is_authenticated = { (unsigned long)(&ceph_auth_is_authenticated), (const char *)(&__kstrtab_ceph_auth_is_authenticated) };
373 const char __kstrtab_ceph_auth_create_authorizer[28U] = { 'c', 'e', 'p', 'h', '_', 'a', 'u', 't', 'h', '_', 'c', 'r', 'e', 'a', 't', 'e', '_', 'a', 'u', 't', 'h', 'o', 'r', 'i', 'z', 'e', 'r', '\x0' };
373 const struct kernel_symbol __ksymtab_ceph_auth_create_authorizer;
373 const struct kernel_symbol __ksymtab_ceph_auth_create_authorizer = { (unsigned long)(&ceph_auth_create_authorizer), (const char *)(&__kstrtab_ceph_auth_create_authorizer) };
383 const char __kstrtab_ceph_auth_destroy_authorizer[29U] = { 'c', 'e', 'p', 'h', '_', 'a', 'u', 't', 'h', '_', 'd', 'e', 's', 't', 'r', 'o', 'y', '_', 'a', 'u', 't', 'h', 'o', 'r', 'i', 'z', 'e', 'r', '\x0' };
383 const struct kernel_symbol __ksymtab_ceph_auth_destroy_authorizer;
383 const struct kernel_symbol __ksymtab_ceph_auth_destroy_authorizer = { (unsigned long)(&ceph_auth_destroy_authorizer), (const char *)(&__kstrtab_ceph_auth_destroy_authorizer) };
397 const char __kstrtab_ceph_auth_update_authorizer[28U] = { 'c', 'e', 'p', 'h', '_', 'a', 'u', 't', 'h', '_', 'u', 'p', 'd', 'a', 't', 'e', '_', 'a', 'u', 't', 'h', 'o', 'r', 'i', 'z', 'e', 'r', '\x0' };
397 const struct kernel_symbol __ksymtab_ceph_auth_update_authorizer;
397 const struct kernel_symbol __ksymtab_ceph_auth_update_authorizer = { (unsigned long)(&ceph_auth_update_authorizer), (const char *)(&__kstrtab_ceph_auth_update_authorizer) };
410 const char __kstrtab_ceph_auth_verify_authorizer_reply[34U] = { 'c', 'e', 'p', 'h', '_', 'a', 'u', 't', 'h', '_', 'v', 'e', 'r', 'i', 'f', 'y', '_', 'a', 'u', 't', 'h', 'o', 'r', 'i', 'z', 'e', 'r', '_', 'r', 'e', 'p', 'l', 'y', '\x0' };
410 const struct kernel_symbol __ksymtab_ceph_auth_verify_authorizer_reply;
410 const struct kernel_symbol __ksymtab_ceph_auth_verify_authorizer_reply = { (unsigned long)(&ceph_auth_verify_authorizer_reply), (const char *)(&__kstrtab_ceph_auth_verify_authorizer_reply) };
419 const char __kstrtab_ceph_auth_invalidate_authorizer[32U] = { 'c', 'e', 'p', 'h', '_', 'a', 'u', 't', 'h', '_', 'i', 'n', 'v', 'a', 'l', 'i', 'd', 'a', 't', 'e', '_', 'a', 'u', 't', 'h', 'o', 'r', 'i', 'z', 'e', 'r', '\x0' };
419 const struct kernel_symbol __ksymtab_ceph_auth_invalidate_authorizer;
419 const struct kernel_symbol __ksymtab_ceph_auth_invalidate_authorizer = { (unsigned long)(&ceph_auth_invalidate_authorizer), (const char *)(&__kstrtab_ceph_auth_invalidate_authorizer) };
178 void ldv_mutex_unlock_342(struct mutex *ldv_func_arg1);
10 void ldv_mutex_lock_341(struct mutex *ldv_func_arg1);
94 void reset(struct ceph_auth_client *ac);
102 void destroy(struct ceph_auth_client *ac);
108 int is_authenticated(struct ceph_auth_client *ac);
115 int should_authenticate(struct ceph_auth_client *ac);
122 int build_request(struct ceph_auth_client *ac, void *buf, void *end);
131 int handle_reply___0(struct ceph_auth_client *ac, int result, void *buf, void *end);
145 int ceph_auth_none_create_authorizer(struct ceph_auth_client *ac, int peer_type, struct ceph_auth_handshake *auth);
182 void ceph_auth_none_destroy_authorizer(struct ceph_auth_client *ac, struct ceph_authorizer *a);
188 const struct ceph_auth_client_ops ceph_auth_none_ops = { "none", &is_authenticated, &should_authenticate, &build_request, &handle_reply___0, &ceph_auth_none_create_authorizer, 0, 0, &ceph_auth_none_destroy_authorizer, 0, &reset, &destroy };
256 void ldv_main13_sequence_infinite_withcheck_stateful();
65 bool __virt_addr_valid(unsigned long);
120 void * kmemdup(const void *, size_t , gfp_t );
178 void ldv_mutex_unlock_346(struct mutex *ldv_func_arg1);
10 void ldv_mutex_lock_345(struct mutex *ldv_func_arg1);
57 void sg_assign_page(struct scatterlist *sg, struct page *page);
87 void sg_set_page(struct scatterlist *sg, struct page *page, unsigned int len, unsigned int offset);
111 void sg_set_buf(struct scatterlist *sg, const void *buf, unsigned int buflen);
226 void sg_init_table(struct scatterlist *, unsigned int);
492 struct crypto_tfm * crypto_alloc_base(const char *, u32 , u32 );
495 void crypto_free_tfm(struct crypto_tfm *tfm);
871 struct crypto_blkcipher * __crypto_blkcipher_cast(struct crypto_tfm *tfm);
884 struct crypto_blkcipher * crypto_alloc_blkcipher(const char *alg_name, u32 type, u32 mask);
894 struct crypto_tfm * crypto_blkcipher_tfm(struct crypto_blkcipher *tfm);
900 void crypto_free_blkcipher(struct crypto_blkcipher *tfm);
919 struct blkcipher_tfm * crypto_blkcipher_crt(struct crypto_blkcipher *tfm);
925 struct blkcipher_alg * crypto_blkcipher_alg(struct crypto_blkcipher *tfm);
931 unsigned int crypto_blkcipher_ivsize(struct crypto_blkcipher *tfm);
965 int crypto_blkcipher_setkey(struct crypto_blkcipher *tfm, const u8 *key, unsigned int keylen);
972 int crypto_blkcipher_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes);
989 int crypto_blkcipher_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes);
139 int register_key_type(struct key_type *);
140 void unregister_key_type(struct key_type *);
163 int generic_key_instantiate(struct key *, struct key_preparsed_payload *);
43 int user_match(const struct key *, const void *);
25 int ceph_crypto_key_encode(struct ceph_crypto_key *key, void **p, void *end);
26 int ceph_crypto_key_decode(struct ceph_crypto_key *key, void **p, void *end);
30 int ceph_decrypt(struct ceph_crypto_key *secret, void *dst, size_t *dst_len, const void *src, size_t src_len);
33 int ceph_encrypt(struct ceph_crypto_key *secret, void *dst, size_t *dst_len, const void *src, size_t src_len);
36 int ceph_decrypt2(struct ceph_crypto_key *secret, void *dst1, size_t *dst1_len, void *dst2, size_t *dst2_len, const void *src, size_t src_len);
40 int ceph_encrypt2(struct ceph_crypto_key *secret, void *dst, size_t *dst_len, const void *src1, size_t src1_len, const void *src2, size_t src2_len);
49 int ceph_unarmor(char *dst, const char *src, const char *end);
166 struct crypto_blkcipher * ceph_crypto_alloc_cipher();
171 const u8 *aes_iv = (const u8 *)"cephsageyudagreg";
173 int ceph_aes_encrypt(const void *key, int key_len, void *dst, size_t *dst_len, const void *src, size_t src_len);
223 int ceph_aes_encrypt2(const void *key, int key_len, void *dst, size_t *dst_len, const void *src1, size_t src1_len, const void *src2, size_t src2_len);
277 int ceph_aes_decrypt(const void *key, int key_len, void *dst, size_t *dst_len, const void *src, size_t src_len);
337 int ceph_aes_decrypt2(const void *key, int key_len, void *dst1, size_t *dst1_len, void *dst2, size_t *dst2_len, const void *src, size_t src_len);
507 int ceph_key_preparse(struct key_preparsed_payload *prep);
539 void ceph_key_free_preparse(struct key_preparsed_payload *prep);
546 void ceph_key_destroy(struct key *key);
554 struct key_type key_type_ceph = { "ceph", 0UL, 0U, 0, &ceph_key_preparse, &ceph_key_free_preparse, &generic_key_instantiate, 0, &user_match, 0, &ceph_key_destroy, 0, 0, 0, { 0, 0 }, { { { 0 }, { 0 }, { 0 }, { 0 }, { 0 }, { 0 }, { 0 }, { 0 } } } };
608 void ldv_main14_sequence_infinite_withcheck_stateful();
83 int ceph_armor(char *dst, const char *src, const char *end);
90 const char *pem_key = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
93 int encode_bits(int c);
98 int decode_bits(char c);
178 void ldv_mutex_unlock_350(struct mutex *ldv_func_arg1);
10 void ldv_mutex_lock_349(struct mutex *ldv_func_arg1);
20 unsigned long int get_seconds();
137 void ceph_decode_timespec(struct timespec *ts, const struct ceph_timespec *tv);
98 void ceph_x_validate_tickets(struct ceph_auth_client *ac, int *pneed);
100 int ceph_x_is_authenticated(struct ceph_auth_client *ac);
111 int ceph_x_should_authenticate(struct ceph_auth_client *ac);
122 int ceph_x_encrypt_buflen(int ilen);
128 int ceph_x_encrypt(struct ceph_crypto_key *secret, void *ibuf, int ilen, void *obuf, size_t olen);
146 int ceph_x_decrypt(struct ceph_crypto_key *secret, void **p, void *end, void *obuf, size_t olen);
172 struct ceph_x_ticket_handler * get_ticket_handler(struct ceph_auth_client *ac, int service);
199 void remove_ticket_handler(struct ceph_auth_client *ac, struct ceph_x_ticket_handler *th);
212 int ceph_x_proc_ticket_reply(struct ceph_auth_client *ac, struct ceph_crypto_key *secret, void *buf, void *end);
353 int ceph_x_build_authorizer(struct ceph_auth_client *ac, struct ceph_x_ticket_handler *th, struct ceph_x_authorizer *au);
421 int ceph_x_encode_ticket(struct ceph_x_ticket_handler *th, void **p, void *end);
474 int ceph_x_build_request(struct ceph_auth_client *ac, void *buf, void *end);
553 int ceph_x_handle_reply(struct ceph_auth_client *ac, int result, void *buf, void *end);
608 int ceph_x_create_authorizer(struct ceph_auth_client *ac, int peer_type, struct ceph_auth_handshake *auth);
639 int ceph_x_update_authorizer(struct ceph_auth_client *ac, int peer_type, struct ceph_auth_handshake *auth);
659 int ceph_x_verify_authorizer_reply(struct ceph_auth_client *ac, struct ceph_authorizer *a, size_t len);
687 void ceph_x_destroy_authorizer(struct ceph_auth_client *ac, struct ceph_authorizer *a);
697 void ceph_x_reset(struct ceph_auth_client *ac);
706 void ceph_x_destroy(struct ceph_auth_client *ac);
727 void ceph_x_invalidate_authorizer(struct ceph_auth_client *ac, int peer_type);
738 const struct ceph_auth_client_ops ceph_x_ops = { "x", &ceph_x_is_authenticated, &ceph_x_should_authenticate, &ceph_x_build_request, &ceph_x_handle_reply, &ceph_x_create_authorizer, &ceph_x_update_authorizer, &ceph_x_verify_authorizer_reply, &ceph_x_destroy_authorizer, &ceph_x_invalidate_authorizer, &ceph_x_reset, &ceph_x_destroy };
830 void ldv_main16_sequence_infinite_withcheck_stateful();
178 void ldv_mutex_unlock_354(struct mutex *ldv_func_arg1);
10 void ldv_mutex_lock_353(struct mutex *ldv_func_arg1);
81 int ceph_file_layout_is_valid(const struct ceph_file_layout *layout);
550 int ceph_flags_to_mode(int flags);
640 int ceph_caps_for_mode(int mode);
138 const char __kstrtab_ceph_flags_to_mode[19U] = { 'c', 'e', 'p', 'h', '_', 'f', 'l', 'a', 'g', 's', '_', 't', 'o', '_', 'm', 'o', 'd', 'e', '\x0' };
138 const struct kernel_symbol __ksymtab_ceph_flags_to_mode;
138 const struct kernel_symbol __ksymtab_ceph_flags_to_mode = { (unsigned long)(&ceph_flags_to_mode), (const char *)(&__kstrtab_ceph_flags_to_mode) };
157 const char __kstrtab_ceph_caps_for_mode[19U] = { 'c', 'e', 'p', 'h', '_', 'c', 'a', 'p', 's', '_', 'f', 'o', 'r', '_', 'm', 'o', 'd', 'e', '\x0' };
157 const struct kernel_symbol __ksymtab_ceph_caps_for_mode;
157 const struct kernel_symbol __ksymtab_ceph_caps_for_mode = { (unsigned long)(&ceph_caps_for_mode), (const char *)(&__kstrtab_ceph_caps_for_mode) };
178 void ldv_mutex_unlock_358(struct mutex *ldv_func_arg1);
10 void ldv_mutex_lock_357(struct mutex *ldv_func_arg1);
131 const char * ceph_osd_state_name(int s);
194 const char * ceph_pool_op_name(int op);
178 void ldv_mutex_unlock_362(struct mutex *ldv_func_arg1);
10 void ldv_mutex_lock_361(struct mutex *ldv_func_arg1);
7 unsigned int ceph_str_hash_linux(const char *str, unsigned int length);
8 unsigned int ceph_str_hash_rjenkins(const char *str, unsigned int length);
11 const char * ceph_str_hash_name(int type);
187 const char __kstrtab_ceph_str_hash[14U] = { 'c', 'e', 'p', 'h', '_', 's', 't', 'r', '_', 'h', 'a', 's', 'h', '\x0' };
187 const struct kernel_symbol __ksymtab_ceph_str_hash;
187 const struct kernel_symbol __ksymtab_ceph_str_hash = { (unsigned long)(&ceph_str_hash), (const char *)(&__kstrtab_ceph_str_hash) };
200 const char __kstrtab_ceph_str_hash_name[19U] = { 'c', 'e', 'p', 'h', '_', 's', 't', 'r', '_', 'h', 'a', 's', 'h', '_', 'n', 'a', 'm', 'e', '\x0' };
200 const struct kernel_symbol __ksymtab_ceph_str_hash_name;
200 const struct kernel_symbol __ksymtab_ceph_str_hash_name = { (unsigned long)(&ceph_str_hash_name), (const char *)(&__kstrtab_ceph_str_hash_name) };
1 unsigned long int __builtin_object_size(void *, int);
230 void might_fault();
178 void ldv_mutex_unlock_366(struct mutex *ldv_func_arg1);
182 void ldv_mutex_unlock_367(struct mutex *ldv_func_arg1);
10 void ldv_mutex_lock_365(struct mutex *ldv_func_arg1);
14 void ldv_mutex_lock_368(struct mutex *ldv_func_arg1);
8 extern int __preempt_count;
73 void __preempt_count_add(int val);
78 void __preempt_count_sub(int val);
1195 long int get_user_pages(struct task_struct *, struct mm_struct *, unsigned long, unsigned long, int, int, struct page **, struct vm_area_struct **);
1218 int set_page_dirty_lock(struct page *);
645 unsigned long int _copy_from_user(void *, const void *, unsigned int);
667 void __copy_from_user_overflow();
688 unsigned long int copy_from_user(void *to, const void *from, unsigned long n);
16 void pagefault_disable();
26 void pagefault_enable();
66 void * kmap_atomic(struct page *page);
73 void __kunmap_atomic(void *addr);
192 void zero_user_segments(struct page *page, unsigned int start1, unsigned int end1, unsigned int start2, unsigned int end2);
210 void zero_user_segment(struct page *page, unsigned int start, unsigned int end);
209 struct page ** ceph_get_direct_page_vector(const void *data, int num_pages, bool write_page);
212 void ceph_put_page_vector(struct page **pages, int num_pages, bool dirty);
215 struct page ** ceph_alloc_page_vector(int num_pages, gfp_t flags);
216 int ceph_copy_user_to_page_vector(struct page **pages, const void *data, loff_t off, size_t len);
219 void ceph_copy_to_page_vector(struct page **pages, const void *data, loff_t off, size_t len);
222 void ceph_copy_from_page_vector(struct page **pages, void *data, loff_t off, size_t len);
225 void ceph_zero_page_vector_range(int off, int len, struct page **pages);
124 const char __kstrtab_ceph_get_direct_page_vector[28U] = { 'c', 'e', 'p', 'h', '_', 'g', 'e', 't', '_', 'd', 'i', 'r', 'e', 'c', 't', '_', 'p', 'a', 'g', 'e', '_', 'v', 'e', 'c', 't', 'o', 'r', '\x0' };
124 const struct kernel_symbol __ksymtab_ceph_get_direct_page_vector;
124 const struct kernel_symbol __ksymtab_ceph_get_direct_page_vector = { (unsigned long)(&ceph_get_direct_page_vector), (const char *)(&__kstrtab_ceph_get_direct_page_vector) };
140 const char __kstrtab_ceph_put_page_vector[21U] = { 'c', 'e', 'p', 'h', '_', 'p', 'u', 't', '_', 'p', 'a', 'g', 'e', '_', 'v', 'e', 'c', 't', 'o', 'r', '\x0' };
140 const struct kernel_symbol __ksymtab_ceph_put_page_vector;
140 const struct kernel_symbol __ksymtab_ceph_put_page_vector = { (unsigned long)(&ceph_put_page_vector), (const char *)(&__kstrtab_ceph_put_page_vector) };
150 const char __kstrtab_ceph_release_page_vector[25U] = { 'c', 'e', 'p', 'h', '_', 'r', 'e', 'l', 'e', 'a', 's', 'e', '_', 'p', 'a', 'g', 'e', '_', 'v', 'e', 'c', 't', 'o', 'r', '\x0' };
150 const struct kernel_symbol __ksymtab_ceph_release_page_vector;
150 const struct kernel_symbol __ksymtab_ceph_release_page_vector = { (unsigned long)(&ceph_release_page_vector), (const char *)(&__kstrtab_ceph_release_page_vector) };
172 const char __kstrtab_ceph_alloc_page_vector[23U] = { 'c', 'e', 'p', 'h', '_', 'a', 'l', 'l', 'o', 'c', '_', 'p', 'a', 'g', 'e', '_', 'v', 'e', 'c', 't', 'o', 'r', '\x0' };
172 const struct kernel_symbol __ksymtab_ceph_alloc_page_vector;
172 const struct kernel_symbol __ksymtab_ceph_alloc_page_vector = { (unsigned long)(&ceph_alloc_page_vector), (const char *)(&__kstrtab_ceph_alloc_page_vector) };
201 const char __kstrtab_ceph_copy_user_to_page_vector[30U] = { 'c', 'e', 'p', 'h', '_', 'c', 'o', 'p', 'y', '_', 'u', 's', 'e', 'r', '_', 't', 'o', '_', 'p', 'a', 'g', 'e', '_', 'v', 'e', 'c', 't', 'o', 'r', '\x0' };
201 const struct kernel_symbol __ksymtab_ceph_copy_user_to_page_vector;
201 const struct kernel_symbol __ksymtab_ceph_copy_user_to_page_vector = { (unsigned long)(&ceph_copy_user_to_page_vector), (const char *)(&__kstrtab_ceph_copy_user_to_page_vector) };
224 const char __kstrtab_ceph_copy_to_page_vector[25U] = { 'c', 'e', 'p', 'h', '_', 'c', 'o', 'p', 'y', '_', 't', 'o', '_', 'p', 'a', 'g', 'e', '_', 'v', 'e', 'c', 't', 'o', 'r', '\x0' };
224 const struct kernel_symbol __ksymtab_ceph_copy_to_page_vector;
224 const struct kernel_symbol __ksymtab_ceph_copy_to_page_vector = { (unsigned long)(&ceph_copy_to_page_vector), (const char *)(&__kstrtab_ceph_copy_to_page_vector) };
247 const char __kstrtab_ceph_copy_from_page_vector[27U] = { 'c', 'e', 'p', 'h', '_', 'c', 'o', 'p', 'y', '_', 'f', 'r', 'o', 'm', '_', 'p', 'a', 'g', 'e', '_', 'v', 'e', 'c', 't', 'o', 'r', '\x0' };
247 const struct kernel_symbol __ksymtab_ceph_copy_from_page_vector;
247 const struct kernel_symbol __ksymtab_ceph_copy_from_page_vector = { (unsigned long)(&ceph_copy_from_page_vector), (const char *)(&__kstrtab_ceph_copy_from_page_vector) };
282 const char __kstrtab_ceph_zero_page_vector_range[28U] = { 'c', 'e', 'p', 'h', '_', 'z', 'e', 'r', 'o', '_', 'p', 'a', 'g', 'e', '_', 'v', 'e', 'c', 't', 'o', 'r', '_', 'r', 'a', 'n', 'g', 'e', '\x0' };
282 const struct kernel_symbol __ksymtab_ceph_zero_page_vector_range;
282 const struct kernel_symbol __ksymtab_ceph_zero_page_vector_range = { (unsigned long)(&ceph_zero_page_vector_range), (const char *)(&__kstrtab_ceph_zero_page_vector_range) };
178 void ldv_mutex_unlock_373(struct mutex *ldv_func_arg1);
182 void ldv_mutex_unlock_376(struct mutex *ldv_func_arg1);
10 void ldv_mutex_lock_374(struct mutex *ldv_func_arg1);
14 void ldv_mutex_lock_375(struct mutex *ldv_func_arg1);
160 struct ceph_snap_context * ceph_create_snap_context(u32 snap_count, gfp_t gfp_flags);
138 const char __kstrtab_ceph_create_snap_context[25U] = { 'c', 'e', 'p', 'h', '_', 'c', 'r', 'e', 'a', 't', 'e', '_', 's', 'n', 'a', 'p', '_', 'c', 'o', 'n', 't', 'e', 'x', 't', '\x0' };
138 const struct kernel_symbol __ksymtab_ceph_create_snap_context;
138 const struct kernel_symbol __ksymtab_ceph_create_snap_context = { (unsigned long)(&ceph_create_snap_context), (const char *)(&__kstrtab_ceph_create_snap_context) };
146 const char __kstrtab_ceph_get_snap_context[22U] = { 'c', 'e', 'p', 'h', '_', 'g', 'e', 't', '_', 's', 'n', 'a', 'p', '_', 'c', 'o', 'n', 't', 'e', 'x', 't', '\x0' };
146 const struct kernel_symbol __ksymtab_ceph_get_snap_context;
146 const struct kernel_symbol __ksymtab_ceph_get_snap_context = { (unsigned long)(&ceph_get_snap_context), (const char *)(&__kstrtab_ceph_get_snap_context) };
157 const char __kstrtab_ceph_put_snap_context[22U] = { 'c', 'e', 'p', 'h', '_', 'p', 'u', 't', '_', 's', 'n', 'a', 'p', '_', 'c', 'o', 'n', 't', 'e', 'x', 't', '\x0' };
157 const struct kernel_symbol __ksymtab_ceph_put_snap_context;
157 const struct kernel_symbol __ksymtab_ceph_put_snap_context = { (unsigned long)(&ceph_put_snap_context), (const char *)(&__kstrtab_ceph_put_snap_context) };
10 void ldv_error();
25 int ldv_undef_int();
59 void __builtin_trap();
8 int ldv_mutex_crush_scratch_mutex_of_ceph_osdmap = 0;
11 int ldv_mutex_lock_interruptible_crush_scratch_mutex_of_ceph_osdmap(struct mutex *lock);
37 int ldv_mutex_lock_killable_crush_scratch_mutex_of_ceph_osdmap(struct mutex *lock);
72 int ldv_mutex_trylock_crush_scratch_mutex_of_ceph_osdmap(struct mutex *lock);
98 int ldv_atomic_dec_and_mutex_lock_crush_scratch_mutex_of_ceph_osdmap(atomic_t *cnt, struct mutex *lock);
123 int ldv_mutex_is_locked_crush_scratch_mutex_of_ceph_osdmap(struct mutex *lock);
160 int ldv_mutex_i_mutex_of_inode = 0;
163 int ldv_mutex_lock_interruptible_i_mutex_of_inode(struct mutex *lock);
189 int ldv_mutex_lock_killable_i_mutex_of_inode(struct mutex *lock);
224 int ldv_mutex_trylock_i_mutex_of_inode(struct mutex *lock);
250 int ldv_atomic_dec_and_mutex_lock_i_mutex_of_inode(atomic_t *cnt, struct mutex *lock);
275 int ldv_mutex_is_locked_i_mutex_of_inode(struct mutex *lock);
312 int ldv_mutex_lock = 0;
315 int ldv_mutex_lock_interruptible_lock(struct mutex *lock);
341 int ldv_mutex_lock_killable_lock(struct mutex *lock);
376 int ldv_mutex_trylock_lock(struct mutex *lock);
402 int ldv_atomic_dec_and_mutex_lock_lock(atomic_t *cnt, struct mutex *lock);
427 int ldv_mutex_is_locked_lock(struct mutex *lock);
464 int ldv_mutex_mount_mutex_of_ceph_client = 0;
467 int ldv_mutex_lock_interruptible_mount_mutex_of_ceph_client(struct mutex *lock);
493 int ldv_mutex_lock_killable_mount_mutex_of_ceph_client(struct mutex *lock);
528 int ldv_mutex_trylock_mount_mutex_of_ceph_client(struct mutex *lock);
554 int ldv_atomic_dec_and_mutex_lock_mount_mutex_of_ceph_client(atomic_t *cnt, struct mutex *lock);
579 int ldv_mutex_is_locked_mount_mutex_of_ceph_client(struct mutex *lock);
616 int ldv_mutex_mutex_of_ceph_auth_client = 0;
619 int ldv_mutex_lock_interruptible_mutex_of_ceph_auth_client(struct mutex *lock);
645 int ldv_mutex_lock_killable_mutex_of_ceph_auth_client(struct mutex *lock);
680 int ldv_mutex_trylock_mutex_of_ceph_auth_client(struct mutex *lock);
706 int ldv_atomic_dec_and_mutex_lock_mutex_of_ceph_auth_client(atomic_t *cnt, struct mutex *lock);
731 int ldv_mutex_is_locked_mutex_of_ceph_auth_client(struct mutex *lock);
768 int ldv_mutex_mutex_of_ceph_connection = 0;
771 int ldv_mutex_lock_interruptible_mutex_of_ceph_connection(struct mutex *lock);
797 int ldv_mutex_lock_killable_mutex_of_ceph_connection(struct mutex *lock);
832 int ldv_mutex_trylock_mutex_of_ceph_connection(struct mutex *lock);
858 int ldv_atomic_dec_and_mutex_lock_mutex_of_ceph_connection(atomic_t *cnt, struct mutex *lock);
883 int ldv_mutex_is_locked_mutex_of_ceph_connection(struct mutex *lock);
920 int ldv_mutex_mutex_of_ceph_mon_client = 0;
923 int ldv_mutex_lock_interruptible_mutex_of_ceph_mon_client(struct mutex *lock);
949 int ldv_mutex_lock_killable_mutex_of_ceph_mon_client(struct mutex *lock);
984 int ldv_mutex_trylock_mutex_of_ceph_mon_client(struct mutex *lock);
1010 int ldv_atomic_dec_and_mutex_lock_mutex_of_ceph_mon_client(atomic_t *cnt, struct mutex *lock);
1035 int ldv_mutex_is_locked_mutex_of_ceph_mon_client(struct mutex *lock);
1072 int ldv_mutex_mutex_of_device = 0;
1075 int ldv_mutex_lock_interruptible_mutex_of_device(struct mutex *lock);
1101 int ldv_mutex_lock_killable_mutex_of_device(struct mutex *lock);
1162 int ldv_atomic_dec_and_mutex_lock_mutex_of_device(atomic_t *cnt, struct mutex *lock);
1187 int ldv_mutex_is_locked_mutex_of_device(struct mutex *lock);
1224 int ldv_mutex_request_mutex_of_ceph_osd_client = 0;
1227 int ldv_mutex_lock_interruptible_request_mutex_of_ceph_osd_client(struct mutex *lock);
1253 int ldv_mutex_lock_killable_request_mutex_of_ceph_osd_client(struct mutex *lock);
1288 int ldv_mutex_trylock_request_mutex_of_ceph_osd_client(struct mutex *lock);
1314 int ldv_atomic_dec_and_mutex_lock_request_mutex_of_ceph_osd_client(atomic_t *cnt, struct mutex *lock);
1339 int ldv_mutex_is_locked_request_mutex_of_ceph_osd_client(struct mutex *lock);
return ;
}
-entry_point
{
3070 struct ceph_connection *var_group1;
3071 struct ceph_msg *var_group2;
3072 int *var_get_authorizer_100_p1;
3073 int var_get_authorizer_100_p2;
3074 int var_verify_authorizer_reply_101_p1;
3075 struct ceph_msg_header *var_group3;
3076 int *var_alloc_msg_97_p2;
3077 int tmp;
3078 int tmp___0;
3256 LDV_IN_INTERRUPT = 1;
3265 -ldv_initialize()
{
1381 ldv_mutex_crush_scratch_mutex_of_ceph_osdmap = 1;
1383 ldv_mutex_i_mutex_of_inode = 1;
1385 ldv_mutex_lock = 1;
1387 ldv_mutex_mount_mutex_of_ceph_client = 1;
1389 ldv_mutex_mutex_of_ceph_auth_client = 1;
1391 ldv_mutex_mutex_of_ceph_connection = 1;
1393 ldv_mutex_mutex_of_ceph_mon_client = 1;
1395 ldv_mutex_mutex_of_device = 1;
1397 ldv_mutex_request_mutex_of_ceph_osd_client = 1;
1398 return ;;
}
3269 goto ldv_33183;
3269 tmp___0 = nondet_int() { /* Function call is skipped due to function is undefined */}
3269 assume(tmp___0 != 0);
3271 goto ldv_33182;
3270 ldv_33182:;
3272 tmp = nondet_int() { /* Function call is skipped due to function is undefined */}
3272 switch (tmp)
3273 assume(!(tmp == 0));
3309 assume(!(tmp == 1));
3344 assume(!(tmp == 2));
3379 assume(!(tmp == 3));
3414 assume(!(tmp == 4));
3449 assume(tmp == 5);
3475 ldv_handler_precall() { /* Function call is skipped due to function is undefined */}
3476 -invalidate_authorizer(var_group1)
{
3012 struct ceph_osd *o;
3013 struct ceph_osd_client *osdc;
3014 struct ceph_auth_client *ac;
3015 int tmp;
3012 struct ceph_osd *__CPAchecker_TMP_0 = (struct ceph_osd *)(con->private);
3012 o = __CPAchecker_TMP_0;
3013 osdc = o->o_osdc;
3014 ac = osdc->client->monc.auth;
3016 -ceph_auth_invalidate_authorizer(ac, 4)
{
414 -ldv_mutex_lock_315(&(ac->mutex))
{
600 -ldv_mutex_lock_mutex_of_ceph_auth_client(ldv_func_arg1)
{
674 assume(ldv_mutex_mutex_of_ceph_auth_client == 1);
676 ldv_mutex_mutex_of_ceph_auth_client = 2;
677 return ;;
}
602 mutex_lock(ldv_func_arg1) { /* Function call is skipped due to function is undefined */}
603 return ;;
}
415 unsigned long __CPAchecker_TMP_0 = (unsigned long)(ac->ops);
415 assume(!(__CPAchecker_TMP_0 != 0UL));
417 -ldv_mutex_unlock_316(&(ac->mutex))
{
608 -ldv_mutex_unlock_mutex_of_ceph_auth_client(ldv_func_arg1)
{
763 assume(ldv_mutex_mutex_of_ceph_auth_client == 2);
765 ldv_mutex_mutex_of_ceph_auth_client = 1;
766 return ;;
}
610 mutex_unlock(ldv_func_arg1) { /* Function call is skipped due to function is undefined */}
611 return ;;
}
418 return ;;
}
3017 -ceph_monc_validate_auth(&(osdc->client->monc))
{
1158 int ret;
1160 -ldv_mutex_lock_141(&(monc->mutex))
{
1851 -ldv_mutex_lock_mutex_of_ceph_mon_client(ldv_func_arg1)
{
978 assume(ldv_mutex_mutex_of_ceph_mon_client == 1);
980 ldv_mutex_mutex_of_ceph_mon_client = 2;
981 return ;;
}
1853 mutex_lock(ldv_func_arg1) { /* Function call is skipped due to function is undefined */}
1854 return ;;
}
1161 -__validate_auth(monc)
{
1143 int ret;
1145 assume(!((monc->pending_auth) != 0));
1148 size_t __CPAchecker_TMP_0 = (size_t )(monc->m_auth->front_alloc_len);
1148 -ceph_build_auth(monc->auth, monc->m_auth->front.iov_base, __CPAchecker_TMP_0)
{
337 int ret;
338 int tmp;
338 ret = 0;
340 -ldv_mutex_lock_303(&(ac->mutex))
{
504 -ldv_mutex_lock_mutex_of_ceph_auth_client(ldv_func_arg1)
{
674 assume(ldv_mutex_mutex_of_ceph_auth_client == 1);
676 ldv_mutex_mutex_of_ceph_auth_client = 2;
677 return ;;
}
506 mutex_lock(ldv_func_arg1) { /* Function call is skipped due to function is undefined */}
507 return ;;
}
341 assume((ac->protocol) == 0U);
342 -ceph_auth_build_hello(ac, msg_buf, msg_len)
{
182 struct ceph_mon_request_header *monhdr;
183 void *p;
184 void *end;
185 void *lenp;
186 int i;
187 int num;
188 int ret;
189 struct _ddebug descriptor;
190 const char *tmp;
191 long tmp___0;
192 int tmp___1;
193 long tmp___2;
194 int tmp___3;
195 long tmp___4;
196 int tmp___5;
197 long tmp___6;
182 monhdr = (struct ceph_mon_request_header *)buf;
183 p = ((void *)monhdr) + 1U;
183 end = buf + len;
187 -ldv_mutex_lock_299(&(ac->mutex))
{
472 -ldv_mutex_lock_mutex_of_ceph_auth_client(ldv_func_arg1)
{
674 assume(!(ldv_mutex_mutex_of_ceph_auth_client == 1));
674 -ldv_error()
{
15 LDV_ERROR:;
12 goto LDV_ERROR;
}
}
}
}
}
}
}
}
}
Source code
1 2 #include <linux/kernel.h> 3 #include <linux/mutex.h> 4 5 extern int mutex_lock_interruptible(struct mutex *lock); 6 extern int mutex_lock_killable(struct mutex *lock); 7 extern void mutex_lock(struct mutex *lock); 8 extern int ldv_mutex_lock_interruptible_crush_scratch_mutex_of_ceph_osdmap(struct mutex *lock); 9 extern int ldv_mutex_lock_killable_crush_scratch_mutex_of_ceph_osdmap(struct mutex *lock); 10 extern void ldv_mutex_lock_nested_crush_scratch_mutex_of_ceph_osdmap(struct mutex *lock, unsigned int subclass); 11 extern void ldv_mutex_lock_crush_scratch_mutex_of_ceph_osdmap(struct mutex *lock); 12 extern int ldv_mutex_trylock_crush_scratch_mutex_of_ceph_osdmap(struct mutex *lock); 13 extern int ldv_atomic_dec_and_mutex_lock_crush_scratch_mutex_of_ceph_osdmap(atomic_t *cnt, struct mutex *lock); 14 extern int ldv_mutex_is_locked_crush_scratch_mutex_of_ceph_osdmap(struct mutex *lock); 15 extern void ldv_mutex_unlock_crush_scratch_mutex_of_ceph_osdmap(struct mutex *lock); 16 extern int ldv_mutex_lock_interruptible_i_mutex_of_inode(struct mutex *lock); 17 extern int ldv_mutex_lock_killable_i_mutex_of_inode(struct mutex *lock); 18 extern void ldv_mutex_lock_nested_i_mutex_of_inode(struct mutex *lock, unsigned int subclass); 19 extern void ldv_mutex_lock_i_mutex_of_inode(struct mutex *lock); 20 extern int ldv_mutex_trylock_i_mutex_of_inode(struct mutex *lock); 21 extern int ldv_atomic_dec_and_mutex_lock_i_mutex_of_inode(atomic_t *cnt, struct mutex *lock); 22 extern int ldv_mutex_is_locked_i_mutex_of_inode(struct mutex *lock); 23 extern void ldv_mutex_unlock_i_mutex_of_inode(struct mutex *lock); 24 extern int ldv_mutex_lock_interruptible_lock(struct mutex *lock); 25 extern int ldv_mutex_lock_killable_lock(struct mutex *lock); 26 extern void ldv_mutex_lock_nested_lock(struct mutex *lock, unsigned int subclass); 27 extern void ldv_mutex_lock_lock(struct mutex *lock); 28 extern int ldv_mutex_trylock_lock(struct mutex *lock); 29 extern int 
ldv_atomic_dec_and_mutex_lock_lock(atomic_t *cnt, struct mutex *lock); 30 extern int ldv_mutex_is_locked_lock(struct mutex *lock); 31 extern void ldv_mutex_unlock_lock(struct mutex *lock); 32 extern int ldv_mutex_lock_interruptible_mount_mutex_of_ceph_client(struct mutex *lock); 33 extern int ldv_mutex_lock_killable_mount_mutex_of_ceph_client(struct mutex *lock); 34 extern void ldv_mutex_lock_nested_mount_mutex_of_ceph_client(struct mutex *lock, unsigned int subclass); 35 extern void ldv_mutex_lock_mount_mutex_of_ceph_client(struct mutex *lock); 36 extern int ldv_mutex_trylock_mount_mutex_of_ceph_client(struct mutex *lock); 37 extern int ldv_atomic_dec_and_mutex_lock_mount_mutex_of_ceph_client(atomic_t *cnt, struct mutex *lock); 38 extern int ldv_mutex_is_locked_mount_mutex_of_ceph_client(struct mutex *lock); 39 extern void ldv_mutex_unlock_mount_mutex_of_ceph_client(struct mutex *lock); 40 extern int ldv_mutex_lock_interruptible_mutex_of_ceph_auth_client(struct mutex *lock); 41 extern int ldv_mutex_lock_killable_mutex_of_ceph_auth_client(struct mutex *lock); 42 extern void ldv_mutex_lock_nested_mutex_of_ceph_auth_client(struct mutex *lock, unsigned int subclass); 43 extern void ldv_mutex_lock_mutex_of_ceph_auth_client(struct mutex *lock); 44 extern int ldv_mutex_trylock_mutex_of_ceph_auth_client(struct mutex *lock); 45 extern int ldv_atomic_dec_and_mutex_lock_mutex_of_ceph_auth_client(atomic_t *cnt, struct mutex *lock); 46 extern int ldv_mutex_is_locked_mutex_of_ceph_auth_client(struct mutex *lock); 47 extern void ldv_mutex_unlock_mutex_of_ceph_auth_client(struct mutex *lock); 48 extern int ldv_mutex_lock_interruptible_mutex_of_ceph_connection(struct mutex *lock); 49 extern int ldv_mutex_lock_killable_mutex_of_ceph_connection(struct mutex *lock); 50 extern void ldv_mutex_lock_nested_mutex_of_ceph_connection(struct mutex *lock, unsigned int subclass); 51 extern void ldv_mutex_lock_mutex_of_ceph_connection(struct mutex *lock); 52 extern int 
ldv_mutex_trylock_mutex_of_ceph_connection(struct mutex *lock); 53 extern int ldv_atomic_dec_and_mutex_lock_mutex_of_ceph_connection(atomic_t *cnt, struct mutex *lock); 54 extern int ldv_mutex_is_locked_mutex_of_ceph_connection(struct mutex *lock); 55 extern void ldv_mutex_unlock_mutex_of_ceph_connection(struct mutex *lock); 56 extern int ldv_mutex_lock_interruptible_mutex_of_ceph_mon_client(struct mutex *lock); 57 extern int ldv_mutex_lock_killable_mutex_of_ceph_mon_client(struct mutex *lock); 58 extern void ldv_mutex_lock_nested_mutex_of_ceph_mon_client(struct mutex *lock, unsigned int subclass); 59 extern void ldv_mutex_lock_mutex_of_ceph_mon_client(struct mutex *lock); 60 extern int ldv_mutex_trylock_mutex_of_ceph_mon_client(struct mutex *lock); 61 extern int ldv_atomic_dec_and_mutex_lock_mutex_of_ceph_mon_client(atomic_t *cnt, struct mutex *lock); 62 extern int ldv_mutex_is_locked_mutex_of_ceph_mon_client(struct mutex *lock); 63 extern void ldv_mutex_unlock_mutex_of_ceph_mon_client(struct mutex *lock); 64 extern int ldv_mutex_lock_interruptible_mutex_of_device(struct mutex *lock); 65 extern int ldv_mutex_lock_killable_mutex_of_device(struct mutex *lock); 66 extern void ldv_mutex_lock_nested_mutex_of_device(struct mutex *lock, unsigned int subclass); 67 extern void ldv_mutex_lock_mutex_of_device(struct mutex *lock); 68 extern int ldv_mutex_trylock_mutex_of_device(struct mutex *lock); 69 extern int ldv_atomic_dec_and_mutex_lock_mutex_of_device(atomic_t *cnt, struct mutex *lock); 70 extern int ldv_mutex_is_locked_mutex_of_device(struct mutex *lock); 71 extern void ldv_mutex_unlock_mutex_of_device(struct mutex *lock); 72 extern int ldv_mutex_lock_interruptible_request_mutex_of_ceph_osd_client(struct mutex *lock); 73 extern int ldv_mutex_lock_killable_request_mutex_of_ceph_osd_client(struct mutex *lock); 74 extern void ldv_mutex_lock_nested_request_mutex_of_ceph_osd_client(struct mutex *lock, unsigned int subclass); 75 extern void 
ldv_mutex_lock_request_mutex_of_ceph_osd_client(struct mutex *lock); 76 extern int ldv_mutex_trylock_request_mutex_of_ceph_osd_client(struct mutex *lock); 77 extern int ldv_atomic_dec_and_mutex_lock_request_mutex_of_ceph_osd_client(atomic_t *cnt, struct mutex *lock); 78 extern int ldv_mutex_is_locked_request_mutex_of_ceph_osd_client(struct mutex *lock); 79 extern void ldv_mutex_unlock_request_mutex_of_ceph_osd_client(struct mutex *lock); 80 #include <linux/ceph/ceph_debug.h> 81 82 #include <linux/module.h> 83 #include <linux/err.h> 84 #include <linux/slab.h> 85 86 #include <linux/ceph/types.h> 87 #include <linux/ceph/decode.h> 88 #include <linux/ceph/libceph.h> 89 #include <linux/ceph/messenger.h> 90 #include "auth_none.h" 91 #include "auth_x.h" 92 93 94 /* 95 * get protocol handler 96 */ 97 static u32 supported_protocols[] = { 98 CEPH_AUTH_NONE, 99 CEPH_AUTH_CEPHX 100 }; 101 102 static int ceph_auth_init_protocol(struct ceph_auth_client *ac, int protocol) 103 { 104 switch (protocol) { 105 case CEPH_AUTH_NONE: 106 return ceph_auth_none_init(ac); 107 case CEPH_AUTH_CEPHX: 108 return ceph_x_init(ac); 109 default: 110 return -ENOENT; 111 } 112 } 113 114 /* 115 * setup, teardown. 116 */ 117 struct ceph_auth_client *ceph_auth_init(const char *name, const struct ceph_crypto_key *key) 118 { 119 struct ceph_auth_client *ac; 120 int ret; 121 122 dout("auth_init name '%s'\n", name); 123 124 ret = -ENOMEM; 125 ac = kzalloc(sizeof(*ac), GFP_NOFS); 126 if (!ac) 127 goto out; 128 129 mutex_init(&ac->mutex); 130 ac->negotiating = true; 131 if (name) 132 ac->name = name; 133 else 134 ac->name = CEPH_AUTH_NAME_DEFAULT; 135 dout("auth_init name %s\n", ac->name); 136 ac->key = key; 137 return ac; 138 139 out: 140 return ERR_PTR(ret); 141 } 142 143 void ceph_auth_destroy(struct ceph_auth_client *ac) 144 { 145 dout("auth_destroy %p\n", ac); 146 if (ac->ops) 147 ac->ops->destroy(ac); 148 kfree(ac); 149 } 150 151 /* 152 * Reset occurs when reconnecting to the monitor. 
153 */ 154 void ceph_auth_reset(struct ceph_auth_client *ac) 155 { 156 mutex_lock(&ac->mutex); 157 dout("auth_reset %p\n", ac); 158 if (ac->ops && !ac->negotiating) 159 ac->ops->reset(ac); 160 ac->negotiating = true; 161 mutex_unlock(&ac->mutex); 162 } 163 164 int ceph_entity_name_encode(const char *name, void **p, void *end) 165 { 166 int len = strlen(name); 167 168 if (*p + 2*sizeof(u32) + len > end) 169 return -ERANGE; 170 ceph_encode_32(p, CEPH_ENTITY_TYPE_CLIENT); 171 ceph_encode_32(p, len); 172 ceph_encode_copy(p, name, len); 173 return 0; 174 } 175 176 /* 177 * Initiate protocol negotiation with monitor. Include entity name 178 * and list supported protocols. 179 */ 180 int ceph_auth_build_hello(struct ceph_auth_client *ac, void *buf, size_t len) 181 { 182 struct ceph_mon_request_header *monhdr = buf; 183 void *p = monhdr + 1, *end = buf + len, *lenp; 184 int i, num; 185 int ret; 186 187 mutex_lock(&ac->mutex); 188 dout("auth_build_hello\n"); 189 monhdr->have_version = 0; 190 monhdr->session_mon = cpu_to_le16(-1); 191 monhdr->session_mon_tid = 0; 192 193 ceph_encode_32(&p, 0); /* no protocol, yet */ 194 195 lenp = p; 196 p += sizeof(u32); 197 198 ceph_decode_need(&p, end, 1 + sizeof(u32), bad); 199 ceph_encode_8(&p, 1); 200 num = ARRAY_SIZE(supported_protocols); 201 ceph_encode_32(&p, num); 202 ceph_decode_need(&p, end, num * sizeof(u32), bad); 203 for (i = 0; i < num; i++) 204 ceph_encode_32(&p, supported_protocols[i]); 205 206 ret = ceph_entity_name_encode(ac->name, &p, end); 207 if (ret < 0) 208 goto out; 209 ceph_decode_need(&p, end, sizeof(u64), bad); 210 ceph_encode_64(&p, ac->global_id); 211 212 ceph_encode_32(&lenp, p - lenp - sizeof(u32)); 213 ret = p - buf; 214 out: 215 mutex_unlock(&ac->mutex); 216 return ret; 217 218 bad: 219 ret = -ERANGE; 220 goto out; 221 } 222 223 static int ceph_build_auth_request(struct ceph_auth_client *ac, 224 void *msg_buf, size_t msg_len) 225 { 226 struct ceph_mon_request_header *monhdr = msg_buf; 227 void *p = monhdr + 
1; 228 void *end = msg_buf + msg_len; 229 int ret; 230 231 monhdr->have_version = 0; 232 monhdr->session_mon = cpu_to_le16(-1); 233 monhdr->session_mon_tid = 0; 234 235 ceph_encode_32(&p, ac->protocol); 236 237 ret = ac->ops->build_request(ac, p + sizeof(u32), end); 238 if (ret < 0) { 239 pr_err("error %d building auth method %s request\n", ret, 240 ac->ops->name); 241 goto out; 242 } 243 dout(" built request %d bytes\n", ret); 244 ceph_encode_32(&p, ret); 245 ret = p + ret - msg_buf; 246 out: 247 return ret; 248 } 249 250 /* 251 * Handle auth message from monitor. 252 */ 253 int ceph_handle_auth_reply(struct ceph_auth_client *ac, 254 void *buf, size_t len, 255 void *reply_buf, size_t reply_len) 256 { 257 void *p = buf; 258 void *end = buf + len; 259 int protocol; 260 s32 result; 261 u64 global_id; 262 void *payload, *payload_end; 263 int payload_len; 264 char *result_msg; 265 int result_msg_len; 266 int ret = -EINVAL; 267 268 mutex_lock(&ac->mutex); 269 dout("handle_auth_reply %p %p\n", p, end); 270 ceph_decode_need(&p, end, sizeof(u32) * 3 + sizeof(u64), bad); 271 protocol = ceph_decode_32(&p); 272 result = ceph_decode_32(&p); 273 global_id = ceph_decode_64(&p); 274 payload_len = ceph_decode_32(&p); 275 payload = p; 276 p += payload_len; 277 ceph_decode_need(&p, end, sizeof(u32), bad); 278 result_msg_len = ceph_decode_32(&p); 279 result_msg = p; 280 p += result_msg_len; 281 if (p != end) 282 goto bad; 283 284 dout(" result %d '%.*s' gid %llu len %d\n", result, result_msg_len, 285 result_msg, global_id, payload_len); 286 287 payload_end = payload + payload_len; 288 289 if (global_id && ac->global_id != global_id) { 290 dout(" set global_id %lld -> %lld\n", ac->global_id, global_id); 291 ac->global_id = global_id; 292 } 293 294 if (ac->negotiating) { 295 /* server does not support our protocols? */ 296 if (!protocol && result < 0) { 297 ret = result; 298 goto out; 299 } 300 /* set up (new) protocol handler? 
*/ 301 if (ac->protocol && ac->protocol != protocol) { 302 ac->ops->destroy(ac); 303 ac->protocol = 0; 304 ac->ops = NULL; 305 } 306 if (ac->protocol != protocol) { 307 ret = ceph_auth_init_protocol(ac, protocol); 308 if (ret) { 309 pr_err("error %d on auth protocol %d init\n", 310 ret, protocol); 311 goto out; 312 } 313 } 314 315 ac->negotiating = false; 316 } 317 318 ret = ac->ops->handle_reply(ac, result, payload, payload_end); 319 if (ret == -EAGAIN) { 320 ret = ceph_build_auth_request(ac, reply_buf, reply_len); 321 } else if (ret) { 322 pr_err("auth method '%s' error %d\n", ac->ops->name, ret); 323 } 324 325 out: 326 mutex_unlock(&ac->mutex); 327 return ret; 328 329 bad: 330 pr_err("failed to decode auth msg\n"); 331 ret = -EINVAL; 332 goto out; 333 } 334 335 int ceph_build_auth(struct ceph_auth_client *ac, 336 void *msg_buf, size_t msg_len) 337 { 338 int ret = 0; 339 340 mutex_lock(&ac->mutex); 341 if (!ac->protocol) 342 ret = ceph_auth_build_hello(ac, msg_buf, msg_len); 343 else if (ac->ops->should_authenticate(ac)) 344 ret = ceph_build_auth_request(ac, msg_buf, msg_len); 345 mutex_unlock(&ac->mutex); 346 return ret; 347 } 348 349 int ceph_auth_is_authenticated(struct ceph_auth_client *ac) 350 { 351 int ret = 0; 352 353 mutex_lock(&ac->mutex); 354 if (ac->ops) 355 ret = ac->ops->is_authenticated(ac); 356 mutex_unlock(&ac->mutex); 357 return ret; 358 } 359 EXPORT_SYMBOL(ceph_auth_is_authenticated); 360 361 int ceph_auth_create_authorizer(struct ceph_auth_client *ac, 362 int peer_type, 363 struct ceph_auth_handshake *auth) 364 { 365 int ret = 0; 366 367 mutex_lock(&ac->mutex); 368 if (ac->ops && ac->ops->create_authorizer) 369 ret = ac->ops->create_authorizer(ac, peer_type, auth); 370 mutex_unlock(&ac->mutex); 371 return ret; 372 } 373 EXPORT_SYMBOL(ceph_auth_create_authorizer); 374 375 void ceph_auth_destroy_authorizer(struct ceph_auth_client *ac, 376 struct ceph_authorizer *a) 377 { 378 mutex_lock(&ac->mutex); 379 if (ac->ops && ac->ops->destroy_authorizer) 
380 ac->ops->destroy_authorizer(ac, a); 381 mutex_unlock(&ac->mutex); 382 } 383 EXPORT_SYMBOL(ceph_auth_destroy_authorizer); 384 385 int ceph_auth_update_authorizer(struct ceph_auth_client *ac, 386 int peer_type, 387 struct ceph_auth_handshake *a) 388 { 389 int ret = 0; 390 391 mutex_lock(&ac->mutex); 392 if (ac->ops && ac->ops->update_authorizer) 393 ret = ac->ops->update_authorizer(ac, peer_type, a); 394 mutex_unlock(&ac->mutex); 395 return ret; 396 } 397 EXPORT_SYMBOL(ceph_auth_update_authorizer); 398 399 int ceph_auth_verify_authorizer_reply(struct ceph_auth_client *ac, 400 struct ceph_authorizer *a, size_t len) 401 { 402 int ret = 0; 403 404 mutex_lock(&ac->mutex); 405 if (ac->ops && ac->ops->verify_authorizer_reply) 406 ret = ac->ops->verify_authorizer_reply(ac, a, len); 407 mutex_unlock(&ac->mutex); 408 return ret; 409 } 410 EXPORT_SYMBOL(ceph_auth_verify_authorizer_reply); 411 412 void ceph_auth_invalidate_authorizer(struct ceph_auth_client *ac, int peer_type) 413 { 414 mutex_lock(&ac->mutex); 415 if (ac->ops && ac->ops->invalidate_authorizer) 416 ac->ops->invalidate_authorizer(ac, peer_type); 417 mutex_unlock(&ac->mutex); 418 } 419 EXPORT_SYMBOL(ceph_auth_invalidate_authorizer);
1 2 #include <linux/kernel.h> 3 #include <linux/mutex.h> 4 5 extern int mutex_lock_interruptible(struct mutex *lock); 6 extern int mutex_lock_killable(struct mutex *lock); 7 extern void mutex_lock(struct mutex *lock); 8 extern int ldv_mutex_lock_interruptible_crush_scratch_mutex_of_ceph_osdmap(struct mutex *lock); 9 extern int ldv_mutex_lock_killable_crush_scratch_mutex_of_ceph_osdmap(struct mutex *lock); 10 extern void ldv_mutex_lock_nested_crush_scratch_mutex_of_ceph_osdmap(struct mutex *lock, unsigned int subclass); 11 extern void ldv_mutex_lock_crush_scratch_mutex_of_ceph_osdmap(struct mutex *lock); 12 extern int ldv_mutex_trylock_crush_scratch_mutex_of_ceph_osdmap(struct mutex *lock); 13 extern int ldv_atomic_dec_and_mutex_lock_crush_scratch_mutex_of_ceph_osdmap(atomic_t *cnt, struct mutex *lock); 14 extern int ldv_mutex_is_locked_crush_scratch_mutex_of_ceph_osdmap(struct mutex *lock); 15 extern void ldv_mutex_unlock_crush_scratch_mutex_of_ceph_osdmap(struct mutex *lock); 16 extern int ldv_mutex_lock_interruptible_i_mutex_of_inode(struct mutex *lock); 17 extern int ldv_mutex_lock_killable_i_mutex_of_inode(struct mutex *lock); 18 extern void ldv_mutex_lock_nested_i_mutex_of_inode(struct mutex *lock, unsigned int subclass); 19 extern void ldv_mutex_lock_i_mutex_of_inode(struct mutex *lock); 20 extern int ldv_mutex_trylock_i_mutex_of_inode(struct mutex *lock); 21 extern int ldv_atomic_dec_and_mutex_lock_i_mutex_of_inode(atomic_t *cnt, struct mutex *lock); 22 extern int ldv_mutex_is_locked_i_mutex_of_inode(struct mutex *lock); 23 extern void ldv_mutex_unlock_i_mutex_of_inode(struct mutex *lock); 24 extern int ldv_mutex_lock_interruptible_lock(struct mutex *lock); 25 extern int ldv_mutex_lock_killable_lock(struct mutex *lock); 26 extern void ldv_mutex_lock_nested_lock(struct mutex *lock, unsigned int subclass); 27 extern void ldv_mutex_lock_lock(struct mutex *lock); 28 extern int ldv_mutex_trylock_lock(struct mutex *lock); 29 extern int 
ldv_atomic_dec_and_mutex_lock_lock(atomic_t *cnt, struct mutex *lock); 30 extern int ldv_mutex_is_locked_lock(struct mutex *lock); 31 extern void ldv_mutex_unlock_lock(struct mutex *lock); 32 extern int ldv_mutex_lock_interruptible_mount_mutex_of_ceph_client(struct mutex *lock); 33 extern int ldv_mutex_lock_killable_mount_mutex_of_ceph_client(struct mutex *lock); 34 extern void ldv_mutex_lock_nested_mount_mutex_of_ceph_client(struct mutex *lock, unsigned int subclass); 35 extern void ldv_mutex_lock_mount_mutex_of_ceph_client(struct mutex *lock); 36 extern int ldv_mutex_trylock_mount_mutex_of_ceph_client(struct mutex *lock); 37 extern int ldv_atomic_dec_and_mutex_lock_mount_mutex_of_ceph_client(atomic_t *cnt, struct mutex *lock); 38 extern int ldv_mutex_is_locked_mount_mutex_of_ceph_client(struct mutex *lock); 39 extern void ldv_mutex_unlock_mount_mutex_of_ceph_client(struct mutex *lock); 40 extern int ldv_mutex_lock_interruptible_mutex_of_ceph_auth_client(struct mutex *lock); 41 extern int ldv_mutex_lock_killable_mutex_of_ceph_auth_client(struct mutex *lock); 42 extern void ldv_mutex_lock_nested_mutex_of_ceph_auth_client(struct mutex *lock, unsigned int subclass); 43 extern void ldv_mutex_lock_mutex_of_ceph_auth_client(struct mutex *lock); 44 extern int ldv_mutex_trylock_mutex_of_ceph_auth_client(struct mutex *lock); 45 extern int ldv_atomic_dec_and_mutex_lock_mutex_of_ceph_auth_client(atomic_t *cnt, struct mutex *lock); 46 extern int ldv_mutex_is_locked_mutex_of_ceph_auth_client(struct mutex *lock); 47 extern void ldv_mutex_unlock_mutex_of_ceph_auth_client(struct mutex *lock); 48 extern int ldv_mutex_lock_interruptible_mutex_of_ceph_connection(struct mutex *lock); 49 extern int ldv_mutex_lock_killable_mutex_of_ceph_connection(struct mutex *lock); 50 extern void ldv_mutex_lock_nested_mutex_of_ceph_connection(struct mutex *lock, unsigned int subclass); 51 extern void ldv_mutex_lock_mutex_of_ceph_connection(struct mutex *lock); 52 extern int 
ldv_mutex_trylock_mutex_of_ceph_connection(struct mutex *lock); 53 extern int ldv_atomic_dec_and_mutex_lock_mutex_of_ceph_connection(atomic_t *cnt, struct mutex *lock); 54 extern int ldv_mutex_is_locked_mutex_of_ceph_connection(struct mutex *lock); 55 extern void ldv_mutex_unlock_mutex_of_ceph_connection(struct mutex *lock); 56 extern int ldv_mutex_lock_interruptible_mutex_of_ceph_mon_client(struct mutex *lock); 57 extern int ldv_mutex_lock_killable_mutex_of_ceph_mon_client(struct mutex *lock); 58 extern void ldv_mutex_lock_nested_mutex_of_ceph_mon_client(struct mutex *lock, unsigned int subclass); 59 extern void ldv_mutex_lock_mutex_of_ceph_mon_client(struct mutex *lock); 60 extern int ldv_mutex_trylock_mutex_of_ceph_mon_client(struct mutex *lock); 61 extern int ldv_atomic_dec_and_mutex_lock_mutex_of_ceph_mon_client(atomic_t *cnt, struct mutex *lock); 62 extern int ldv_mutex_is_locked_mutex_of_ceph_mon_client(struct mutex *lock); 63 extern void ldv_mutex_unlock_mutex_of_ceph_mon_client(struct mutex *lock); 64 extern int ldv_mutex_lock_interruptible_mutex_of_device(struct mutex *lock); 65 extern int ldv_mutex_lock_killable_mutex_of_device(struct mutex *lock); 66 extern void ldv_mutex_lock_nested_mutex_of_device(struct mutex *lock, unsigned int subclass); 67 extern void ldv_mutex_lock_mutex_of_device(struct mutex *lock); 68 extern int ldv_mutex_trylock_mutex_of_device(struct mutex *lock); 69 extern int ldv_atomic_dec_and_mutex_lock_mutex_of_device(atomic_t *cnt, struct mutex *lock); 70 extern int ldv_mutex_is_locked_mutex_of_device(struct mutex *lock); 71 extern void ldv_mutex_unlock_mutex_of_device(struct mutex *lock); 72 extern int ldv_mutex_lock_interruptible_request_mutex_of_ceph_osd_client(struct mutex *lock); 73 extern int ldv_mutex_lock_killable_request_mutex_of_ceph_osd_client(struct mutex *lock); 74 extern void ldv_mutex_lock_nested_request_mutex_of_ceph_osd_client(struct mutex *lock, unsigned int subclass); 75 extern void 
ldv_mutex_lock_request_mutex_of_ceph_osd_client(struct mutex *lock); 76 extern int ldv_mutex_trylock_request_mutex_of_ceph_osd_client(struct mutex *lock); 77 extern int ldv_atomic_dec_and_mutex_lock_request_mutex_of_ceph_osd_client(atomic_t *cnt, struct mutex *lock); 78 extern int ldv_mutex_is_locked_request_mutex_of_ceph_osd_client(struct mutex *lock); 79 extern void ldv_mutex_unlock_request_mutex_of_ceph_osd_client(struct mutex *lock); 80 81 #include <linux/ceph/ceph_debug.h> 82 83 #include <linux/module.h> 84 #include <linux/types.h> 85 #include <linux/slab.h> 86 #include <linux/random.h> 87 #include <linux/sched.h> 88 89 #include <linux/ceph/mon_client.h> 90 #include <linux/ceph/libceph.h> 91 #include <linux/ceph/debugfs.h> 92 #include <linux/ceph/decode.h> 93 #include <linux/ceph/auth.h> 94 95 /* 96 * Interact with Ceph monitor cluster. Handle requests for new map 97 * versions, and periodically resend as needed. Also implement 98 * statfs() and umount(). 99 * 100 * A small cluster of Ceph "monitors" are responsible for managing critical 101 * cluster configuration and state information. An odd number (e.g., 3, 5) 102 * of cmon daemons use a modified version of the Paxos part-time parliament 103 * algorithm to manage the MDS map (mds cluster membership), OSD map, and 104 * list of clients who have mounted the file system. 105 * 106 * We maintain an open, active session with a monitor at all times in order to 107 * receive timely MDSMap updates. We periodically send a keepalive byte on the 108 * TCP socket to ensure we detect a failure. If the connection does break, we 109 * randomly hunt for a new monitor. Once the connection is reestablished, we 110 * resend any outstanding requests. 111 */ 112 113 static const struct ceph_connection_operations mon_con_ops; 114 115 static int __validate_auth(struct ceph_mon_client *monc); 116 117 /* 118 * Decode a monmap blob (e.g., during mount). 
119 */ 120 struct ceph_monmap *ceph_monmap_decode(void *p, void *end) 121 { 122 struct ceph_monmap *m = NULL; 123 int i, err = -EINVAL; 124 struct ceph_fsid fsid; 125 u32 epoch, num_mon; 126 u16 version; 127 u32 len; 128 129 ceph_decode_32_safe(&p, end, len, bad); 130 ceph_decode_need(&p, end, len, bad); 131 132 dout("monmap_decode %p %p len %d\n", p, end, (int)(end-p)); 133 134 ceph_decode_16_safe(&p, end, version, bad); 135 136 ceph_decode_need(&p, end, sizeof(fsid) + 2*sizeof(u32), bad); 137 ceph_decode_copy(&p, &fsid, sizeof(fsid)); 138 epoch = ceph_decode_32(&p); 139 140 num_mon = ceph_decode_32(&p); 141 ceph_decode_need(&p, end, num_mon*sizeof(m->mon_inst[0]), bad); 142 143 if (num_mon >= CEPH_MAX_MON) 144 goto bad; 145 m = kmalloc(sizeof(*m) + sizeof(m->mon_inst[0])*num_mon, GFP_NOFS); 146 if (m == NULL) 147 return ERR_PTR(-ENOMEM); 148 m->fsid = fsid; 149 m->epoch = epoch; 150 m->num_mon = num_mon; 151 ceph_decode_copy(&p, m->mon_inst, num_mon*sizeof(m->mon_inst[0])); 152 for (i = 0; i < num_mon; i++) 153 ceph_decode_addr(&m->mon_inst[i].addr); 154 155 dout("monmap_decode epoch %d, num_mon %d\n", m->epoch, 156 m->num_mon); 157 for (i = 0; i < m->num_mon; i++) 158 dout("monmap_decode mon%d is %s\n", i, 159 ceph_pr_addr(&m->mon_inst[i].addr.in_addr)); 160 return m; 161 162 bad: 163 dout("monmap_decode failed with %d\n", err); 164 kfree(m); 165 return ERR_PTR(err); 166 } 167 168 /* 169 * return true if *addr is included in the monmap. 170 */ 171 int ceph_monmap_contains(struct ceph_monmap *m, struct ceph_entity_addr *addr) 172 { 173 int i; 174 175 for (i = 0; i < m->num_mon; i++) 176 if (memcmp(addr, &m->mon_inst[i].addr, sizeof(*addr)) == 0) 177 return 1; 178 return 0; 179 } 180 181 /* 182 * Send an auth request. 
183 */ 184 static void __send_prepared_auth_request(struct ceph_mon_client *monc, int len) 185 { 186 monc->pending_auth = 1; 187 monc->m_auth->front.iov_len = len; 188 monc->m_auth->hdr.front_len = cpu_to_le32(len); 189 ceph_msg_revoke(monc->m_auth); 190 ceph_msg_get(monc->m_auth); /* keep our ref */ 191 ceph_con_send(&monc->con, monc->m_auth); 192 } 193 194 /* 195 * Close monitor session, if any. 196 */ 197 static void __close_session(struct ceph_mon_client *monc) 198 { 199 dout("__close_session closing mon%d\n", monc->cur_mon); 200 ceph_msg_revoke(monc->m_auth); 201 ceph_msg_revoke_incoming(monc->m_auth_reply); 202 ceph_msg_revoke(monc->m_subscribe); 203 ceph_msg_revoke_incoming(monc->m_subscribe_ack); 204 ceph_con_close(&monc->con); 205 monc->cur_mon = -1; 206 monc->pending_auth = 0; 207 ceph_auth_reset(monc->auth); 208 } 209 210 /* 211 * Open a session with a (new) monitor. 212 */ 213 static int __open_session(struct ceph_mon_client *monc) 214 { 215 char r; 216 int ret; 217 218 if (monc->cur_mon < 0) { 219 get_random_bytes(&r, 1); 220 monc->cur_mon = r % monc->monmap->num_mon; 221 dout("open_session num=%d r=%d -> mon%d\n", 222 monc->monmap->num_mon, r, monc->cur_mon); 223 monc->sub_sent = 0; 224 monc->sub_renew_after = jiffies; /* i.e., expired */ 225 monc->want_next_osdmap = !!monc->want_next_osdmap; 226 227 dout("open_session mon%d opening\n", monc->cur_mon); 228 ceph_con_open(&monc->con, 229 CEPH_ENTITY_TYPE_MON, monc->cur_mon, 230 &monc->monmap->mon_inst[monc->cur_mon].addr); 231 232 /* initiatiate authentication handshake */ 233 ret = ceph_auth_build_hello(monc->auth, 234 monc->m_auth->front.iov_base, 235 monc->m_auth->front_alloc_len); 236 __send_prepared_auth_request(monc, ret); 237 } else { 238 dout("open_session mon%d already open\n", monc->cur_mon); 239 } 240 return 0; 241 } 242 243 static bool __sub_expired(struct ceph_mon_client *monc) 244 { 245 return time_after_eq(jiffies, monc->sub_renew_after); 246 } 247 248 /* 249 * Reschedule delayed work 
timer. 250 */ 251 static void __schedule_delayed(struct ceph_mon_client *monc) 252 { 253 unsigned int delay; 254 255 if (monc->cur_mon < 0 || __sub_expired(monc)) 256 delay = 10 * HZ; 257 else 258 delay = 20 * HZ; 259 dout("__schedule_delayed after %u\n", delay); 260 schedule_delayed_work(&monc->delayed_work, delay); 261 } 262 263 /* 264 * Send subscribe request for mdsmap and/or osdmap. 265 */ 266 static void __send_subscribe(struct ceph_mon_client *monc) 267 { 268 dout("__send_subscribe sub_sent=%u exp=%u want_osd=%d\n", 269 (unsigned int)monc->sub_sent, __sub_expired(monc), 270 monc->want_next_osdmap); 271 if ((__sub_expired(monc) && !monc->sub_sent) || 272 monc->want_next_osdmap == 1) { 273 struct ceph_msg *msg = monc->m_subscribe; 274 struct ceph_mon_subscribe_item *i; 275 void *p, *end; 276 int num; 277 278 p = msg->front.iov_base; 279 end = p + msg->front_alloc_len; 280 281 num = 1 + !!monc->want_next_osdmap + !!monc->want_mdsmap; 282 ceph_encode_32(&p, num); 283 284 if (monc->want_next_osdmap) { 285 dout("__send_subscribe to 'osdmap' %u\n", 286 (unsigned int)monc->have_osdmap); 287 ceph_encode_string(&p, end, "osdmap", 6); 288 i = p; 289 i->have = cpu_to_le64(monc->have_osdmap); 290 i->onetime = 1; 291 p += sizeof(*i); 292 monc->want_next_osdmap = 2; /* requested */ 293 } 294 if (monc->want_mdsmap) { 295 dout("__send_subscribe to 'mdsmap' %u+\n", 296 (unsigned int)monc->have_mdsmap); 297 ceph_encode_string(&p, end, "mdsmap", 6); 298 i = p; 299 i->have = cpu_to_le64(monc->have_mdsmap); 300 i->onetime = 0; 301 p += sizeof(*i); 302 } 303 ceph_encode_string(&p, end, "monmap", 6); 304 i = p; 305 i->have = 0; 306 i->onetime = 0; 307 p += sizeof(*i); 308 309 msg->front.iov_len = p - msg->front.iov_base; 310 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); 311 ceph_msg_revoke(msg); 312 ceph_con_send(&monc->con, ceph_msg_get(msg)); 313 314 monc->sub_sent = jiffies | 1; /* never 0 */ 315 } 316 } 317 318 static void handle_subscribe_ack(struct ceph_mon_client 
*monc, 319 struct ceph_msg *msg) 320 { 321 unsigned int seconds; 322 struct ceph_mon_subscribe_ack *h = msg->front.iov_base; 323 324 if (msg->front.iov_len < sizeof(*h)) 325 goto bad; 326 seconds = le32_to_cpu(h->duration); 327 328 mutex_lock(&monc->mutex); 329 if (monc->hunting) { 330 pr_info("mon%d %s session established\n", 331 monc->cur_mon, 332 ceph_pr_addr(&monc->con.peer_addr.in_addr)); 333 monc->hunting = false; 334 } 335 dout("handle_subscribe_ack after %d seconds\n", seconds); 336 monc->sub_renew_after = monc->sub_sent + (seconds >> 1)*HZ - 1; 337 monc->sub_sent = 0; 338 mutex_unlock(&monc->mutex); 339 return; 340 bad: 341 pr_err("got corrupt subscribe-ack msg\n"); 342 ceph_msg_dump(msg); 343 } 344 345 /* 346 * Keep track of which maps we have 347 */ 348 int ceph_monc_got_mdsmap(struct ceph_mon_client *monc, u32 got) 349 { 350 mutex_lock(&monc->mutex); 351 monc->have_mdsmap = got; 352 mutex_unlock(&monc->mutex); 353 return 0; 354 } 355 EXPORT_SYMBOL(ceph_monc_got_mdsmap); 356 357 int ceph_monc_got_osdmap(struct ceph_mon_client *monc, u32 got) 358 { 359 mutex_lock(&monc->mutex); 360 monc->have_osdmap = got; 361 monc->want_next_osdmap = 0; 362 mutex_unlock(&monc->mutex); 363 return 0; 364 } 365 366 /* 367 * Register interest in the next osdmap 368 */ 369 void ceph_monc_request_next_osdmap(struct ceph_mon_client *monc) 370 { 371 dout("request_next_osdmap have %u\n", monc->have_osdmap); 372 mutex_lock(&monc->mutex); 373 if (!monc->want_next_osdmap) 374 monc->want_next_osdmap = 1; 375 if (monc->want_next_osdmap < 2) 376 __send_subscribe(monc); 377 mutex_unlock(&monc->mutex); 378 } 379 EXPORT_SYMBOL(ceph_monc_request_next_osdmap); 380 381 int ceph_monc_wait_osdmap(struct ceph_mon_client *monc, u32 epoch, 382 unsigned long timeout) 383 { 384 unsigned long started = jiffies; 385 int ret; 386 387 mutex_lock(&monc->mutex); 388 while (monc->have_osdmap < epoch) { 389 mutex_unlock(&monc->mutex); 390 391 if (timeout != 0 && time_after_eq(jiffies, started + timeout)) 
392 return -ETIMEDOUT; 393 394 ret = wait_event_interruptible_timeout(monc->client->auth_wq, 395 monc->have_osdmap >= epoch, timeout); 396 if (ret < 0) 397 return ret; 398 399 mutex_lock(&monc->mutex); 400 } 401 402 mutex_unlock(&monc->mutex); 403 return 0; 404 } 405 EXPORT_SYMBOL(ceph_monc_wait_osdmap); 406 407 /* 408 * 409 */ 410 int ceph_monc_open_session(struct ceph_mon_client *monc) 411 { 412 mutex_lock(&monc->mutex); 413 __open_session(monc); 414 __schedule_delayed(monc); 415 mutex_unlock(&monc->mutex); 416 return 0; 417 } 418 EXPORT_SYMBOL(ceph_monc_open_session); 419 420 /* 421 * We require the fsid and global_id in order to initialize our 422 * debugfs dir. 423 */ 424 static bool have_debugfs_info(struct ceph_mon_client *monc) 425 { 426 dout("have_debugfs_info fsid %d globalid %lld\n", 427 (int)monc->client->have_fsid, monc->auth->global_id); 428 return monc->client->have_fsid && monc->auth->global_id > 0; 429 } 430 431 /* 432 * The monitor responds with mount ack indicate mount success. The 433 * included client ticket allows the client to talk to MDSs and OSDs. 
434 */ 435 static void ceph_monc_handle_map(struct ceph_mon_client *monc, 436 struct ceph_msg *msg) 437 { 438 struct ceph_client *client = monc->client; 439 struct ceph_monmap *monmap = NULL, *old = monc->monmap; 440 void *p, *end; 441 int had_debugfs_info, init_debugfs = 0; 442 443 mutex_lock(&monc->mutex); 444 445 had_debugfs_info = have_debugfs_info(monc); 446 447 dout("handle_monmap\n"); 448 p = msg->front.iov_base; 449 end = p + msg->front.iov_len; 450 451 monmap = ceph_monmap_decode(p, end); 452 if (IS_ERR(monmap)) { 453 pr_err("problem decoding monmap, %d\n", 454 (int)PTR_ERR(monmap)); 455 goto out; 456 } 457 458 if (ceph_check_fsid(monc->client, &monmap->fsid) < 0) { 459 kfree(monmap); 460 goto out; 461 } 462 463 client->monc.monmap = monmap; 464 kfree(old); 465 466 if (!client->have_fsid) { 467 client->have_fsid = true; 468 if (!had_debugfs_info && have_debugfs_info(monc)) { 469 pr_info("client%lld fsid %pU\n", 470 ceph_client_id(monc->client), 471 &monc->client->fsid); 472 init_debugfs = 1; 473 } 474 mutex_unlock(&monc->mutex); 475 476 if (init_debugfs) { 477 /* 478 * do debugfs initialization without mutex to avoid 479 * creating a locking dependency 480 */ 481 ceph_debugfs_client_init(monc->client); 482 } 483 484 goto out_unlocked; 485 } 486 out: 487 mutex_unlock(&monc->mutex); 488 out_unlocked: 489 wake_up_all(&client->auth_wq); 490 } 491 492 /* 493 * generic requests (e.g., statfs, poolop) 494 */ 495 static struct ceph_mon_generic_request *__lookup_generic_req( 496 struct ceph_mon_client *monc, u64 tid) 497 { 498 struct ceph_mon_generic_request *req; 499 struct rb_node *n = monc->generic_request_tree.rb_node; 500 501 while (n) { 502 req = rb_entry(n, struct ceph_mon_generic_request, node); 503 if (tid < req->tid) 504 n = n->rb_left; 505 else if (tid > req->tid) 506 n = n->rb_right; 507 else 508 return req; 509 } 510 return NULL; 511 } 512 513 static void __insert_generic_request(struct ceph_mon_client *monc, 514 struct ceph_mon_generic_request *new) 
515 { 516 struct rb_node **p = &monc->generic_request_tree.rb_node; 517 struct rb_node *parent = NULL; 518 struct ceph_mon_generic_request *req = NULL; 519 520 while (*p) { 521 parent = *p; 522 req = rb_entry(parent, struct ceph_mon_generic_request, node); 523 if (new->tid < req->tid) 524 p = &(*p)->rb_left; 525 else if (new->tid > req->tid) 526 p = &(*p)->rb_right; 527 else 528 BUG(); 529 } 530 531 rb_link_node(&new->node, parent, p); 532 rb_insert_color(&new->node, &monc->generic_request_tree); 533 } 534 535 static void release_generic_request(struct kref *kref) 536 { 537 struct ceph_mon_generic_request *req = 538 container_of(kref, struct ceph_mon_generic_request, kref); 539 540 if (req->reply) 541 ceph_msg_put(req->reply); 542 if (req->request) 543 ceph_msg_put(req->request); 544 545 kfree(req); 546 } 547 548 static void put_generic_request(struct ceph_mon_generic_request *req) 549 { 550 kref_put(&req->kref, release_generic_request); 551 } 552 553 static void get_generic_request(struct ceph_mon_generic_request *req) 554 { 555 kref_get(&req->kref); 556 } 557 558 static struct ceph_msg *get_generic_reply(struct ceph_connection *con, 559 struct ceph_msg_header *hdr, 560 int *skip) 561 { 562 struct ceph_mon_client *monc = con->private; 563 struct ceph_mon_generic_request *req; 564 u64 tid = le64_to_cpu(hdr->tid); 565 struct ceph_msg *m; 566 567 mutex_lock(&monc->mutex); 568 req = __lookup_generic_req(monc, tid); 569 if (!req) { 570 dout("get_generic_reply %lld dne\n", tid); 571 *skip = 1; 572 m = NULL; 573 } else { 574 dout("get_generic_reply %lld got %p\n", tid, req->reply); 575 *skip = 0; 576 m = ceph_msg_get(req->reply); 577 /* 578 * we don't need to track the connection reading into 579 * this reply because we only have one open connection 580 * at a time, ever. 
581 */ 582 } 583 mutex_unlock(&monc->mutex); 584 return m; 585 } 586 587 static int __do_generic_request(struct ceph_mon_client *monc, u64 tid, 588 struct ceph_mon_generic_request *req) 589 { 590 int err; 591 592 /* register request */ 593 req->tid = tid != 0 ? tid : ++monc->last_tid; 594 req->request->hdr.tid = cpu_to_le64(req->tid); 595 __insert_generic_request(monc, req); 596 monc->num_generic_requests++; 597 ceph_con_send(&monc->con, ceph_msg_get(req->request)); 598 mutex_unlock(&monc->mutex); 599 600 err = wait_for_completion_interruptible(&req->completion); 601 602 mutex_lock(&monc->mutex); 603 rb_erase(&req->node, &monc->generic_request_tree); 604 monc->num_generic_requests--; 605 606 if (!err) 607 err = req->result; 608 return err; 609 } 610 611 static int do_generic_request(struct ceph_mon_client *monc, 612 struct ceph_mon_generic_request *req) 613 { 614 int err; 615 616 mutex_lock(&monc->mutex); 617 err = __do_generic_request(monc, 0, req); 618 mutex_unlock(&monc->mutex); 619 620 return err; 621 } 622 623 /* 624 * statfs 625 */ 626 static void handle_statfs_reply(struct ceph_mon_client *monc, 627 struct ceph_msg *msg) 628 { 629 struct ceph_mon_generic_request *req; 630 struct ceph_mon_statfs_reply *reply = msg->front.iov_base; 631 u64 tid = le64_to_cpu(msg->hdr.tid); 632 633 if (msg->front.iov_len != sizeof(*reply)) 634 goto bad; 635 dout("handle_statfs_reply %p tid %llu\n", msg, tid); 636 637 mutex_lock(&monc->mutex); 638 req = __lookup_generic_req(monc, tid); 639 if (req) { 640 *(struct ceph_statfs *)req->buf = reply->st; 641 req->result = 0; 642 get_generic_request(req); 643 } 644 mutex_unlock(&monc->mutex); 645 if (req) { 646 complete_all(&req->completion); 647 put_generic_request(req); 648 } 649 return; 650 651 bad: 652 pr_err("corrupt generic reply, tid %llu\n", tid); 653 ceph_msg_dump(msg); 654 } 655 656 /* 657 * Do a synchronous statfs(). 
658 */ 659 int ceph_monc_do_statfs(struct ceph_mon_client *monc, struct ceph_statfs *buf) 660 { 661 struct ceph_mon_generic_request *req; 662 struct ceph_mon_statfs *h; 663 int err; 664 665 req = kzalloc(sizeof(*req), GFP_NOFS); 666 if (!req) 667 return -ENOMEM; 668 669 kref_init(&req->kref); 670 req->buf = buf; 671 req->buf_len = sizeof(*buf); 672 init_completion(&req->completion); 673 674 err = -ENOMEM; 675 req->request = ceph_msg_new(CEPH_MSG_STATFS, sizeof(*h), GFP_NOFS, 676 true); 677 if (!req->request) 678 goto out; 679 req->reply = ceph_msg_new(CEPH_MSG_STATFS_REPLY, 1024, GFP_NOFS, 680 true); 681 if (!req->reply) 682 goto out; 683 684 /* fill out request */ 685 h = req->request->front.iov_base; 686 h->monhdr.have_version = 0; 687 h->monhdr.session_mon = cpu_to_le16(-1); 688 h->monhdr.session_mon_tid = 0; 689 h->fsid = monc->monmap->fsid; 690 691 err = do_generic_request(monc, req); 692 693 out: 694 kref_put(&req->kref, release_generic_request); 695 return err; 696 } 697 EXPORT_SYMBOL(ceph_monc_do_statfs); 698 699 static void handle_get_version_reply(struct ceph_mon_client *monc, 700 struct ceph_msg *msg) 701 { 702 struct ceph_mon_generic_request *req; 703 u64 tid = le64_to_cpu(msg->hdr.tid); 704 void *p = msg->front.iov_base; 705 void *end = p + msg->front_alloc_len; 706 u64 handle; 707 708 dout("%s %p tid %llu\n", __func__, msg, tid); 709 710 ceph_decode_need(&p, end, 2*sizeof(u64), bad); 711 handle = ceph_decode_64(&p); 712 if (tid != 0 && tid != handle) 713 goto bad; 714 715 mutex_lock(&monc->mutex); 716 req = __lookup_generic_req(monc, handle); 717 if (req) { 718 *(u64 *)req->buf = ceph_decode_64(&p); 719 req->result = 0; 720 get_generic_request(req); 721 } 722 mutex_unlock(&monc->mutex); 723 if (req) { 724 complete_all(&req->completion); 725 put_generic_request(req); 726 } 727 728 return; 729 bad: 730 pr_err("corrupt mon_get_version reply\n"); 731 ceph_msg_dump(msg); 732 } 733 734 /* 735 * Send MMonGetVersion and wait for the reply. 
736 * 737 * @what: one of "mdsmap", "osdmap" or "monmap" 738 */ 739 int ceph_monc_do_get_version(struct ceph_mon_client *monc, const char *what, 740 u64 *newest) 741 { 742 struct ceph_mon_generic_request *req; 743 void *p, *end; 744 u64 tid; 745 int err; 746 747 req = kzalloc(sizeof(*req), GFP_NOFS); 748 if (!req) 749 return -ENOMEM; 750 751 kref_init(&req->kref); 752 req->buf = newest; 753 req->buf_len = sizeof(*newest); 754 init_completion(&req->completion); 755 756 req->request = ceph_msg_new(CEPH_MSG_MON_GET_VERSION, 757 sizeof(u64) + sizeof(u32) + strlen(what), 758 GFP_NOFS, true); 759 if (!req->request) { 760 err = -ENOMEM; 761 goto out; 762 } 763 764 req->reply = ceph_msg_new(CEPH_MSG_MON_GET_VERSION_REPLY, 1024, 765 GFP_NOFS, true); 766 if (!req->reply) { 767 err = -ENOMEM; 768 goto out; 769 } 770 771 p = req->request->front.iov_base; 772 end = p + req->request->front_alloc_len; 773 774 /* fill out request */ 775 mutex_lock(&monc->mutex); 776 tid = ++monc->last_tid; 777 ceph_encode_64(&p, tid); /* handle */ 778 ceph_encode_string(&p, end, what, strlen(what)); 779 780 err = __do_generic_request(monc, tid, req); 781 782 mutex_unlock(&monc->mutex); 783 out: 784 kref_put(&req->kref, release_generic_request); 785 return err; 786 } 787 EXPORT_SYMBOL(ceph_monc_do_get_version); 788 789 /* 790 * pool ops 791 */ 792 static int get_poolop_reply_buf(const char *src, size_t src_len, 793 char *dst, size_t dst_len) 794 { 795 u32 buf_len; 796 797 if (src_len != sizeof(u32) + dst_len) 798 return -EINVAL; 799 800 buf_len = le32_to_cpu(*(u32 *)src); 801 if (buf_len != dst_len) 802 return -EINVAL; 803 804 memcpy(dst, src + sizeof(u32), dst_len); 805 return 0; 806 } 807 808 static void handle_poolop_reply(struct ceph_mon_client *monc, 809 struct ceph_msg *msg) 810 { 811 struct ceph_mon_generic_request *req; 812 struct ceph_mon_poolop_reply *reply = msg->front.iov_base; 813 u64 tid = le64_to_cpu(msg->hdr.tid); 814 815 if (msg->front.iov_len < sizeof(*reply)) 816 goto bad; 817 
dout("handle_poolop_reply %p tid %llu\n", msg, tid); 818 819 mutex_lock(&monc->mutex); 820 req = __lookup_generic_req(monc, tid); 821 if (req) { 822 if (req->buf_len && 823 get_poolop_reply_buf(msg->front.iov_base + sizeof(*reply), 824 msg->front.iov_len - sizeof(*reply), 825 req->buf, req->buf_len) < 0) { 826 mutex_unlock(&monc->mutex); 827 goto bad; 828 } 829 req->result = le32_to_cpu(reply->reply_code); 830 get_generic_request(req); 831 } 832 mutex_unlock(&monc->mutex); 833 if (req) { 834 complete(&req->completion); 835 put_generic_request(req); 836 } 837 return; 838 839 bad: 840 pr_err("corrupt generic reply, tid %llu\n", tid); 841 ceph_msg_dump(msg); 842 } 843 844 /* 845 * Do a synchronous pool op. 846 */ 847 static int do_poolop(struct ceph_mon_client *monc, u32 op, 848 u32 pool, u64 snapid, 849 char *buf, int len) 850 { 851 struct ceph_mon_generic_request *req; 852 struct ceph_mon_poolop *h; 853 int err; 854 855 req = kzalloc(sizeof(*req), GFP_NOFS); 856 if (!req) 857 return -ENOMEM; 858 859 kref_init(&req->kref); 860 req->buf = buf; 861 req->buf_len = len; 862 init_completion(&req->completion); 863 864 err = -ENOMEM; 865 req->request = ceph_msg_new(CEPH_MSG_POOLOP, sizeof(*h), GFP_NOFS, 866 true); 867 if (!req->request) 868 goto out; 869 req->reply = ceph_msg_new(CEPH_MSG_POOLOP_REPLY, 1024, GFP_NOFS, 870 true); 871 if (!req->reply) 872 goto out; 873 874 /* fill out request */ 875 req->request->hdr.version = cpu_to_le16(2); 876 h = req->request->front.iov_base; 877 h->monhdr.have_version = 0; 878 h->monhdr.session_mon = cpu_to_le16(-1); 879 h->monhdr.session_mon_tid = 0; 880 h->fsid = monc->monmap->fsid; 881 h->pool = cpu_to_le32(pool); 882 h->op = cpu_to_le32(op); 883 h->auid = 0; 884 h->snapid = cpu_to_le64(snapid); 885 h->name_len = 0; 886 887 err = do_generic_request(monc, req); 888 889 out: 890 kref_put(&req->kref, release_generic_request); 891 return err; 892 } 893 894 int ceph_monc_create_snapid(struct ceph_mon_client *monc, 895 u32 pool, u64 
*snapid) 896 { 897 return do_poolop(monc, POOL_OP_CREATE_UNMANAGED_SNAP, 898 pool, 0, (char *)snapid, sizeof(*snapid)); 899 900 } 901 EXPORT_SYMBOL(ceph_monc_create_snapid); 902 903 int ceph_monc_delete_snapid(struct ceph_mon_client *monc, 904 u32 pool, u64 snapid) 905 { 906 return do_poolop(monc, POOL_OP_CREATE_UNMANAGED_SNAP, 907 pool, snapid, NULL, 0); 908 909 } 910 911 /* 912 * Resend pending generic requests. 913 */ 914 static void __resend_generic_request(struct ceph_mon_client *monc) 915 { 916 struct ceph_mon_generic_request *req; 917 struct rb_node *p; 918 919 for (p = rb_first(&monc->generic_request_tree); p; p = rb_next(p)) { 920 req = rb_entry(p, struct ceph_mon_generic_request, node); 921 ceph_msg_revoke(req->request); 922 ceph_msg_revoke_incoming(req->reply); 923 ceph_con_send(&monc->con, ceph_msg_get(req->request)); 924 } 925 } 926 927 /* 928 * Delayed work. If we haven't mounted yet, retry. Otherwise, 929 * renew/retry subscription as needed (in case it is timing out, or we 930 * got an ENOMEM). And keep the monitor connection alive. 931 */ 932 static void delayed_work(struct work_struct *work) 933 { 934 struct ceph_mon_client *monc = 935 container_of(work, struct ceph_mon_client, delayed_work.work); 936 937 dout("monc delayed_work\n"); 938 mutex_lock(&monc->mutex); 939 if (monc->hunting) { 940 __close_session(monc); 941 __open_session(monc); /* continue hunting */ 942 } else { 943 ceph_con_keepalive(&monc->con); 944 945 __validate_auth(monc); 946 947 if (ceph_auth_is_authenticated(monc->auth)) 948 __send_subscribe(monc); 949 } 950 __schedule_delayed(monc); 951 mutex_unlock(&monc->mutex); 952 } 953 954 /* 955 * On startup, we build a temporary monmap populated with the IPs 956 * provided by mount(2). 
957 */ 958 static int build_initial_monmap(struct ceph_mon_client *monc) 959 { 960 struct ceph_options *opt = monc->client->options; 961 struct ceph_entity_addr *mon_addr = opt->mon_addr; 962 int num_mon = opt->num_mon; 963 int i; 964 965 /* build initial monmap */ 966 monc->monmap = kzalloc(sizeof(*monc->monmap) + 967 num_mon*sizeof(monc->monmap->mon_inst[0]), 968 GFP_KERNEL); 969 if (!monc->monmap) 970 return -ENOMEM; 971 for (i = 0; i < num_mon; i++) { 972 monc->monmap->mon_inst[i].addr = mon_addr[i]; 973 monc->monmap->mon_inst[i].addr.nonce = 0; 974 monc->monmap->mon_inst[i].name.type = 975 CEPH_ENTITY_TYPE_MON; 976 monc->monmap->mon_inst[i].name.num = cpu_to_le64(i); 977 } 978 monc->monmap->num_mon = num_mon; 979 return 0; 980 } 981 982 int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl) 983 { 984 int err = 0; 985 986 dout("init\n"); 987 memset(monc, 0, sizeof(*monc)); 988 monc->client = cl; 989 monc->monmap = NULL; 990 mutex_init(&monc->mutex); 991 992 err = build_initial_monmap(monc); 993 if (err) 994 goto out; 995 996 /* connection */ 997 /* authentication */ 998 monc->auth = ceph_auth_init(cl->options->name, 999 cl->options->key); 1000 if (IS_ERR(monc->auth)) { 1001 err = PTR_ERR(monc->auth); 1002 goto out_monmap; 1003 } 1004 monc->auth->want_keys = 1005 CEPH_ENTITY_TYPE_AUTH | CEPH_ENTITY_TYPE_MON | 1006 CEPH_ENTITY_TYPE_OSD | CEPH_ENTITY_TYPE_MDS; 1007 1008 /* msgs */ 1009 err = -ENOMEM; 1010 monc->m_subscribe_ack = ceph_msg_new(CEPH_MSG_MON_SUBSCRIBE_ACK, 1011 sizeof(struct ceph_mon_subscribe_ack), 1012 GFP_NOFS, true); 1013 if (!monc->m_subscribe_ack) 1014 goto out_auth; 1015 1016 monc->m_subscribe = ceph_msg_new(CEPH_MSG_MON_SUBSCRIBE, 96, GFP_NOFS, 1017 true); 1018 if (!monc->m_subscribe) 1019 goto out_subscribe_ack; 1020 1021 monc->m_auth_reply = ceph_msg_new(CEPH_MSG_AUTH_REPLY, 4096, GFP_NOFS, 1022 true); 1023 if (!monc->m_auth_reply) 1024 goto out_subscribe; 1025 1026 monc->m_auth = ceph_msg_new(CEPH_MSG_AUTH, 4096, GFP_NOFS, 
true); 1027 monc->pending_auth = 0; 1028 if (!monc->m_auth) 1029 goto out_auth_reply; 1030 1031 ceph_con_init(&monc->con, monc, &mon_con_ops, 1032 &monc->client->msgr); 1033 1034 monc->cur_mon = -1; 1035 monc->hunting = true; 1036 monc->sub_renew_after = jiffies; 1037 monc->sub_sent = 0; 1038 1039 INIT_DELAYED_WORK(&monc->delayed_work, delayed_work); 1040 monc->generic_request_tree = RB_ROOT; 1041 monc->num_generic_requests = 0; 1042 monc->last_tid = 0; 1043 1044 monc->have_mdsmap = 0; 1045 monc->have_osdmap = 0; 1046 monc->want_next_osdmap = 1; 1047 return 0; 1048 1049 out_auth_reply: 1050 ceph_msg_put(monc->m_auth_reply); 1051 out_subscribe: 1052 ceph_msg_put(monc->m_subscribe); 1053 out_subscribe_ack: 1054 ceph_msg_put(monc->m_subscribe_ack); 1055 out_auth: 1056 ceph_auth_destroy(monc->auth); 1057 out_monmap: 1058 kfree(monc->monmap); 1059 out: 1060 return err; 1061 } 1062 EXPORT_SYMBOL(ceph_monc_init); 1063 1064 void ceph_monc_stop(struct ceph_mon_client *monc) 1065 { 1066 dout("stop\n"); 1067 cancel_delayed_work_sync(&monc->delayed_work); 1068 1069 mutex_lock(&monc->mutex); 1070 __close_session(monc); 1071 1072 mutex_unlock(&monc->mutex); 1073 1074 /* 1075 * flush msgr queue before we destroy ourselves to ensure that: 1076 * - any work that references our embedded con is finished. 1077 * - any osd_client or other work that may reference an authorizer 1078 * finishes before we shut down the auth subsystem. 
1079 */ 1080 ceph_msgr_flush(); 1081 1082 ceph_auth_destroy(monc->auth); 1083 1084 ceph_msg_put(monc->m_auth); 1085 ceph_msg_put(monc->m_auth_reply); 1086 ceph_msg_put(monc->m_subscribe); 1087 ceph_msg_put(monc->m_subscribe_ack); 1088 1089 kfree(monc->monmap); 1090 } 1091 EXPORT_SYMBOL(ceph_monc_stop); 1092 1093 static void handle_auth_reply(struct ceph_mon_client *monc, 1094 struct ceph_msg *msg) 1095 { 1096 int ret; 1097 int was_auth = 0; 1098 int had_debugfs_info, init_debugfs = 0; 1099 1100 mutex_lock(&monc->mutex); 1101 had_debugfs_info = have_debugfs_info(monc); 1102 was_auth = ceph_auth_is_authenticated(monc->auth); 1103 monc->pending_auth = 0; 1104 ret = ceph_handle_auth_reply(monc->auth, msg->front.iov_base, 1105 msg->front.iov_len, 1106 monc->m_auth->front.iov_base, 1107 monc->m_auth->front_alloc_len); 1108 if (ret < 0) { 1109 monc->client->auth_err = ret; 1110 wake_up_all(&monc->client->auth_wq); 1111 } else if (ret > 0) { 1112 __send_prepared_auth_request(monc, ret); 1113 } else if (!was_auth && ceph_auth_is_authenticated(monc->auth)) { 1114 dout("authenticated, starting session\n"); 1115 1116 monc->client->msgr.inst.name.type = CEPH_ENTITY_TYPE_CLIENT; 1117 monc->client->msgr.inst.name.num = 1118 cpu_to_le64(monc->auth->global_id); 1119 1120 __send_subscribe(monc); 1121 __resend_generic_request(monc); 1122 } 1123 1124 if (!had_debugfs_info && have_debugfs_info(monc)) { 1125 pr_info("client%lld fsid %pU\n", 1126 ceph_client_id(monc->client), 1127 &monc->client->fsid); 1128 init_debugfs = 1; 1129 } 1130 mutex_unlock(&monc->mutex); 1131 1132 if (init_debugfs) { 1133 /* 1134 * do debugfs initialization without mutex to avoid 1135 * creating a locking dependency 1136 */ 1137 ceph_debugfs_client_init(monc->client); 1138 } 1139 } 1140 1141 static int __validate_auth(struct ceph_mon_client *monc) 1142 { 1143 int ret; 1144 1145 if (monc->pending_auth) 1146 return 0; 1147 1148 ret = ceph_build_auth(monc->auth, monc->m_auth->front.iov_base, 1149 
monc->m_auth->front_alloc_len); 1150 if (ret <= 0) 1151 return ret; /* either an error, or no need to authenticate */ 1152 __send_prepared_auth_request(monc, ret); 1153 return 0; 1154 } 1155 1156 int ceph_monc_validate_auth(struct ceph_mon_client *monc) 1157 { 1158 int ret; 1159 1160 mutex_lock(&monc->mutex); 1161 ret = __validate_auth(monc); 1162 mutex_unlock(&monc->mutex); 1163 return ret; 1164 } 1165 EXPORT_SYMBOL(ceph_monc_validate_auth); 1166 1167 /* 1168 * handle incoming message 1169 */ 1170 static void dispatch(struct ceph_connection *con, struct ceph_msg *msg) 1171 { 1172 struct ceph_mon_client *monc = con->private; 1173 int type = le16_to_cpu(msg->hdr.type); 1174 1175 if (!monc) 1176 return; 1177 1178 switch (type) { 1179 case CEPH_MSG_AUTH_REPLY: 1180 handle_auth_reply(monc, msg); 1181 break; 1182 1183 case CEPH_MSG_MON_SUBSCRIBE_ACK: 1184 handle_subscribe_ack(monc, msg); 1185 break; 1186 1187 case CEPH_MSG_STATFS_REPLY: 1188 handle_statfs_reply(monc, msg); 1189 break; 1190 1191 case CEPH_MSG_MON_GET_VERSION_REPLY: 1192 handle_get_version_reply(monc, msg); 1193 break; 1194 1195 case CEPH_MSG_POOLOP_REPLY: 1196 handle_poolop_reply(monc, msg); 1197 break; 1198 1199 case CEPH_MSG_MON_MAP: 1200 ceph_monc_handle_map(monc, msg); 1201 break; 1202 1203 case CEPH_MSG_OSD_MAP: 1204 ceph_osdc_handle_map(&monc->client->osdc, msg); 1205 break; 1206 1207 default: 1208 /* can the chained handler handle it? 
*/ 1209 if (monc->client->extra_mon_dispatch && 1210 monc->client->extra_mon_dispatch(monc->client, msg) == 0) 1211 break; 1212 1213 pr_err("received unknown message type %d %s\n", type, 1214 ceph_msg_type_name(type)); 1215 } 1216 ceph_msg_put(msg); 1217 } 1218 1219 /* 1220 * Allocate memory for incoming message 1221 */ 1222 static struct ceph_msg *mon_alloc_msg(struct ceph_connection *con, 1223 struct ceph_msg_header *hdr, 1224 int *skip) 1225 { 1226 struct ceph_mon_client *monc = con->private; 1227 int type = le16_to_cpu(hdr->type); 1228 int front_len = le32_to_cpu(hdr->front_len); 1229 struct ceph_msg *m = NULL; 1230 1231 *skip = 0; 1232 1233 switch (type) { 1234 case CEPH_MSG_MON_SUBSCRIBE_ACK: 1235 m = ceph_msg_get(monc->m_subscribe_ack); 1236 break; 1237 case CEPH_MSG_POOLOP_REPLY: 1238 case CEPH_MSG_STATFS_REPLY: 1239 return get_generic_reply(con, hdr, skip); 1240 case CEPH_MSG_AUTH_REPLY: 1241 m = ceph_msg_get(monc->m_auth_reply); 1242 break; 1243 case CEPH_MSG_MON_GET_VERSION_REPLY: 1244 if (le64_to_cpu(hdr->tid) != 0) 1245 return get_generic_reply(con, hdr, skip); 1246 1247 /* 1248 * Older OSDs don't set reply tid even if the orignal 1249 * request had a non-zero tid. Workaround this weirdness 1250 * by falling through to the allocate case. 1251 */ 1252 case CEPH_MSG_MON_MAP: 1253 case CEPH_MSG_MDS_MAP: 1254 case CEPH_MSG_OSD_MAP: 1255 m = ceph_msg_new(type, front_len, GFP_NOFS, false); 1256 if (!m) 1257 return NULL; /* ENOMEM--return skip == 0 */ 1258 break; 1259 } 1260 1261 if (!m) { 1262 pr_info("alloc_msg unknown type %d\n", type); 1263 *skip = 1; 1264 } 1265 return m; 1266 } 1267 1268 /* 1269 * If the monitor connection resets, pick a new monitor and resubmit 1270 * any pending requests. 
 */
static void mon_fault(struct ceph_connection *con)
{
	struct ceph_mon_client *monc = con->private;

	if (!monc)
		return;

	dout("mon_fault\n");
	mutex_lock(&monc->mutex);
	/* re-check under the mutex in case the connection was torn down */
	if (!con->private)
		goto out;

	if (!monc->hunting)
		pr_info("mon%d %s session lost, "
			"hunting for new mon\n", monc->cur_mon,
			ceph_pr_addr(&monc->con.peer_addr.in_addr));

	__close_session(monc);
	if (!monc->hunting) {
		/* start hunting */
		monc->hunting = true;
		__open_session(monc);
	} else {
		/* already hunting, let's wait a bit */
		__schedule_delayed(monc);
	}
out:
	mutex_unlock(&monc->mutex);
}

/*
 * We can ignore refcounting on the connection struct, as all references
 * will come from the messenger workqueue, which is drained prior to
 * mon_client destruction.
 */
static struct ceph_connection *con_get(struct ceph_connection *con)
{
	return con;
}

static void con_put(struct ceph_connection *con)
{
}

static const struct ceph_connection_operations mon_con_ops = {
	.get = con_get,
	.put = con_put,
	.dispatch = dispatch,
	.fault = mon_fault,
	.alloc_msg = mon_alloc_msg,
};




/* LDV_COMMENT_BEGIN_MAIN */
#ifdef LDV_MAIN5_sequence_infinite_withcheck_stateful

/*###########################################################################*/

/*############## Driver Environment Generator 0.2 output ####################*/

/*###########################################################################*/



/* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test if all kernel resources are correctly released by driver before driver will be unloaded. */
void ldv_check_final_state(void);

/* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result. */
void ldv_check_return_value(int res);

/* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result of probe() function. */
void ldv_check_return_value_probe(int res);

/* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Initializes the model. */
void ldv_initialize(void);

/* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Reinitializes the model between distinct model function calls. */
void ldv_handler_precall(void);

/* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Returns arbitrary interger value. */
int nondet_int(void);

/* LDV_COMMENT_VAR_DECLARE_LDV Special variable for LDV verifier. */
int LDV_IN_INTERRUPT;

/* LDV_COMMENT_FUNCTION_MAIN Main function for LDV verifier.
 * Auto-generated driver-environment model: repeatedly invokes the
 * mon_con_ops callbacks in nondeterministic order so the verifier can
 * explore all interleavings. */
void ldv_main5_sequence_infinite_withcheck_stateful(void) {



	/* LDV_COMMENT_BEGIN_VARIABLE_DECLARATION_PART */
	/*============================= VARIABLE DECLARATION PART =============================*/
	/** STRUCT: struct type: ceph_connection_operations, struct name: mon_con_ops **/
	/* content: static struct ceph_connection *con_get(struct ceph_connection *con)*/
	/* LDV_COMMENT_END_PREP */
	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "con_get" */
	struct ceph_connection * var_group1;
	/* content: static void con_put(struct ceph_connection *con)*/
	/* LDV_COMMENT_END_PREP */
	/* content: static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)*/
	/* LDV_COMMENT_END_PREP */
	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "dispatch" */
	struct ceph_msg * var_group2;
	/* content: static void mon_fault(struct ceph_connection *con)*/
	/* LDV_COMMENT_END_PREP */
	/* content: static struct ceph_msg *mon_alloc_msg(struct ceph_connection *con, struct ceph_msg_header *hdr, int *skip)*/
	/* LDV_COMMENT_END_PREP */
	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mon_alloc_msg" */
	struct ceph_msg_header * var_group3;
	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mon_alloc_msg" */
	int * var_mon_alloc_msg_42_p2;




	/* LDV_COMMENT_END_VARIABLE_DECLARATION_PART */
	/* LDV_COMMENT_BEGIN_VARIABLE_INITIALIZING_PART */
	/*============================= VARIABLE INITIALIZING PART =============================*/
	LDV_IN_INTERRUPT=1;




	/* LDV_COMMENT_END_VARIABLE_INITIALIZING_PART */
	/* LDV_COMMENT_BEGIN_FUNCTION_CALL_SECTION */
	/*============================= FUNCTION CALL SECTION =============================*/
	/* LDV_COMMENT_FUNCTION_CALL Initialize LDV model. */
	ldv_initialize();



	while( nondet_int() ) {

		switch(nondet_int()) {

		case 0: {

			/** STRUCT: struct type: ceph_connection_operations, struct name: mon_con_ops **/


			/* content: static struct ceph_connection *con_get(struct ceph_connection *con)*/
			/* LDV_COMMENT_END_PREP */
			/* LDV_COMMENT_FUNCTION_CALL Function from field "get" from driver structure with callbacks "mon_con_ops" */
			ldv_handler_precall();
			con_get( var_group1);




		}

		break;
		case 1: {

			/** STRUCT: struct type: ceph_connection_operations, struct name: mon_con_ops **/


			/* content: static void con_put(struct ceph_connection *con)*/
			/* LDV_COMMENT_END_PREP */
			/* LDV_COMMENT_FUNCTION_CALL Function from field "put" from driver structure with callbacks "mon_con_ops" */
			ldv_handler_precall();
			con_put( var_group1);




		}

		break;
		case 2: {

			/** STRUCT: struct type: ceph_connection_operations, struct name: mon_con_ops **/


			/* content: static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)*/
			/* LDV_COMMENT_END_PREP */
			/* LDV_COMMENT_FUNCTION_CALL Function from field "dispatch" from driver structure with callbacks "mon_con_ops" */
			ldv_handler_precall();
			dispatch( var_group1, var_group2);




		}

		break;
		case 3: {

			/** STRUCT: struct type: ceph_connection_operations, struct name: mon_con_ops **/


			/* content: static void mon_fault(struct ceph_connection *con)*/
			/* LDV_COMMENT_END_PREP */
			/* LDV_COMMENT_FUNCTION_CALL Function from field "fault" from driver structure with callbacks "mon_con_ops" */
			ldv_handler_precall();
			mon_fault( var_group1);




		}

		break;
		case 4: {

			/** STRUCT: struct type: ceph_connection_operations, struct name: mon_con_ops **/


			/* content: static struct ceph_msg *mon_alloc_msg(struct ceph_connection *con, struct ceph_msg_header *hdr, int *skip)*/
			/* LDV_COMMENT_END_PREP */
			/* LDV_COMMENT_FUNCTION_CALL Function from field "alloc_msg" from driver structure with callbacks "mon_con_ops" */
			ldv_handler_precall();
			mon_alloc_msg( var_group1, var_group3, var_mon_alloc_msg_42_p2);




		}

		break;
		default: break;

		}

	}

	ldv_module_exit:

	/* LDV_COMMENT_FUNCTION_CALL Checks that all resources and locks are correctly released before the driver will be unloaded. */
	ldv_final: ldv_check_final_state();

	/* LDV_COMMENT_END_FUNCTION_CALL_SECTION */
	return;

}
#endif

/* LDV_COMMENT_END_MAIN */
1 2 #include <linux/kernel.h> 3 #include <linux/mutex.h> 4 5 extern int mutex_lock_interruptible(struct mutex *lock); 6 extern int mutex_lock_killable(struct mutex *lock); 7 extern void mutex_lock(struct mutex *lock); 8 extern int ldv_mutex_lock_interruptible_crush_scratch_mutex_of_ceph_osdmap(struct mutex *lock); 9 extern int ldv_mutex_lock_killable_crush_scratch_mutex_of_ceph_osdmap(struct mutex *lock); 10 extern void ldv_mutex_lock_nested_crush_scratch_mutex_of_ceph_osdmap(struct mutex *lock, unsigned int subclass); 11 extern void ldv_mutex_lock_crush_scratch_mutex_of_ceph_osdmap(struct mutex *lock); 12 extern int ldv_mutex_trylock_crush_scratch_mutex_of_ceph_osdmap(struct mutex *lock); 13 extern int ldv_atomic_dec_and_mutex_lock_crush_scratch_mutex_of_ceph_osdmap(atomic_t *cnt, struct mutex *lock); 14 extern int ldv_mutex_is_locked_crush_scratch_mutex_of_ceph_osdmap(struct mutex *lock); 15 extern void ldv_mutex_unlock_crush_scratch_mutex_of_ceph_osdmap(struct mutex *lock); 16 extern int ldv_mutex_lock_interruptible_i_mutex_of_inode(struct mutex *lock); 17 extern int ldv_mutex_lock_killable_i_mutex_of_inode(struct mutex *lock); 18 extern void ldv_mutex_lock_nested_i_mutex_of_inode(struct mutex *lock, unsigned int subclass); 19 extern void ldv_mutex_lock_i_mutex_of_inode(struct mutex *lock); 20 extern int ldv_mutex_trylock_i_mutex_of_inode(struct mutex *lock); 21 extern int ldv_atomic_dec_and_mutex_lock_i_mutex_of_inode(atomic_t *cnt, struct mutex *lock); 22 extern int ldv_mutex_is_locked_i_mutex_of_inode(struct mutex *lock); 23 extern void ldv_mutex_unlock_i_mutex_of_inode(struct mutex *lock); 24 extern int ldv_mutex_lock_interruptible_lock(struct mutex *lock); 25 extern int ldv_mutex_lock_killable_lock(struct mutex *lock); 26 extern void ldv_mutex_lock_nested_lock(struct mutex *lock, unsigned int subclass); 27 extern void ldv_mutex_lock_lock(struct mutex *lock); 28 extern int ldv_mutex_trylock_lock(struct mutex *lock); 29 extern int 
ldv_atomic_dec_and_mutex_lock_lock(atomic_t *cnt, struct mutex *lock); 30 extern int ldv_mutex_is_locked_lock(struct mutex *lock); 31 extern void ldv_mutex_unlock_lock(struct mutex *lock); 32 extern int ldv_mutex_lock_interruptible_mount_mutex_of_ceph_client(struct mutex *lock); 33 extern int ldv_mutex_lock_killable_mount_mutex_of_ceph_client(struct mutex *lock); 34 extern void ldv_mutex_lock_nested_mount_mutex_of_ceph_client(struct mutex *lock, unsigned int subclass); 35 extern void ldv_mutex_lock_mount_mutex_of_ceph_client(struct mutex *lock); 36 extern int ldv_mutex_trylock_mount_mutex_of_ceph_client(struct mutex *lock); 37 extern int ldv_atomic_dec_and_mutex_lock_mount_mutex_of_ceph_client(atomic_t *cnt, struct mutex *lock); 38 extern int ldv_mutex_is_locked_mount_mutex_of_ceph_client(struct mutex *lock); 39 extern void ldv_mutex_unlock_mount_mutex_of_ceph_client(struct mutex *lock); 40 extern int ldv_mutex_lock_interruptible_mutex_of_ceph_auth_client(struct mutex *lock); 41 extern int ldv_mutex_lock_killable_mutex_of_ceph_auth_client(struct mutex *lock); 42 extern void ldv_mutex_lock_nested_mutex_of_ceph_auth_client(struct mutex *lock, unsigned int subclass); 43 extern void ldv_mutex_lock_mutex_of_ceph_auth_client(struct mutex *lock); 44 extern int ldv_mutex_trylock_mutex_of_ceph_auth_client(struct mutex *lock); 45 extern int ldv_atomic_dec_and_mutex_lock_mutex_of_ceph_auth_client(atomic_t *cnt, struct mutex *lock); 46 extern int ldv_mutex_is_locked_mutex_of_ceph_auth_client(struct mutex *lock); 47 extern void ldv_mutex_unlock_mutex_of_ceph_auth_client(struct mutex *lock); 48 extern int ldv_mutex_lock_interruptible_mutex_of_ceph_connection(struct mutex *lock); 49 extern int ldv_mutex_lock_killable_mutex_of_ceph_connection(struct mutex *lock); 50 extern void ldv_mutex_lock_nested_mutex_of_ceph_connection(struct mutex *lock, unsigned int subclass); 51 extern void ldv_mutex_lock_mutex_of_ceph_connection(struct mutex *lock); 52 extern int 
ldv_mutex_trylock_mutex_of_ceph_connection(struct mutex *lock); 53 extern int ldv_atomic_dec_and_mutex_lock_mutex_of_ceph_connection(atomic_t *cnt, struct mutex *lock); 54 extern int ldv_mutex_is_locked_mutex_of_ceph_connection(struct mutex *lock); 55 extern void ldv_mutex_unlock_mutex_of_ceph_connection(struct mutex *lock); 56 extern int ldv_mutex_lock_interruptible_mutex_of_ceph_mon_client(struct mutex *lock); 57 extern int ldv_mutex_lock_killable_mutex_of_ceph_mon_client(struct mutex *lock); 58 extern void ldv_mutex_lock_nested_mutex_of_ceph_mon_client(struct mutex *lock, unsigned int subclass); 59 extern void ldv_mutex_lock_mutex_of_ceph_mon_client(struct mutex *lock); 60 extern int ldv_mutex_trylock_mutex_of_ceph_mon_client(struct mutex *lock); 61 extern int ldv_atomic_dec_and_mutex_lock_mutex_of_ceph_mon_client(atomic_t *cnt, struct mutex *lock); 62 extern int ldv_mutex_is_locked_mutex_of_ceph_mon_client(struct mutex *lock); 63 extern void ldv_mutex_unlock_mutex_of_ceph_mon_client(struct mutex *lock); 64 extern int ldv_mutex_lock_interruptible_mutex_of_device(struct mutex *lock); 65 extern int ldv_mutex_lock_killable_mutex_of_device(struct mutex *lock); 66 extern void ldv_mutex_lock_nested_mutex_of_device(struct mutex *lock, unsigned int subclass); 67 extern void ldv_mutex_lock_mutex_of_device(struct mutex *lock); 68 extern int ldv_mutex_trylock_mutex_of_device(struct mutex *lock); 69 extern int ldv_atomic_dec_and_mutex_lock_mutex_of_device(atomic_t *cnt, struct mutex *lock); 70 extern int ldv_mutex_is_locked_mutex_of_device(struct mutex *lock); 71 extern void ldv_mutex_unlock_mutex_of_device(struct mutex *lock); 72 extern int ldv_mutex_lock_interruptible_request_mutex_of_ceph_osd_client(struct mutex *lock); 73 extern int ldv_mutex_lock_killable_request_mutex_of_ceph_osd_client(struct mutex *lock); 74 extern void ldv_mutex_lock_nested_request_mutex_of_ceph_osd_client(struct mutex *lock, unsigned int subclass); 75 extern void 
ldv_mutex_lock_request_mutex_of_ceph_osd_client(struct mutex *lock); 76 extern int ldv_mutex_trylock_request_mutex_of_ceph_osd_client(struct mutex *lock); 77 extern int ldv_atomic_dec_and_mutex_lock_request_mutex_of_ceph_osd_client(atomic_t *cnt, struct mutex *lock); 78 extern int ldv_mutex_is_locked_request_mutex_of_ceph_osd_client(struct mutex *lock); 79 extern void ldv_mutex_unlock_request_mutex_of_ceph_osd_client(struct mutex *lock); 80 81 82 #include <linux/ceph/ceph_debug.h> 83 84 #include <linux/module.h> 85 #include <linux/err.h> 86 #include <linux/highmem.h> 87 #include <linux/mm.h> 88 #include <linux/pagemap.h> 89 #include <linux/slab.h> 90 #include <linux/uaccess.h> 91 #ifdef CONFIG_BLOCK 92 #include <linux/bio.h> 93 #endif 94 95 #include <linux/ceph/libceph.h> 96 #include <linux/ceph/osd_client.h> 97 #include <linux/ceph/messenger.h> 98 #include <linux/ceph/decode.h> 99 #include <linux/ceph/auth.h> 100 #include <linux/ceph/pagelist.h> 101 102 #define OSD_OP_FRONT_LEN 4096 103 #define OSD_OPREPLY_FRONT_LEN 512 104 105 static struct kmem_cache *ceph_osd_request_cache; 106 107 static const struct ceph_connection_operations osd_con_ops; 108 109 static void __send_queued(struct ceph_osd_client *osdc); 110 static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd); 111 static void __register_request(struct ceph_osd_client *osdc, 112 struct ceph_osd_request *req); 113 static void __unregister_linger_request(struct ceph_osd_client *osdc, 114 struct ceph_osd_request *req); 115 static void __send_request(struct ceph_osd_client *osdc, 116 struct ceph_osd_request *req); 117 118 /* 119 * Implement client access to distributed object storage cluster. 120 * 121 * All data objects are stored within a cluster/cloud of OSDs, or 122 * "object storage devices." (Note that Ceph OSDs have _nothing_ to 123 * do with the T10 OSD extensions to SCSI.) 
Ceph OSDs are simply
 * remote daemons serving up and coordinating consistent and safe
 * access to storage.
 *
 * Cluster membership and the mapping of data objects onto storage devices
 * are described by the osd map.
 *
 * We keep track of pending OSD requests (read, write), resubmit
 * requests to different OSDs when the cluster topology/data layout
 * change, or retry the affected requests when the communications
 * channel with an OSD is reset.
 */

/*
 * calculate the mapping of a file extent onto an object, and fill out the
 * request accordingly.  shorten extent as necessary if it crosses an
 * object boundary.
 *
 * fill osd op in request message.
 */
static int calc_layout(struct ceph_file_layout *layout, u64 off, u64 *plen,
			u64 *objnum, u64 *objoff, u64 *objlen)
{
	u64 orig_len = *plen;
	int r;

	/* object extent? */
	r = ceph_calc_file_object_mapping(layout, off, orig_len, objnum,
					  objoff, objlen);
	if (r < 0)
		return r;
	if (*objlen < orig_len) {
		/* extent crossed an object boundary; truncate to this object */
		*plen = *objlen;
		dout(" skipping last %llu, final file extent %llu~%llu\n",
		     orig_len - *plen, off, *plen);
	}

	dout("calc_layout objnum=%llx %llu~%llu\n", *objnum, *objoff, *objlen);

	return 0;
}

/* Reset an osd_data descriptor to the empty (TYPE_NONE) state. */
static void ceph_osd_data_init(struct ceph_osd_data *osd_data)
{
	memset(osd_data, 0, sizeof (*osd_data));
	osd_data->type = CEPH_OSD_DATA_TYPE_NONE;
}

/* Point an osd_data descriptor at a page vector. */
static void ceph_osd_data_pages_init(struct ceph_osd_data *osd_data,
			struct page **pages, u64 length, u32 alignment,
			bool pages_from_pool, bool own_pages)
{
	osd_data->type = CEPH_OSD_DATA_TYPE_PAGES;
	osd_data->pages = pages;
	osd_data->length = length;
	osd_data->alignment = alignment;
	osd_data->pages_from_pool = pages_from_pool;
	osd_data->own_pages = own_pages;
}

/* Point an osd_data descriptor at a pagelist. */
static void ceph_osd_data_pagelist_init(struct ceph_osd_data *osd_data,
			struct ceph_pagelist *pagelist)
{
	osd_data->type = CEPH_OSD_DATA_TYPE_PAGELIST;
	osd_data->pagelist = pagelist;
}

#ifdef CONFIG_BLOCK
/* Point an osd_data descriptor at a bio chain. */
static void ceph_osd_data_bio_init(struct ceph_osd_data *osd_data,
			struct bio *bio, size_t bio_length)
{
	osd_data->type = CEPH_OSD_DATA_TYPE_BIO;
	osd_data->bio = bio;
	osd_data->bio_length = bio_length;
}
#endif /* CONFIG_BLOCK */

/* Bounds-checked accessor for the osd_data field of op "whch". */
#define osd_req_op_data(oreq, whch, typ, fld)	\
	({						\
		BUG_ON(whch >= (oreq)->r_num_ops);	\
		&(oreq)->r_ops[whch].typ.fld;		\
	})

static struct ceph_osd_data *
osd_req_op_raw_data_in(struct ceph_osd_request *osd_req, unsigned int which)
{
	BUG_ON(which >= osd_req->r_num_ops);

	return &osd_req->r_ops[which].raw_data_in;
}

struct ceph_osd_data *
osd_req_op_extent_osd_data(struct ceph_osd_request *osd_req,
			unsigned int which)
{
	return osd_req_op_data(osd_req, which, extent, osd_data);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data);

struct ceph_osd_data *
osd_req_op_cls_response_data(struct ceph_osd_request *osd_req,
			unsigned int which)
{
	return osd_req_op_data(osd_req, which, cls, response_data);
}
EXPORT_SYMBOL(osd_req_op_cls_response_data);	/* ???
 */

/* Attach a page vector as the raw inbound data buffer of op "which". */
void osd_req_op_raw_data_in_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages,
			u64 length, u32 alignment,
			bool pages_from_pool, bool own_pages)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_raw_data_in(osd_req, which);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_raw_data_in_pages);

/* Attach a page vector as the extent data of op "which". */
void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages,
			u64 length, u32 alignment,
			bool pages_from_pool, bool own_pages)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_pages);

/* Attach a pagelist as the extent data of op "which". */
void osd_req_op_extent_osd_data_pagelist(struct ceph_osd_request *osd_req,
			unsigned int which, struct ceph_pagelist *pagelist)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
	ceph_osd_data_pagelist_init(osd_data, pagelist);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_pagelist);

#ifdef CONFIG_BLOCK
/* Attach a bio chain as the extent data of op "which". */
void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *osd_req,
			unsigned int which, struct bio *bio, size_t bio_length)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
	ceph_osd_data_bio_init(osd_data, bio, bio_length);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_bio);
#endif /* CONFIG_BLOCK */

/* Attach a pagelist as the class-call request_info of op "which". */
static void osd_req_op_cls_request_info_pagelist(
			struct ceph_osd_request *osd_req,
			unsigned int which, struct ceph_pagelist *pagelist)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, request_info);
	ceph_osd_data_pagelist_init(osd_data, pagelist);
}

/* Attach a pagelist as the class-call request_data of op "which". */
void osd_req_op_cls_request_data_pagelist(
			struct ceph_osd_request *osd_req,
			unsigned int which, struct ceph_pagelist *pagelist)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, request_data);
	ceph_osd_data_pagelist_init(osd_data, pagelist);
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_pagelist);

/* Attach a page vector as the class-call request_data of op "which". */
void osd_req_op_cls_request_data_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages, u64 length,
			u32 alignment, bool pages_from_pool, bool own_pages)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, request_data);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_pages);

/* Attach a page vector as the class-call response_data of op "which". */
void osd_req_op_cls_response_data_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages, u64 length,
			u32 alignment, bool pages_from_pool, bool own_pages)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, response_data);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_cls_response_data_pages);

/* Return the byte length of whatever data an osd_data describes. */
static u64 ceph_osd_data_length(struct ceph_osd_data *osd_data)
{
	switch (osd_data->type) {
	case CEPH_OSD_DATA_TYPE_NONE:
		return 0;
	case CEPH_OSD_DATA_TYPE_PAGES:
		return osd_data->length;
	case CEPH_OSD_DATA_TYPE_PAGELIST:
		return (u64)osd_data->pagelist->length;
#ifdef CONFIG_BLOCK
	case CEPH_OSD_DATA_TYPE_BIO:
		return (u64)osd_data->bio_length;
#endif /* CONFIG_BLOCK */
	default:
		WARN(true, "unrecognized data type %d\n", (int)osd_data->type);
		return 0;
	}
}

static void ceph_osd_data_release(struct ceph_osd_data *osd_data)
{
	if
(osd_data->type == CEPH_OSD_DATA_TYPE_PAGES && osd_data->own_pages) { 345 int num_pages; 346 347 num_pages = calc_pages_for((u64)osd_data->alignment, 348 (u64)osd_data->length); 349 ceph_release_page_vector(osd_data->pages, num_pages); 350 } 351 ceph_osd_data_init(osd_data); 352 } 353 354 static void osd_req_op_data_release(struct ceph_osd_request *osd_req, 355 unsigned int which) 356 { 357 struct ceph_osd_req_op *op; 358 359 BUG_ON(which >= osd_req->r_num_ops); 360 op = &osd_req->r_ops[which]; 361 362 switch (op->op) { 363 case CEPH_OSD_OP_READ: 364 case CEPH_OSD_OP_WRITE: 365 ceph_osd_data_release(&op->extent.osd_data); 366 break; 367 case CEPH_OSD_OP_CALL: 368 ceph_osd_data_release(&op->cls.request_info); 369 ceph_osd_data_release(&op->cls.request_data); 370 ceph_osd_data_release(&op->cls.response_data); 371 break; 372 default: 373 break; 374 } 375 } 376 377 /* 378 * requests 379 */ 380 static void ceph_osdc_release_request(struct kref *kref) 381 { 382 struct ceph_osd_request *req = container_of(kref, 383 struct ceph_osd_request, r_kref); 384 unsigned int which; 385 386 dout("%s %p (r_request %p r_reply %p)\n", __func__, req, 387 req->r_request, req->r_reply); 388 WARN_ON(!RB_EMPTY_NODE(&req->r_node)); 389 WARN_ON(!list_empty(&req->r_req_lru_item)); 390 WARN_ON(!list_empty(&req->r_osd_item)); 391 WARN_ON(!list_empty(&req->r_linger_item)); 392 WARN_ON(!list_empty(&req->r_linger_osd_item)); 393 WARN_ON(req->r_osd); 394 395 if (req->r_request) 396 ceph_msg_put(req->r_request); 397 if (req->r_reply) { 398 ceph_msg_revoke_incoming(req->r_reply); 399 ceph_msg_put(req->r_reply); 400 } 401 402 for (which = 0; which < req->r_num_ops; which++) 403 osd_req_op_data_release(req, which); 404 405 ceph_put_snap_context(req->r_snapc); 406 if (req->r_mempool) 407 mempool_free(req, req->r_osdc->req_mempool); 408 else 409 kmem_cache_free(ceph_osd_request_cache, req); 410 411 } 412 413 void ceph_osdc_get_request(struct ceph_osd_request *req) 414 { 415 dout("%s %p (was %d)\n", 
__func__, req, 416 atomic_read(&req->r_kref.refcount)); 417 kref_get(&req->r_kref); 418 } 419 EXPORT_SYMBOL(ceph_osdc_get_request); 420 421 void ceph_osdc_put_request(struct ceph_osd_request *req) 422 { 423 dout("%s %p (was %d)\n", __func__, req, 424 atomic_read(&req->r_kref.refcount)); 425 kref_put(&req->r_kref, ceph_osdc_release_request); 426 } 427 EXPORT_SYMBOL(ceph_osdc_put_request); 428 429 struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc, 430 struct ceph_snap_context *snapc, 431 unsigned int num_ops, 432 bool use_mempool, 433 gfp_t gfp_flags) 434 { 435 struct ceph_osd_request *req; 436 struct ceph_msg *msg; 437 size_t msg_size; 438 439 BUILD_BUG_ON(CEPH_OSD_MAX_OP > U16_MAX); 440 BUG_ON(num_ops > CEPH_OSD_MAX_OP); 441 442 msg_size = 4 + 4 + 8 + 8 + 4+8; 443 msg_size += 2 + 4 + 8 + 4 + 4; /* oloc */ 444 msg_size += 1 + 8 + 4 + 4; /* pg_t */ 445 msg_size += 4 + CEPH_MAX_OID_NAME_LEN; /* oid */ 446 msg_size += 2 + num_ops*sizeof(struct ceph_osd_op); 447 msg_size += 8; /* snapid */ 448 msg_size += 8; /* snap_seq */ 449 msg_size += 8 * (snapc ? 
snapc->num_snaps : 0); /* snaps */ 450 msg_size += 4; 451 452 if (use_mempool) { 453 req = mempool_alloc(osdc->req_mempool, gfp_flags); 454 memset(req, 0, sizeof(*req)); 455 } else { 456 req = kmem_cache_zalloc(ceph_osd_request_cache, gfp_flags); 457 } 458 if (req == NULL) 459 return NULL; 460 461 req->r_osdc = osdc; 462 req->r_mempool = use_mempool; 463 req->r_num_ops = num_ops; 464 465 kref_init(&req->r_kref); 466 init_completion(&req->r_completion); 467 init_completion(&req->r_safe_completion); 468 RB_CLEAR_NODE(&req->r_node); 469 INIT_LIST_HEAD(&req->r_unsafe_item); 470 INIT_LIST_HEAD(&req->r_linger_item); 471 INIT_LIST_HEAD(&req->r_linger_osd_item); 472 INIT_LIST_HEAD(&req->r_req_lru_item); 473 INIT_LIST_HEAD(&req->r_osd_item); 474 475 req->r_base_oloc.pool = -1; 476 req->r_target_oloc.pool = -1; 477 478 /* create reply message */ 479 if (use_mempool) 480 msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0); 481 else 482 msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, 483 OSD_OPREPLY_FRONT_LEN, gfp_flags, true); 484 if (!msg) { 485 ceph_osdc_put_request(req); 486 return NULL; 487 } 488 req->r_reply = msg; 489 490 /* create request message; allow space for oid */ 491 if (use_mempool) 492 msg = ceph_msgpool_get(&osdc->msgpool_op, 0); 493 else 494 msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, gfp_flags, true); 495 if (!msg) { 496 ceph_osdc_put_request(req); 497 return NULL; 498 } 499 500 memset(msg->front.iov_base, 0, msg->front.iov_len); 501 502 req->r_request = msg; 503 504 return req; 505 } 506 EXPORT_SYMBOL(ceph_osdc_alloc_request); 507 508 static bool osd_req_opcode_valid(u16 opcode) 509 { 510 switch (opcode) { 511 case CEPH_OSD_OP_READ: 512 case CEPH_OSD_OP_STAT: 513 case CEPH_OSD_OP_MAPEXT: 514 case CEPH_OSD_OP_MASKTRUNC: 515 case CEPH_OSD_OP_SPARSE_READ: 516 case CEPH_OSD_OP_NOTIFY: 517 case CEPH_OSD_OP_NOTIFY_ACK: 518 case CEPH_OSD_OP_ASSERT_VER: 519 case CEPH_OSD_OP_WRITE: 520 case CEPH_OSD_OP_WRITEFULL: 521 case CEPH_OSD_OP_TRUNCATE: 522 case CEPH_OSD_OP_ZERO: 
523 case CEPH_OSD_OP_DELETE: 524 case CEPH_OSD_OP_APPEND: 525 case CEPH_OSD_OP_STARTSYNC: 526 case CEPH_OSD_OP_SETTRUNC: 527 case CEPH_OSD_OP_TRIMTRUNC: 528 case CEPH_OSD_OP_TMAPUP: 529 case CEPH_OSD_OP_TMAPPUT: 530 case CEPH_OSD_OP_TMAPGET: 531 case CEPH_OSD_OP_CREATE: 532 case CEPH_OSD_OP_ROLLBACK: 533 case CEPH_OSD_OP_WATCH: 534 case CEPH_OSD_OP_OMAPGETKEYS: 535 case CEPH_OSD_OP_OMAPGETVALS: 536 case CEPH_OSD_OP_OMAPGETHEADER: 537 case CEPH_OSD_OP_OMAPGETVALSBYKEYS: 538 case CEPH_OSD_OP_OMAPSETVALS: 539 case CEPH_OSD_OP_OMAPSETHEADER: 540 case CEPH_OSD_OP_OMAPCLEAR: 541 case CEPH_OSD_OP_OMAPRMKEYS: 542 case CEPH_OSD_OP_OMAP_CMP: 543 case CEPH_OSD_OP_SETALLOCHINT: 544 case CEPH_OSD_OP_CLONERANGE: 545 case CEPH_OSD_OP_ASSERT_SRC_VERSION: 546 case CEPH_OSD_OP_SRC_CMPXATTR: 547 case CEPH_OSD_OP_GETXATTR: 548 case CEPH_OSD_OP_GETXATTRS: 549 case CEPH_OSD_OP_CMPXATTR: 550 case CEPH_OSD_OP_SETXATTR: 551 case CEPH_OSD_OP_SETXATTRS: 552 case CEPH_OSD_OP_RESETXATTRS: 553 case CEPH_OSD_OP_RMXATTR: 554 case CEPH_OSD_OP_PULL: 555 case CEPH_OSD_OP_PUSH: 556 case CEPH_OSD_OP_BALANCEREADS: 557 case CEPH_OSD_OP_UNBALANCEREADS: 558 case CEPH_OSD_OP_SCRUB: 559 case CEPH_OSD_OP_SCRUB_RESERVE: 560 case CEPH_OSD_OP_SCRUB_UNRESERVE: 561 case CEPH_OSD_OP_SCRUB_STOP: 562 case CEPH_OSD_OP_SCRUB_MAP: 563 case CEPH_OSD_OP_WRLOCK: 564 case CEPH_OSD_OP_WRUNLOCK: 565 case CEPH_OSD_OP_RDLOCK: 566 case CEPH_OSD_OP_RDUNLOCK: 567 case CEPH_OSD_OP_UPLOCK: 568 case CEPH_OSD_OP_DNLOCK: 569 case CEPH_OSD_OP_CALL: 570 case CEPH_OSD_OP_PGLS: 571 case CEPH_OSD_OP_PGLS_FILTER: 572 return true; 573 default: 574 return false; 575 } 576 } 577 578 /* 579 * This is an osd op init function for opcodes that have no data or 580 * other information associated with them. It also serves as a 581 * common init routine for all the other init functions, below. 
582 */
/* _osd_req_op_init(): zero-fill slot `which` of r_ops[] and stamp it with `opcode`.
 * BUGs on an out-of-range index or an opcode osd_req_opcode_valid() rejects. */
583 static struct ceph_osd_req_op * 584 _osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which, 585 u16 opcode) 586 { 587 struct ceph_osd_req_op *op; 588 589 BUG_ON(which >= osd_req->r_num_ops); 590 BUG_ON(!osd_req_opcode_valid(opcode)); 591 592 op = &osd_req->r_ops[which]; 593 memset(op, 0, sizeof (*op)); 594 op->op = opcode; 595 596 return op; 597 } 598
/* Public wrapper: initialize an op slot, discarding the returned pointer. */
599 void osd_req_op_init(struct ceph_osd_request *osd_req, 600 unsigned int which, u16 opcode) 601 { 602 (void)_osd_req_op_init(osd_req, which, opcode); 603 } 604 EXPORT_SYMBOL(osd_req_op_init); 605
/* Set up an extent op (READ/WRITE/DELETE/ZERO/TRUNCATE).  Only WRITE
 * contributes `length` bytes to payload_len; all others leave it 0. */
606 void osd_req_op_extent_init(struct ceph_osd_request *osd_req, 607 unsigned int which, u16 opcode, 608 u64 offset, u64 length, 609 u64 truncate_size, u32 truncate_seq) 610 { 611 struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which, opcode); 612 size_t payload_len = 0; 613 614 BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE && 615 opcode != CEPH_OSD_OP_DELETE && opcode != CEPH_OSD_OP_ZERO && 616 opcode != CEPH_OSD_OP_TRUNCATE); 617 618 op->extent.offset = offset; 619 op->extent.length = length; 620 op->extent.truncate_size = truncate_size; 621 op->extent.truncate_seq = truncate_seq; 622 if (opcode == CEPH_OSD_OP_WRITE) 623 payload_len += length; 624 625 op->payload_len = payload_len; 626 } 627 EXPORT_SYMBOL(osd_req_op_extent_init); 628
/* Shrink an already-initialized extent op to `length` (growing trips BUG_ON).
 * NOTE(review): `payload_len -= previous - length` assumes this op contributed
 * `previous` bytes to payload_len, which osd_req_op_extent_init() does only
 * for CEPH_OSD_OP_WRITE; shrinking a READ op (payload_len == 0) would wrap the
 * subtraction -- confirm callers only update write ops. */
629 void osd_req_op_extent_update(struct ceph_osd_request *osd_req, 630 unsigned int which, u64 length) 631 { 632 struct ceph_osd_req_op *op; 633 u64 previous; 634 635 BUG_ON(which >= osd_req->r_num_ops); 636 op = &osd_req->r_ops[which]; 637 previous = op->extent.length; 638 639 if (length == previous) 640 return; /* Nothing to do */ 641 BUG_ON(length > previous); 642 643 op->extent.length = length; 644 op->payload_len -= previous - length; 645 } 646 EXPORT_SYMBOL(osd_req_op_extent_update); 647
/* Set up a CEPH_OSD_OP_CALL op: class/method names are copied into a freshly
 * kmalloc'ed pagelist which is handed off via
 * osd_req_op_cls_request_info_pagelist().  Allocation failure is fatal
 * (BUG_ON(!pagelist)) rather than reported to the caller.  class/method
 * strings must outlive the request (only pointers are stored in op->cls). */
648 void osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which, 649 u16 opcode, const char *class, const char *method) 650 { 651
struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which, opcode); 652 struct ceph_pagelist *pagelist; 653 size_t payload_len = 0; 654 size_t size; 655 656 BUG_ON(opcode != CEPH_OSD_OP_CALL); 657 658 pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS); 659 BUG_ON(!pagelist); 660 ceph_pagelist_init(pagelist); 661 662 op->cls.class_name = class; 663 size = strlen(class); 664 BUG_ON(size > (size_t) U8_MAX); 665 op->cls.class_len = size; 666 ceph_pagelist_append(pagelist, class, size); 667 payload_len += size; 668 669 op->cls.method_name = method; 670 size = strlen(method); 671 BUG_ON(size > (size_t) U8_MAX); 672 op->cls.method_len = size; 673 ceph_pagelist_append(pagelist, method, size); 674 payload_len += size; 675 676 osd_req_op_cls_request_info_pagelist(osd_req, which, pagelist); 677 678 op->cls.argc = 0; /* currently unused */ 679 680 op->payload_len = payload_len; 681 } 682 EXPORT_SYMBOL(osd_req_op_cls_init); 683
/* Set up a NOTIFY_ACK or WATCH op; the flag byte is only honoured for WATCH. */
684 void osd_req_op_watch_init(struct ceph_osd_request *osd_req, 685 unsigned int which, u16 opcode, 686 u64 cookie, u64 version, int flag) 687 { 688 struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which, opcode); 689 690 BUG_ON(opcode != CEPH_OSD_OP_NOTIFY_ACK && opcode != CEPH_OSD_OP_WATCH); 691 692 op->watch.cookie = cookie; 693 op->watch.ver = version; 694 if (opcode == CEPH_OSD_OP_WATCH && flag) 695 op->watch.flag = (u8)1; 696 } 697 EXPORT_SYMBOL(osd_req_op_watch_init); 698
/* Set up a SETALLOCHINT op carrying the caller's object/write size hints. */
699 void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req, 700 unsigned int which, 701 u64 expected_object_size, 702 u64 expected_write_size) 703 { 704 struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which, 705 CEPH_OSD_OP_SETALLOCHINT); 706 707 op->alloc_hint.expected_object_size = expected_object_size; 708 op->alloc_hint.expected_write_size = expected_write_size; 709 710 /* 711 * CEPH_OSD_OP_SETALLOCHINT op is advisory and therefore deemed 712 * not worth a feature bit.
Set FAILOK per-op flag to make 713 * sure older osds don't trip over an unsupported opcode. 714 */ 715 op->flags |= CEPH_OSD_OP_FLAG_FAILOK; 716 } 717 EXPORT_SYMBOL(osd_req_op_alloc_hint_init); 718
/* Attach an op's staged data to a message in whatever form it was staged
 * (pages / pagelist / bio); TYPE_NONE is a no-op, anything else BUGs. */
719 static void ceph_osdc_msg_data_add(struct ceph_msg *msg, 720 struct ceph_osd_data *osd_data) 721 { 722 u64 length = ceph_osd_data_length(osd_data); 723 724 if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) { 725 BUG_ON(length > (u64) SIZE_MAX); 726 if (length) 727 ceph_msg_data_add_pages(msg, osd_data->pages, 728 length, osd_data->alignment); 729 } else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) { 730 BUG_ON(!length); 731 ceph_msg_data_add_pagelist(msg, osd_data->pagelist); 732 #ifdef CONFIG_BLOCK 733 } else if (osd_data->type == CEPH_OSD_DATA_TYPE_BIO) { 734 ceph_msg_data_add_bio(msg, osd_data->bio, length); 735 #endif 736 } else { 737 BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_NONE); 738 } 739 } 740
/* Encode r_ops[which] into the wire struct `dst` (little-endian) and hook its
 * data items onto r_request (outgoing) or r_reply (incoming) as appropriate.
 * Returns the number of outgoing request-data bytes this op adds, or 0 for
 * an unknown opcode (WARNed, not fatal). */
741 static u64 osd_req_encode_op(struct ceph_osd_request *req, 742 struct ceph_osd_op *dst, unsigned int which) 743 { 744 struct ceph_osd_req_op *src; 745 struct ceph_osd_data *osd_data; 746 u64 request_data_len = 0; 747 u64 data_length; 748 749 BUG_ON(which >= req->r_num_ops); 750 src = &req->r_ops[which]; 751 if (WARN_ON(!osd_req_opcode_valid(src->op))) { 752 pr_err("unrecognized osd opcode %d\n", src->op); 753 754 return 0; 755 } 756 757 switch (src->op) { 758 case CEPH_OSD_OP_STAT: 759 osd_data = &src->raw_data_in; 760 ceph_osdc_msg_data_add(req->r_reply, osd_data); 761 break; 762 case CEPH_OSD_OP_READ: 763 case CEPH_OSD_OP_WRITE: 764 case CEPH_OSD_OP_ZERO: 765 case CEPH_OSD_OP_DELETE: 766 case CEPH_OSD_OP_TRUNCATE: 767 if (src->op == CEPH_OSD_OP_WRITE) 768 request_data_len = src->extent.length; 769 dst->extent.offset = cpu_to_le64(src->extent.offset); 770 dst->extent.length = cpu_to_le64(src->extent.length); 771 dst->extent.truncate_size = 772 cpu_to_le64(src->extent.truncate_size); 773 dst->extent.truncate_seq = 774 cpu_to_le32(src->extent.truncate_seq);
775 osd_data = &src->extent.osd_data; 776 if (src->op == CEPH_OSD_OP_WRITE) 777 ceph_osdc_msg_data_add(req->r_request, osd_data); 778 else 779 ceph_osdc_msg_data_add(req->r_reply, osd_data); 780 break; 781 case CEPH_OSD_OP_CALL: 782 dst->cls.class_len = src->cls.class_len; 783 dst->cls.method_len = src->cls.method_len; 784 osd_data = &src->cls.request_info; 785 ceph_osdc_msg_data_add(req->r_request, osd_data); 786 BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGELIST); 787 request_data_len = osd_data->pagelist->length; 788 789 osd_data = &src->cls.request_data; 790 data_length = ceph_osd_data_length(osd_data); 791 if (data_length) { 792 BUG_ON(osd_data->type == CEPH_OSD_DATA_TYPE_NONE); 793 dst->cls.indata_len = cpu_to_le32(data_length); 794 ceph_osdc_msg_data_add(req->r_request, osd_data); 795 src->payload_len += data_length; /* note: mutates src on every encode */ 796 request_data_len += data_length; 797 } 798 osd_data = &src->cls.response_data; 799 ceph_osdc_msg_data_add(req->r_reply, osd_data); 800 break; 801 case CEPH_OSD_OP_STARTSYNC: 802 break; 803 case CEPH_OSD_OP_NOTIFY_ACK: 804 case CEPH_OSD_OP_WATCH: 805 dst->watch.cookie = cpu_to_le64(src->watch.cookie); 806 dst->watch.ver = cpu_to_le64(src->watch.ver); 807 dst->watch.flag = src->watch.flag; 808 break; 809 case CEPH_OSD_OP_SETALLOCHINT: 810 dst->alloc_hint.expected_object_size = 811 cpu_to_le64(src->alloc_hint.expected_object_size); 812 dst->alloc_hint.expected_write_size = 813 cpu_to_le64(src->alloc_hint.expected_write_size); 814 break; 815 default: 816 pr_err("unsupported osd opcode %s\n", 817 ceph_osd_op_name(src->op)); 818 WARN_ON(1); 819 820 return 0; 821 } 822 823 dst->op = cpu_to_le16(src->op); 824 dst->flags = cpu_to_le32(src->flags); 825 dst->payload_len = cpu_to_le32(src->payload_len); 826 827 return request_data_len; 828 } 829 830 /* 831 * build new request AND message, calculate layout, and adjust file 832 * extent as needed.
833 * 834 * if the file was recently truncated, we include information about its 835 * old and new size so that the object can be updated appropriately. (we 836 * avoid synchronously deleting truncated objects because it's slow.) 837 * 838 * if @do_sync, include a 'startsync' command so that the osd will flush 839 * data quickly. 840 */
/* Returns a new request on success or ERR_PTR(-ENOMEM / calc_layout error);
 * caller owns the returned reference. */
841 struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, 842 struct ceph_file_layout *layout, 843 struct ceph_vino vino, 844 u64 off, u64 *plen, int num_ops, 845 int opcode, int flags, 846 struct ceph_snap_context *snapc, 847 u32 truncate_seq, 848 u64 truncate_size, 849 bool use_mempool) 850 { 851 struct ceph_osd_request *req; 852 u64 objnum = 0; 853 u64 objoff = 0; 854 u64 objlen = 0; 855 u32 object_size; 856 u64 object_base; 857 int r; 858 859 BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE && 860 opcode != CEPH_OSD_OP_DELETE && opcode != CEPH_OSD_OP_ZERO && 861 opcode != CEPH_OSD_OP_TRUNCATE); 862 863 req = ceph_osdc_alloc_request(osdc, snapc, num_ops, use_mempool, 864 GFP_NOFS); 865 if (!req) 866 return ERR_PTR(-ENOMEM); 867 868 req->r_flags = flags; 869 870 /* calculate max write size */ 871 r = calc_layout(layout, off, plen, &objnum, &objoff, &objlen); 872 if (r < 0) { 873 ceph_osdc_put_request(req); 874 return ERR_PTR(r); 875 } 876
/* clamp the file-relative truncate_size into this object's range;
 * truncate_seq==1 && truncate_size==-1 is the "no truncation" sentinel */
877 object_size = le32_to_cpu(layout->fl_object_size); 878 object_base = off - objoff; 879 if (!(truncate_seq == 1 && truncate_size == -1ULL)) { 880 if (truncate_size <= object_base) { 881 truncate_size = 0; 882 } else { 883 truncate_size -= object_base; 884 if (truncate_size > object_size) 885 truncate_size = object_size; 886 } 887 } 888 889 osd_req_op_extent_init(req, 0, opcode, objoff, objlen, 890 truncate_size, truncate_seq); 891 892 /* 893 * A second op in the ops array means the caller wants to 894 * also include a 'startsync' command so that the 895 * osd will flush data quickly.
896 */ 897 if (num_ops > 1) 898 osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC); 899 900 req->r_base_oloc.pool = ceph_file_layout_pg_pool(*layout); 901 902 snprintf(req->r_base_oid.name, sizeof(req->r_base_oid.name), 903 "%llx.%08llx", vino.ino, objnum); 904 req->r_base_oid.name_len = strlen(req->r_base_oid.name); 905 906 return req; 907 } 908 EXPORT_SYMBOL(ceph_osdc_new_request); 909 910 /* 911 * We keep osd requests in an rbtree, sorted by ->r_tid. 912 */
/* Insert `new` into osdc->requests keyed by r_tid; duplicate tids BUG. */
913 static void __insert_request(struct ceph_osd_client *osdc, 914 struct ceph_osd_request *new) 915 { 916 struct rb_node **p = &osdc->requests.rb_node; 917 struct rb_node *parent = NULL; 918 struct ceph_osd_request *req = NULL; 919 920 while (*p) { 921 parent = *p; 922 req = rb_entry(parent, struct ceph_osd_request, r_node); 923 if (new->r_tid < req->r_tid) 924 p = &(*p)->rb_left; 925 else if (new->r_tid > req->r_tid) 926 p = &(*p)->rb_right; 927 else 928 BUG(); 929 } 930 931 rb_link_node(&new->r_node, parent, p); 932 rb_insert_color(&new->r_node, &osdc->requests); 933 } 934
/* Exact-match lookup by tid; NULL if absent. */
935 static struct ceph_osd_request *__lookup_request(struct ceph_osd_client *osdc, 936 u64 tid) 937 { 938 struct ceph_osd_request *req; 939 struct rb_node *n = osdc->requests.rb_node; 940 941 while (n) { 942 req = rb_entry(n, struct ceph_osd_request, r_node); 943 if (tid < req->r_tid) 944 n = n->rb_left; 945 else if (tid > req->r_tid) 946 n = n->rb_right; 947 else 948 return req; 949 } 950 return NULL; 951 } 952
/* Find the request with the smallest r_tid >= tid; NULL if none. */
953 static struct ceph_osd_request * 954 __lookup_request_ge(struct ceph_osd_client *osdc, 955 u64 tid) 956 { 957 struct ceph_osd_request *req; 958 struct rb_node *n = osdc->requests.rb_node; 959 960 while (n) { 961 req = rb_entry(n, struct ceph_osd_request, r_node); 962 if (tid < req->r_tid) { 963 if (!n->rb_left) 964 return req; 965 n = n->rb_left; 966 } else if (tid > req->r_tid) { 967 n = n->rb_right; 968 } else { 969 return req; 970 } 971 } 972 return NULL; 973 } 974 975 /* 976 * Resubmit requests pending on the given osd.
977 */ 978 static void __kick_osd_requests(struct ceph_osd_client *osdc, 979 struct ceph_osd *osd) 980 { 981 struct ceph_osd_request *req, *nreq; 982 LIST_HEAD(resend); 983 int err; 984 985 dout("__kick_osd_requests osd%d\n", osd->o_osd); 986 err = __reset_osd(osdc, osd); 987 if (err) 988 return; 989 /* 990 * Build up a list of requests to resend by traversing the 991 * osd's list of requests. Requests for a given object are 992 * sent in tid order, and that is also the order they're 993 * kept on this list. Therefore all requests that are in 994 * flight will be found first, followed by all requests that 995 * have not yet been sent. And to resend requests while 996 * preserving this order we will want to put any sent 997 * requests back on the front of the osd client's unsent 998 * list. 999 * 1000 * So we build a separate ordered list of already-sent 1001 * requests for the affected osd and splice it onto the 1002 * front of the osd client's unsent list. Once we've seen a 1003 * request that has not yet been sent we're done. Those 1004 * requests are already sitting right where they belong. 1005 */ 1006 list_for_each_entry(req, &osd->o_requests, r_osd_item) { 1007 if (!req->r_sent) 1008 break; 1009 list_move_tail(&req->r_req_lru_item, &resend); 1010 dout("requeueing %p tid %llu osd%d\n", req, req->r_tid, 1011 osd->o_osd); 1012 if (!req->r_linger) 1013 req->r_flags |= CEPH_OSD_FLAG_RETRY; 1014 } 1015 list_splice(&resend, &osdc->req_unsent); 1016 1017 /* 1018 * Linger requests are re-registered before sending, which 1019 * sets up a new tid for each. We add them to the unsent 1020 * list at the end to keep things in tid order. 1021 */ 1022 list_for_each_entry_safe(req, nreq, &osd->o_linger_requests, 1023 r_linger_osd_item) { 1024 /* 1025 * reregister request prior to unregistering linger so 1026 * that r_osd is preserved. 
1027 */ 1028 BUG_ON(!list_empty(&req->r_req_lru_item)); 1029 __register_request(osdc, req); 1030 list_add_tail(&req->r_req_lru_item, &osdc->req_unsent); 1031 list_add_tail(&req->r_osd_item, &req->r_osd->o_requests); 1032 __unregister_linger_request(osdc, req); 1033 dout("requeued lingering %p tid %llu osd%d\n", req, req->r_tid, 1034 osd->o_osd); 1035 } 1036 } 1037 1038 /* 1039 * If the osd connection drops, we need to resubmit all requests. 1040 */
/* ceph_connection fault callback: requeue everything for this osd and push
 * the unsent queue, under map_sem (read) + request_mutex. */
1041 static void osd_reset(struct ceph_connection *con) 1042 { 1043 struct ceph_osd *osd = con->private; 1044 struct ceph_osd_client *osdc; 1045 1046 if (!osd) 1047 return; 1048 dout("osd_reset osd%d\n", osd->o_osd); 1049 osdc = osd->o_osdc; 1050 down_read(&osdc->map_sem); 1051 mutex_lock(&osdc->request_mutex); 1052 __kick_osd_requests(osdc, osd); 1053 __send_queued(osdc); 1054 mutex_unlock(&osdc->request_mutex); 1055 up_read(&osdc->map_sem); 1056 } 1057 1058 /* 1059 * Track open sessions with osds. 1060 */
/* Allocate a session for osd number `onum` with refcount 1; returns NULL on
 * allocation failure.  Freed via put_osd() when the last ref is dropped. */
1061 static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum) 1062 { 1063 struct ceph_osd *osd; 1064 1065 osd = kzalloc(sizeof(*osd), GFP_NOFS); 1066 if (!osd) 1067 return NULL; 1068 1069 atomic_set(&osd->o_ref, 1); 1070 osd->o_osdc = osdc; 1071 osd->o_osd = onum; 1072 RB_CLEAR_NODE(&osd->o_node); 1073 INIT_LIST_HEAD(&osd->o_requests); 1074 INIT_LIST_HEAD(&osd->o_linger_requests); 1075 INIT_LIST_HEAD(&osd->o_osd_lru); 1076 osd->o_incarnation = 1; 1077 1078 ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr); 1079 1080 INIT_LIST_HEAD(&osd->o_keepalive_item); 1081 return osd; 1082 } 1083
/* Take a reference unless the count has already hit zero (returns NULL then). */
1084 static struct ceph_osd *get_osd(struct ceph_osd *osd) 1085 { 1086 if (atomic_inc_not_zero(&osd->o_ref)) { 1087 dout("get_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref)-1, 1088 atomic_read(&osd->o_ref)); 1089 return osd; 1090 } else { 1091 dout("get_osd %p FAIL\n", osd); 1092 return NULL; 1093 } 1094 }
/* Drop a reference; on the final put, destroy the authorizer (if any) and
 * free the osd. */
1096 static void put_osd(struct ceph_osd *osd) 1097 { 1098 dout("put_osd %p %d -> %d\n",
osd, atomic_read(&osd->o_ref), 1099 atomic_read(&osd->o_ref) - 1);
	/*
	 * FIX: kfree() must not depend on the authorizer being set.  The
	 * previous code freed `osd` only inside an
	 * `atomic_dec_and_test(...) && osd->o_auth.authorizer` condition,
	 * leaking the struct ceph_osd whenever the refcount reached zero
	 * while no authorizer had been allocated (matches the upstream
	 * libceph fix "kfree() in put_osd() shouldn't depend on authorizer").
	 */
	if (atomic_dec_and_test(&osd->o_ref)) {
		if (osd->o_auth.authorizer) {
			struct ceph_auth_client *ac =
				osd->o_osdc->client->monc.auth;

			ceph_auth_destroy_authorizer(ac,
						     osd->o_auth.authorizer);
		}
		kfree(osd);
	}
1106 } 1107 1108 /* 1109 * remove an osd from our map 1110 */
/* Unlink the osd from the rbtree and lru and drop the map's reference;
 * both request lists must already be empty (BUG_ON). */
1111 static void __remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd) 1112 { 1113 dout("__remove_osd %p\n", osd); 1114 BUG_ON(!list_empty(&osd->o_requests)); 1115 BUG_ON(!list_empty(&osd->o_linger_requests)); 1116 1117 rb_erase(&osd->o_node, &osdc->osds); 1118 list_del_init(&osd->o_osd_lru); 1119 ceph_con_close(&osd->o_con); 1120 put_osd(osd); 1121 } 1122
/* Tear down every osd session, under request_mutex. */
1123 static void remove_all_osds(struct ceph_osd_client *osdc) 1124 { 1125 dout("%s %p\n", __func__, osdc); 1126 mutex_lock(&osdc->request_mutex); 1127 while (!RB_EMPTY_ROOT(&osdc->osds)) { 1128 struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds), 1129 struct ceph_osd, o_node); 1130 __remove_osd(osdc, osd); 1131 } 1132 mutex_unlock(&osdc->request_mutex); 1133 } 1134
/* Park an idle osd on the lru with an expiry of now + osd_idle_ttl. */
1135 static void __move_osd_to_lru(struct ceph_osd_client *osdc, 1136 struct ceph_osd *osd) 1137 { 1138 dout("%s %p\n", __func__, osd); 1139 BUG_ON(!list_empty(&osd->o_osd_lru)); 1140 1141 list_add_tail(&osd->o_osd_lru, &osdc->osd_lru); 1142 osd->lru_ttl = jiffies + osdc->client->options->osd_idle_ttl * HZ; 1143 } 1144
/* Move the osd to the lru only once it has no (linger) requests left. */
1145 static void maybe_move_osd_to_lru(struct ceph_osd_client *osdc, 1146 struct ceph_osd *osd) 1147 { 1148 dout("%s %p\n", __func__, osd); 1149 1150 if (list_empty(&osd->o_requests) && 1151 list_empty(&osd->o_linger_requests)) 1152 __move_osd_to_lru(osdc, osd); 1153 } 1154
/* Take the osd off the idle lru, if it is on it. */
1155 static void __remove_osd_from_lru(struct ceph_osd *osd) 1156 { 1157 dout("__remove_osd_from_lru %p\n", osd); 1158 if (!list_empty(&osd->o_osd_lru)) 1159 list_del_init(&osd->o_osd_lru); 1160 } 1161
/* Reap lru'ed osds whose idle ttl has expired, under request_mutex. */
1162 static void remove_old_osds(struct ceph_osd_client *osdc) 1163 { 1164 struct ceph_osd *osd,
*nosd; 1165 1166 dout("__remove_old_osds %p\n", osdc); 1167 mutex_lock(&osdc->request_mutex); 1168 list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) { 1169 if (time_before(jiffies, osd->lru_ttl)) 1170 break; 1171 __remove_osd(osdc, osd); 1172 } 1173 mutex_unlock(&osdc->request_mutex); 1174 } 1175 1176 /* 1177 * reset osd connect 1178 */
/* Returns -ENODEV after tearing down an idle osd, -EAGAIN when the address
 * is unchanged and msgr will retry the never-opened connection, or 0 after
 * closing and reopening the connection (bumping o_incarnation). */
1179 static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd) 1180 { 1181 struct ceph_entity_addr *peer_addr; 1182 1183 dout("__reset_osd %p osd%d\n", osd, osd->o_osd); 1184 if (list_empty(&osd->o_requests) && 1185 list_empty(&osd->o_linger_requests)) { 1186 __remove_osd(osdc, osd); 1187 1188 return -ENODEV; 1189 } 1190 1191 peer_addr = &osdc->osdmap->osd_addr[osd->o_osd]; 1192 if (!memcmp(peer_addr, &osd->o_con.peer_addr, sizeof (*peer_addr)) && 1193 !ceph_con_opened(&osd->o_con)) { 1194 struct ceph_osd_request *req; 1195 1196 dout("osd addr hasn't changed and connection never opened, " 1197 "letting msgr retry\n"); 1198 /* touch each r_stamp for handle_timeout()'s benefit */ 1199 list_for_each_entry(req, &osd->o_requests, r_osd_item) 1200 req->r_stamp = jiffies; 1201 1202 return -EAGAIN; 1203 } 1204 1205 ceph_con_close(&osd->o_con); 1206 ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd, peer_addr); 1207 osd->o_incarnation++; 1208 1209 return 0; 1210 } 1211
/* Insert `new` into osdc->osds keyed by osd number; duplicates BUG. */
1212 static void __insert_osd(struct ceph_osd_client *osdc, struct ceph_osd *new) 1213 { 1214 struct rb_node **p = &osdc->osds.rb_node; 1215 struct rb_node *parent = NULL; 1216 struct ceph_osd *osd = NULL; 1217 1218 dout("__insert_osd %p osd%d\n", new, new->o_osd); 1219 while (*p) { 1220 parent = *p; 1221 osd = rb_entry(parent, struct ceph_osd, o_node); 1222 if (new->o_osd < osd->o_osd) 1223 p = &(*p)->rb_left; 1224 else if (new->o_osd > osd->o_osd) 1225 p = &(*p)->rb_right; 1226 else 1227 BUG(); 1228 } 1229 1230 rb_link_node(&new->o_node, parent, p); 1231 rb_insert_color(&new->o_node, &osdc->osds); 1232 } 1233
/* Exact-match lookup of an osd session by osd number; NULL if absent. */
1234 static struct ceph_osd
*__lookup_osd(struct ceph_osd_client *osdc, int o) 1235 { 1236 struct ceph_osd *osd; 1237 struct rb_node *n = osdc->osds.rb_node; 1238 1239 while (n) { 1240 osd = rb_entry(n, struct ceph_osd, o_node); 1241 if (o < osd->o_osd) 1242 n = n->rb_left; 1243 else if (o > osd->o_osd) 1244 n = n->rb_right; 1245 else 1246 return osd; 1247 } 1248 return NULL; 1249 } 1250
/* Arm / disarm the keepalive timeout work item. */
1251 static void __schedule_osd_timeout(struct ceph_osd_client *osdc) 1252 { 1253 schedule_delayed_work(&osdc->timeout_work, 1254 osdc->client->options->osd_keepalive_timeout * HZ); 1255 } 1256 1257 static void __cancel_osd_timeout(struct ceph_osd_client *osdc) 1258 { 1259 cancel_delayed_work(&osdc->timeout_work); 1260 } 1261 1262 /* 1263 * Register request, assign tid. If this is the first request, set up 1264 * the timeout event. 1265 */
/* Takes a request reference (ceph_osdc_get_request). */
1266 static void __register_request(struct ceph_osd_client *osdc, 1267 struct ceph_osd_request *req) 1268 { 1269 req->r_tid = ++osdc->last_tid; 1270 req->r_request->hdr.tid = cpu_to_le64(req->r_tid); 1271 dout("__register_request %p tid %lld\n", req, req->r_tid); 1272 __insert_request(osdc, req); 1273 ceph_osdc_get_request(req); 1274 osdc->num_requests++; 1275 if (osdc->num_requests == 1) { 1276 dout(" first request, scheduling timeout\n"); 1277 __schedule_osd_timeout(osdc); 1278 } 1279 } 1280 1281 /* 1282 * called under osdc->request_mutex 1283 */
/* Idempotent: a request with a cleared r_node is silently ignored.  Drops the
 * reference taken by __register_request(). */
1284 static void __unregister_request(struct ceph_osd_client *osdc, 1285 struct ceph_osd_request *req) 1286 { 1287 if (RB_EMPTY_NODE(&req->r_node)) { 1288 dout("__unregister_request %p tid %lld not registered\n", 1289 req, req->r_tid); 1290 return; 1291 } 1292 1293 dout("__unregister_request %p tid %lld\n", req, req->r_tid); 1294 rb_erase(&req->r_node, &osdc->requests); 1295 RB_CLEAR_NODE(&req->r_node); 1296 osdc->num_requests--; 1297 1298 if (req->r_osd) { 1299 /* make sure the original request isn't in flight.
*/ 1300 ceph_msg_revoke(req->r_request); 1301 1302 list_del_init(&req->r_osd_item); 1303 maybe_move_osd_to_lru(osdc, req->r_osd); 1304 if (list_empty(&req->r_linger_osd_item)) 1305 req->r_osd = NULL; 1306 } 1307 1308 list_del_init(&req->r_req_lru_item); 1309 ceph_osdc_put_request(req); 1310 1311 if (osdc->num_requests == 0) { 1312 dout(" no requests, canceling timeout\n"); 1313 __cancel_osd_timeout(osdc); 1314 } 1315 } 1316 1317 /* 1318 * Cancel a previously queued request message 1319 */
/* Revoke the outgoing message and clear r_sent so it will be re-encoded. */
1320 static void __cancel_request(struct ceph_osd_request *req) 1321 { 1322 if (req->r_sent && req->r_osd) { 1323 ceph_msg_revoke(req->r_request); 1324 req->r_sent = 0; 1325 } 1326 } 1327
/* Track a linger (watch) request on the osdc and, if mapped, on its osd;
 * takes a request reference. */
1328 static void __register_linger_request(struct ceph_osd_client *osdc, 1329 struct ceph_osd_request *req) 1330 { 1331 dout("%s %p tid %llu\n", __func__, req, req->r_tid); 1332 WARN_ON(!req->r_linger); 1333 1334 ceph_osdc_get_request(req); 1335 list_add_tail(&req->r_linger_item, &osdc->req_linger); 1336 if (req->r_osd) 1337 list_add_tail(&req->r_linger_osd_item, 1338 &req->r_osd->o_linger_requests); 1339 } 1340
/* Reverse of __register_linger_request(); idempotent, drops the linger ref. */
1341 static void __unregister_linger_request(struct ceph_osd_client *osdc, 1342 struct ceph_osd_request *req) 1343 { 1344 WARN_ON(!req->r_linger); 1345 1346 if (list_empty(&req->r_linger_item)) { 1347 dout("%s %p tid %llu not registered\n", __func__, req, 1348 req->r_tid); 1349 return; 1350 } 1351 1352 dout("%s %p tid %llu\n", __func__, req, req->r_tid); 1353 list_del_init(&req->r_linger_item); 1354 1355 if (req->r_osd) { 1356 list_del_init(&req->r_linger_osd_item); 1357 maybe_move_osd_to_lru(osdc, req->r_osd); 1358 if (list_empty(&req->r_osd_item)) 1359 req->r_osd = NULL; 1360 } 1361 ceph_osdc_put_request(req); 1362 } 1363
/* Mark a request as lingering (resent across maps until explicitly stopped). */
1364 void ceph_osdc_set_request_linger(struct ceph_osd_client *osdc, 1365 struct ceph_osd_request *req) 1366 { 1367 if (!req->r_linger) { 1368 dout("set_request_linger %p\n", req); 1369 req->r_linger = 1; 1370 } 1371 } 1372 EXPORT_SYMBOL(ceph_osdc_set_request_linger);
1373 1374 /* 1375 * Returns whether a request should be blocked from being sent 1376 * based on the current osdmap and osd_client settings. 1377 * 1378 * Caller should hold map_sem for read. 1379 */ 1380 static bool __req_should_be_paused(struct ceph_osd_client *osdc, 1381 struct ceph_osd_request *req) 1382 { 1383 bool pauserd = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD); 1384 bool pausewr = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR) || 1385 ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL); 1386 return (req->r_flags & CEPH_OSD_FLAG_READ && pauserd) || 1387 (req->r_flags & CEPH_OSD_FLAG_WRITE && pausewr); 1388 } 1389 1390 /* 1391 * Calculate mapping of a request to a PG. Takes tiering into account. 1392 */
/* Lazily resolves r_target_oloc/oid from the base values, redirects the pool
 * through read/write cache tiers when applicable, then maps to a PG. */
1393 static int __calc_request_pg(struct ceph_osdmap *osdmap, 1394 struct ceph_osd_request *req, 1395 struct ceph_pg *pg_out) 1396 { 1397 bool need_check_tiering; 1398 1399 need_check_tiering = false; 1400 if (req->r_target_oloc.pool == -1) { 1401 req->r_target_oloc = req->r_base_oloc; /* struct */ 1402 need_check_tiering = true; 1403 } 1404 if (req->r_target_oid.name_len == 0) { 1405 ceph_oid_copy(&req->r_target_oid, &req->r_base_oid); 1406 need_check_tiering = true; 1407 } 1408 1409 if (need_check_tiering && 1410 (req->r_flags & CEPH_OSD_FLAG_IGNORE_OVERLAY) == 0) { 1411 struct ceph_pg_pool_info *pi; 1412 1413 pi = ceph_pg_pool_by_id(osdmap, req->r_target_oloc.pool); 1414 if (pi) { 1415 if ((req->r_flags & CEPH_OSD_FLAG_READ) && 1416 pi->read_tier >= 0) 1417 req->r_target_oloc.pool = pi->read_tier; 1418 if ((req->r_flags & CEPH_OSD_FLAG_WRITE) && 1419 pi->write_tier >= 0) 1420 req->r_target_oloc.pool = pi->write_tier; 1421 } 1422 /* !pi is caught in ceph_oloc_oid_to_pg() */ 1423 } 1424 1425 return ceph_oloc_oid_to_pg(osdmap, &req->r_target_oloc, 1426 &req->r_target_oid, pg_out); 1427 } 1428 1429 /* 1430 * Pick an osd (the first 'up' osd in the pg), allocate the osd struct 1431 * (as needed), and set the request r_osd appropriately.
If there is 1432 * no up osd, set r_osd to NULL. Move the request to the appropriate list 1433 * (unsent, homeless) or leave on in-flight lru. 1434 * 1435 * Return 0 if unchanged, 1 if changed, or negative on error. 1436 * 1437 * Caller should hold map_sem for read and request_mutex. 1438 */ 1439 static int __map_request(struct ceph_osd_client *osdc, 1440 struct ceph_osd_request *req, int force_resend) 1441 { 1442 struct ceph_pg pgid; 1443 int acting[CEPH_PG_MAX_SIZE]; 1444 int num, o; 1445 int err; 1446 bool was_paused; 1447 1448 dout("map_request %p tid %lld\n", req, req->r_tid); 1449 1450 err = __calc_request_pg(osdc->osdmap, req, &pgid); 1451 if (err) { 1452 list_move(&req->r_req_lru_item, &osdc->req_notarget); 1453 return err; 1454 } 1455 req->r_pgid = pgid; 1456 1457 num = ceph_calc_pg_acting(osdc->osdmap, pgid, acting, &o); 1458 if (num < 0) 1459 num = 0; 1460
/* a pause flag being lifted forces a resend even if the mapping is stable */
1461 was_paused = req->r_paused; 1462 req->r_paused = __req_should_be_paused(osdc, req); 1463 if (was_paused && !req->r_paused) 1464 force_resend = 1; 1465
/* unchanged target (same osd, incarnation, acting set), still homeless,
 * or currently paused: nothing to do */
1466 if ((!force_resend && 1467 req->r_osd && req->r_osd->o_osd == o && 1468 req->r_sent >= req->r_osd->o_incarnation && 1469 req->r_num_pg_osds == num && 1470 memcmp(req->r_pg_osds, acting, sizeof(acting[0])*num) == 0) || 1471 (req->r_osd == NULL && o == -1) || 1472 req->r_paused) 1473 return 0; /* no change */ 1474 1475 dout("map_request tid %llu pgid %lld.%x osd%d (was osd%d)\n", 1476 req->r_tid, pgid.pool, pgid.seed, o, 1477 req->r_osd ?
req->r_osd->o_osd : -1); 1478 1479 /* record full pg acting set */ 1480 memcpy(req->r_pg_osds, acting, sizeof(acting[0]) * num); 1481 req->r_num_pg_osds = num; 1482 1483 if (req->r_osd) { 1484 __cancel_request(req); 1485 list_del_init(&req->r_osd_item); 1486 req->r_osd = NULL; 1487 } 1488
/* create a session for the new primary osd on demand */
1489 req->r_osd = __lookup_osd(osdc, o); 1490 if (!req->r_osd && o >= 0) { 1491 err = -ENOMEM; 1492 req->r_osd = create_osd(osdc, o); 1493 if (!req->r_osd) { 1494 list_move(&req->r_req_lru_item, &osdc->req_notarget); 1495 goto out; 1496 } 1497 1498 dout("map_request osd %p is osd%d\n", req->r_osd, o); 1499 __insert_osd(osdc, req->r_osd); 1500 1501 ceph_con_open(&req->r_osd->o_con, 1502 CEPH_ENTITY_TYPE_OSD, o, 1503 &osdc->osdmap->osd_addr[o]); 1504 } 1505 1506 if (req->r_osd) { 1507 __remove_osd_from_lru(req->r_osd); 1508 list_add_tail(&req->r_osd_item, &req->r_osd->o_requests); 1509 list_move_tail(&req->r_req_lru_item, &osdc->req_unsent); 1510 } else { 1511 list_move_tail(&req->r_req_lru_item, &osdc->req_notarget); 1512 } 1513 err = 1; /* osd or pg changed */ 1514 1515 out: 1516 return err; 1517 } 1518 1519 /* 1520 * caller should hold map_sem (for read) and request_mutex 1521 */
/* Re-stamp the mutable header fields (epoch, flags, pool, pgid, attempt),
 * record r_sent = osd incarnation, and hand the message to the messenger
 * (which consumes the extra ref taken here). */
1522 static void __send_request(struct ceph_osd_client *osdc, 1523 struct ceph_osd_request *req) 1524 { 1525 void *p; 1526 1527 dout("send_request %p tid %llu to osd%d flags %d pg %lld.%x\n", 1528 req, req->r_tid, req->r_osd->o_osd, req->r_flags, 1529 (unsigned long long)req->r_pgid.pool, req->r_pgid.seed); 1530 1531 /* fill in message content that changes each time we send it */ 1532 put_unaligned_le32(osdc->osdmap->epoch, req->r_request_osdmap_epoch); 1533 put_unaligned_le32(req->r_flags, req->r_request_flags); 1534 put_unaligned_le64(req->r_target_oloc.pool, req->r_request_pool); 1535 p = req->r_request_pgid; 1536 ceph_encode_64(&p, req->r_pgid.pool); 1537 ceph_encode_32(&p, req->r_pgid.seed); 1538 put_unaligned_le64(1, req->r_request_attempts); /* FIXME */ 1539 memcpy(req->r_request_reassert_version,
&req->r_reassert_version, 1540 sizeof(req->r_reassert_version)); 1541 1542 req->r_stamp = jiffies; 1543 list_move_tail(&req->r_req_lru_item, &osdc->req_lru); 1544 1545 ceph_msg_get(req->r_request); /* send consumes a ref */ 1546 1547 req->r_sent = req->r_osd->o_incarnation; 1548 1549 ceph_con_send(&req->r_osd->o_con, req->r_request); 1550 } 1551 1552 /* 1553 * Send any requests in the queue (req_unsent). 1554 */
/* __send_request() moves each entry to req_lru, so _safe iteration drains
 * the unsent list. */
1555 static void __send_queued(struct ceph_osd_client *osdc) 1556 { 1557 struct ceph_osd_request *req, *tmp; 1558 1559 dout("__send_queued\n"); 1560 list_for_each_entry_safe(req, tmp, &osdc->req_unsent, r_req_lru_item) 1561 __send_request(osdc, req); 1562 } 1563 1564 /* 1565 * Caller should hold map_sem for read and request_mutex. 1566 */
/* Register + map + send.  With `nofail`, a mapping error is swallowed and
 * the request stays registered to be retried on the next map; otherwise it
 * is unregistered and the error returned.  No up osd -> subscribe for the
 * next osdmap instead of sending. */
1567 static int __ceph_osdc_start_request(struct ceph_osd_client *osdc, 1568 struct ceph_osd_request *req, 1569 bool nofail) 1570 { 1571 int rc; 1572 1573 __register_request(osdc, req); 1574 req->r_sent = 0; 1575 req->r_got_reply = 0; 1576 rc = __map_request(osdc, req, 0); 1577 if (rc < 0) { 1578 if (nofail) { 1579 dout("osdc_start_request failed map, " 1580 " will retry %lld\n", req->r_tid); 1581 rc = 0; 1582 } else { 1583 __unregister_request(osdc, req); 1584 } 1585 return rc; 1586 } 1587 1588 if (req->r_osd == NULL) { 1589 dout("send_request %p no up osds in pg\n", req); 1590 ceph_monc_request_next_osdmap(&osdc->client->monc); 1591 } else { 1592 __send_queued(osdc); 1593 } 1594 1595 return 0; 1596 } 1597 1598 /* 1599 * Timeout callback, called every N seconds when 1 or more osd 1600 * requests has been active for more than N seconds. When this 1601 * happens, we ping all OSDs with requests who have timed out to 1602 * ensure any communications channel reset is detected. Reset the 1603 * request timeouts another N seconds in the future as we go. 1604 * Reschedule the timeout event another N seconds in future (unless 1605 * there are no open requests).
1606 */ 1607 static void handle_timeout(struct work_struct *work) 1608 { 1609 struct ceph_osd_client *osdc = 1610 container_of(work, struct ceph_osd_client, timeout_work.work); 1611 struct ceph_osd_request *req; 1612 struct ceph_osd *osd; 1613 unsigned long keepalive = 1614 osdc->client->options->osd_keepalive_timeout * HZ; 1615 struct list_head slow_osds; 1616 dout("timeout\n"); 1617 down_read(&osdc->map_sem); 1618 1619 ceph_monc_request_next_osdmap(&osdc->client->monc); 1620 1621 mutex_lock(&osdc->request_mutex); 1622 1623 /* 1624 * ping osds that are a bit slow. this ensures that if there 1625 * is a break in the TCP connection we will notice, and reopen 1626 * a connection with that osd (from the fault callback). 1627 */ 1628 INIT_LIST_HEAD(&slow_osds); 1629 list_for_each_entry(req, &osdc->req_lru, r_req_lru_item) { 1630 if (time_before(jiffies, req->r_stamp + keepalive)) 1631 break; 1632 1633 osd = req->r_osd; 1634 BUG_ON(!osd); 1635 dout(" tid %llu is slow, will send keepalive on osd%d\n", 1636 req->r_tid, osd->o_osd); 1637 list_move_tail(&osd->o_keepalive_item, &slow_osds); 1638 } 1639 while (!list_empty(&slow_osds)) { 1640 osd = list_entry(slow_osds.next, struct ceph_osd, 1641 o_keepalive_item); 1642 list_del_init(&osd->o_keepalive_item); 1643 ceph_con_keepalive(&osd->o_con); 1644 } 1645 1646 __schedule_osd_timeout(osdc); 1647 __send_queued(osdc); 1648 mutex_unlock(&osdc->request_mutex); 1649 up_read(&osdc->map_sem); 1650 } 1651
/* Periodic reaper for idle osd sessions; re-arms itself at ttl/4. */
1652 static void handle_osds_timeout(struct work_struct *work) 1653 { 1654 struct ceph_osd_client *osdc = 1655 container_of(work, struct ceph_osd_client, 1656 osds_timeout_work.work); 1657 unsigned long delay = 1658 osdc->client->options->osd_idle_ttl * HZ >> 2; 1659 1660 dout("osds timeout\n"); 1661 down_read(&osdc->map_sem); 1662 remove_old_osds(osdc); 1663 up_read(&osdc->map_sem); 1664 1665 schedule_delayed_work(&osdc->osds_timeout_work, 1666 round_jiffies_relative(delay)); 1667 } 1668
/* Decode a ceph_object_locator (v3..6); rejects key/nspace/hash being set. */
1669 static int ceph_oloc_decode(void **p,
void *end, 1670 struct ceph_object_locator *oloc) 1671 { 1672 u8 struct_v, struct_cv; 1673 u32 len; 1674 void *struct_end; 1675 int ret = 0; 1676 1677 ceph_decode_need(p, end, 1 + 1 + 4, e_inval); 1678 struct_v = ceph_decode_8(p); 1679 struct_cv = ceph_decode_8(p); 1680 if (struct_v < 3) { 1681 pr_warn("got v %d < 3 cv %d of ceph_object_locator\n", 1682 struct_v, struct_cv); 1683 goto e_inval; 1684 } 1685 if (struct_cv > 6) { 1686 pr_warn("got v %d cv %d > 6 of ceph_object_locator\n", 1687 struct_v, struct_cv); 1688 goto e_inval; 1689 } 1690 len = ceph_decode_32(p); 1691 ceph_decode_need(p, end, len, e_inval); 1692 struct_end = *p + len; 1693 1694 oloc->pool = ceph_decode_64(p); 1695 *p += 4; /* skip preferred */ 1696 1697 len = ceph_decode_32(p); 1698 if (len > 0) { 1699 pr_warn("ceph_object_locator::key is set\n"); 1700 goto e_inval; 1701 } 1702 1703 if (struct_v >= 5) { 1704 len = ceph_decode_32(p); 1705 if (len > 0) { 1706 pr_warn("ceph_object_locator::nspace is set\n"); 1707 goto e_inval; 1708 } 1709 } 1710 1711 if (struct_v >= 6) { 1712 s64 hash = ceph_decode_64(p); 1713 if (hash != -1) { 1714 pr_warn("ceph_object_locator::hash is set\n"); 1715 goto e_inval; 1716 } 1717 } 1718 1719 /* skip the rest */ 1720 *p = struct_end; 1721 out: 1722 return ret; 1723 1724 e_inval: 1725 ret = -EINVAL; 1726 goto out; 1727 } 1728 1729 static int ceph_redirect_decode(void **p, void *end, 1730 struct ceph_request_redirect *redir) 1731 { 1732 u8 struct_v, struct_cv; 1733 u32 len; 1734 void *struct_end; 1735 int ret; 1736 1737 ceph_decode_need(p, end, 1 + 1 + 4, e_inval); 1738 struct_v = ceph_decode_8(p); 1739 struct_cv = ceph_decode_8(p); 1740 if (struct_cv > 1) { 1741 pr_warn("got v %d cv %d > 1 of ceph_request_redirect\n", 1742 struct_v, struct_cv); 1743 goto e_inval; 1744 } 1745 len = ceph_decode_32(p); 1746 ceph_decode_need(p, end, len, e_inval); 1747 struct_end = *p + len; 1748 1749 ret = ceph_oloc_decode(p, end, &redir->oloc); 1750 if (ret) 1751 goto out; 1752 1753 len = 
ceph_decode_32(p); 1754 if (len > 0) { 1755 pr_warn("ceph_request_redirect::object_name is set\n"); 1756 goto e_inval; 1757 } 1758 1759 len = ceph_decode_32(p); 1760 *p += len; /* skip osd_instructions */ 1761 1762 /* skip the rest */ 1763 *p = struct_end; 1764 out: 1765 return ret; 1766 1767 e_inval: 1768 ret = -EINVAL; 1769 goto out; 1770 } 1771 1772 static void complete_request(struct ceph_osd_request *req) 1773 { 1774 complete_all(&req->r_safe_completion); /* fsync waiter */ 1775 } 1776 1777 /* 1778 * handle osd op reply. either call the callback if it is specified, 1779 * or do the completion to wake up the waiting thread. 1780 */ 1781 static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg, 1782 struct ceph_connection *con) 1783 { 1784 void *p, *end; 1785 struct ceph_osd_request *req; 1786 struct ceph_request_redirect redir; 1787 u64 tid; 1788 int object_len; 1789 unsigned int numops; 1790 int payload_len, flags; 1791 s32 result; 1792 s32 retry_attempt; 1793 struct ceph_pg pg; 1794 int err; 1795 u32 reassert_epoch; 1796 u64 reassert_version; 1797 u32 osdmap_epoch; 1798 int already_completed; 1799 u32 bytes; 1800 unsigned int i; 1801 1802 tid = le64_to_cpu(msg->hdr.tid); 1803 dout("handle_reply %p tid %llu\n", msg, tid); 1804 1805 p = msg->front.iov_base; 1806 end = p + msg->front.iov_len; 1807 1808 ceph_decode_need(&p, end, 4, bad); 1809 object_len = ceph_decode_32(&p); 1810 ceph_decode_need(&p, end, object_len, bad); 1811 p += object_len; 1812 1813 err = ceph_decode_pgid(&p, end, &pg); 1814 if (err) 1815 goto bad; 1816 1817 ceph_decode_need(&p, end, 8 + 4 + 4 + 8 + 4, bad); 1818 flags = ceph_decode_64(&p); 1819 result = ceph_decode_32(&p); 1820 reassert_epoch = ceph_decode_32(&p); 1821 reassert_version = ceph_decode_64(&p); 1822 osdmap_epoch = ceph_decode_32(&p); 1823 1824 /* lookup */ 1825 down_read(&osdc->map_sem); 1826 mutex_lock(&osdc->request_mutex); 1827 req = __lookup_request(osdc, tid); 1828 if (req == NULL) { 1829 
dout("handle_reply tid %llu dne\n", tid); 1830 goto bad_mutex; 1831 } 1832 ceph_osdc_get_request(req); 1833 1834 dout("handle_reply %p tid %llu req %p result %d\n", msg, tid, 1835 req, result); 1836 1837 ceph_decode_need(&p, end, 4, bad_put); 1838 numops = ceph_decode_32(&p); 1839 if (numops > CEPH_OSD_MAX_OP) 1840 goto bad_put; 1841 if (numops != req->r_num_ops) 1842 goto bad_put; 1843 payload_len = 0; 1844 ceph_decode_need(&p, end, numops * sizeof(struct ceph_osd_op), bad_put); 1845 for (i = 0; i < numops; i++) { 1846 struct ceph_osd_op *op = p; 1847 int len; 1848 1849 len = le32_to_cpu(op->payload_len); 1850 req->r_reply_op_len[i] = len; 1851 dout(" op %d has %d bytes\n", i, len); 1852 payload_len += len; 1853 p += sizeof(*op); 1854 } 1855 bytes = le32_to_cpu(msg->hdr.data_len); 1856 if (payload_len != bytes) { 1857 pr_warning("sum of op payload lens %d != data_len %d", 1858 payload_len, bytes); 1859 goto bad_put; 1860 } 1861 1862 ceph_decode_need(&p, end, 4 + numops * 4, bad_put); 1863 retry_attempt = ceph_decode_32(&p); 1864 for (i = 0; i < numops; i++) 1865 req->r_reply_op_result[i] = ceph_decode_32(&p); 1866 1867 if (le16_to_cpu(msg->hdr.version) >= 6) { 1868 p += 8 + 4; /* skip replay_version */ 1869 p += 8; /* skip user_version */ 1870 1871 err = ceph_redirect_decode(&p, end, &redir); 1872 if (err) 1873 goto bad_put; 1874 } else { 1875 redir.oloc.pool = -1; 1876 } 1877 1878 if (redir.oloc.pool != -1) { 1879 dout("redirect pool %lld\n", redir.oloc.pool); 1880 1881 __unregister_request(osdc, req); 1882 1883 req->r_target_oloc = redir.oloc; /* struct */ 1884 1885 /* 1886 * Start redirect requests with nofail=true. If 1887 * mapping fails, request will end up on the notarget 1888 * list, waiting for the new osdmap (which can take 1889 * a while), even though the original request mapped 1890 * successfully. In the future we might want to follow 1891 * original request's nofail setting here. 
1892 */ 1893 err = __ceph_osdc_start_request(osdc, req, true); 1894 BUG_ON(err); 1895 1896 goto out_unlock; 1897 } 1898 1899 already_completed = req->r_got_reply; 1900 if (!req->r_got_reply) { 1901 req->r_result = result; 1902 dout("handle_reply result %d bytes %d\n", req->r_result, 1903 bytes); 1904 if (req->r_result == 0) 1905 req->r_result = bytes; 1906 1907 /* in case this is a write and we need to replay, */ 1908 req->r_reassert_version.epoch = cpu_to_le32(reassert_epoch); 1909 req->r_reassert_version.version = cpu_to_le64(reassert_version); 1910 1911 req->r_got_reply = 1; 1912 } else if ((flags & CEPH_OSD_FLAG_ONDISK) == 0) { 1913 dout("handle_reply tid %llu dup ack\n", tid); 1914 goto out_unlock; 1915 } 1916 1917 dout("handle_reply tid %llu flags %d\n", tid, flags); 1918 1919 if (req->r_linger && (flags & CEPH_OSD_FLAG_ONDISK)) 1920 __register_linger_request(osdc, req); 1921 1922 /* either this is a read, or we got the safe response */ 1923 if (result < 0 || 1924 (flags & CEPH_OSD_FLAG_ONDISK) || 1925 ((flags & CEPH_OSD_FLAG_WRITE) == 0)) 1926 __unregister_request(osdc, req); 1927 1928 mutex_unlock(&osdc->request_mutex); 1929 up_read(&osdc->map_sem); 1930 1931 if (!already_completed) { 1932 if (req->r_unsafe_callback && 1933 result >= 0 && !(flags & CEPH_OSD_FLAG_ONDISK)) 1934 req->r_unsafe_callback(req, true); 1935 if (req->r_callback) 1936 req->r_callback(req, msg); 1937 else 1938 complete_all(&req->r_completion); 1939 } 1940 1941 if (flags & CEPH_OSD_FLAG_ONDISK) { 1942 if (req->r_unsafe_callback && already_completed) 1943 req->r_unsafe_callback(req, false); 1944 complete_request(req); 1945 } 1946 1947 out: 1948 dout("req=%p req->r_linger=%d\n", req, req->r_linger); 1949 ceph_osdc_put_request(req); 1950 return; 1951 out_unlock: 1952 mutex_unlock(&osdc->request_mutex); 1953 up_read(&osdc->map_sem); 1954 goto out; 1955 1956 bad_put: 1957 req->r_result = -EIO; 1958 __unregister_request(osdc, req); 1959 if (req->r_callback) 1960 req->r_callback(req, msg); 
1961 else 1962 complete_all(&req->r_completion); 1963 complete_request(req); 1964 ceph_osdc_put_request(req); 1965 bad_mutex: 1966 mutex_unlock(&osdc->request_mutex); 1967 up_read(&osdc->map_sem); 1968 bad: 1969 pr_err("corrupt osd_op_reply got %d %d\n", 1970 (int)msg->front.iov_len, le32_to_cpu(msg->hdr.front_len)); 1971 ceph_msg_dump(msg); 1972 } 1973 1974 static void reset_changed_osds(struct ceph_osd_client *osdc) 1975 { 1976 struct rb_node *p, *n; 1977 1978 for (p = rb_first(&osdc->osds); p; p = n) { 1979 struct ceph_osd *osd = rb_entry(p, struct ceph_osd, o_node); 1980 1981 n = rb_next(p); 1982 if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) || 1983 memcmp(&osd->o_con.peer_addr, 1984 ceph_osd_addr(osdc->osdmap, 1985 osd->o_osd), 1986 sizeof(struct ceph_entity_addr)) != 0) 1987 __reset_osd(osdc, osd); 1988 } 1989 } 1990 1991 /* 1992 * Requeue requests whose mapping to an OSD has changed. If requests map to 1993 * no osd, request a new map. 1994 * 1995 * Caller should hold map_sem for read. 1996 */ 1997 static void kick_requests(struct ceph_osd_client *osdc, bool force_resend, 1998 bool force_resend_writes) 1999 { 2000 struct ceph_osd_request *req, *nreq; 2001 struct rb_node *p; 2002 int needmap = 0; 2003 int err; 2004 bool force_resend_req; 2005 2006 dout("kick_requests %s %s\n", force_resend ? " (force resend)" : "", 2007 force_resend_writes ? " (force resend writes)" : ""); 2008 mutex_lock(&osdc->request_mutex); 2009 for (p = rb_first(&osdc->requests); p; ) { 2010 req = rb_entry(p, struct ceph_osd_request, r_node); 2011 p = rb_next(p); 2012 2013 /* 2014 * For linger requests that have not yet been 2015 * registered, move them to the linger list; they'll 2016 * be sent to the osd in the loop below. Unregister 2017 * the request before re-registering it as a linger 2018 * request to ensure the __map_request() below 2019 * will decide it needs to be sent. 
2020 */ 2021 if (req->r_linger && list_empty(&req->r_linger_item)) { 2022 dout("%p tid %llu restart on osd%d\n", 2023 req, req->r_tid, 2024 req->r_osd ? req->r_osd->o_osd : -1); 2025 ceph_osdc_get_request(req); 2026 __unregister_request(osdc, req); 2027 __register_linger_request(osdc, req); 2028 ceph_osdc_put_request(req); 2029 continue; 2030 } 2031 2032 force_resend_req = force_resend || 2033 (force_resend_writes && 2034 req->r_flags & CEPH_OSD_FLAG_WRITE); 2035 err = __map_request(osdc, req, force_resend_req); 2036 if (err < 0) 2037 continue; /* error */ 2038 if (req->r_osd == NULL) { 2039 dout("%p tid %llu maps to no osd\n", req, req->r_tid); 2040 needmap++; /* request a newer map */ 2041 } else if (err > 0) { 2042 if (!req->r_linger) { 2043 dout("%p tid %llu requeued on osd%d\n", req, 2044 req->r_tid, 2045 req->r_osd ? req->r_osd->o_osd : -1); 2046 req->r_flags |= CEPH_OSD_FLAG_RETRY; 2047 } 2048 } 2049 } 2050 2051 list_for_each_entry_safe(req, nreq, &osdc->req_linger, 2052 r_linger_item) { 2053 dout("linger req=%p req->r_osd=%p\n", req, req->r_osd); 2054 2055 err = __map_request(osdc, req, 2056 force_resend || force_resend_writes); 2057 dout("__map_request returned %d\n", err); 2058 if (err == 0) 2059 continue; /* no change and no osd was specified */ 2060 if (err < 0) 2061 continue; /* hrm! */ 2062 if (req->r_osd == NULL) { 2063 dout("tid %llu maps to no valid osd\n", req->r_tid); 2064 needmap++; /* request a newer map */ 2065 continue; 2066 } 2067 2068 dout("kicking lingering %p tid %llu osd%d\n", req, req->r_tid, 2069 req->r_osd ? req->r_osd->o_osd : -1); 2070 __register_request(osdc, req); 2071 __unregister_linger_request(osdc, req); 2072 } 2073 reset_changed_osds(osdc); 2074 mutex_unlock(&osdc->request_mutex); 2075 2076 if (needmap) { 2077 dout("%d requests for down osds, need new map\n", needmap); 2078 ceph_monc_request_next_osdmap(&osdc->client->monc); 2079 } 2080 } 2081 2082 2083 /* 2084 * Process updated osd map. 
2085 * 2086 * The message contains any number of incremental and full maps, normally 2087 * indicating some sort of topology change in the cluster. Kick requests 2088 * off to different OSDs as needed. 2089 */ 2090 void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg) 2091 { 2092 void *p, *end, *next; 2093 u32 nr_maps, maplen; 2094 u32 epoch; 2095 struct ceph_osdmap *newmap = NULL, *oldmap; 2096 int err; 2097 struct ceph_fsid fsid; 2098 bool was_full; 2099 2100 dout("handle_map have %u\n", osdc->osdmap ? osdc->osdmap->epoch : 0); 2101 p = msg->front.iov_base; 2102 end = p + msg->front.iov_len; 2103 2104 /* verify fsid */ 2105 ceph_decode_need(&p, end, sizeof(fsid), bad); 2106 ceph_decode_copy(&p, &fsid, sizeof(fsid)); 2107 if (ceph_check_fsid(osdc->client, &fsid) < 0) 2108 return; 2109 2110 down_write(&osdc->map_sem); 2111 2112 was_full = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL); 2113 2114 /* incremental maps */ 2115 ceph_decode_32_safe(&p, end, nr_maps, bad); 2116 dout(" %d inc maps\n", nr_maps); 2117 while (nr_maps > 0) { 2118 ceph_decode_need(&p, end, 2*sizeof(u32), bad); 2119 epoch = ceph_decode_32(&p); 2120 maplen = ceph_decode_32(&p); 2121 ceph_decode_need(&p, end, maplen, bad); 2122 next = p + maplen; 2123 if (osdc->osdmap && osdc->osdmap->epoch+1 == epoch) { 2124 dout("applying incremental map %u len %d\n", 2125 epoch, maplen); 2126 newmap = osdmap_apply_incremental(&p, next, 2127 osdc->osdmap, 2128 &osdc->client->msgr); 2129 if (IS_ERR(newmap)) { 2130 err = PTR_ERR(newmap); 2131 goto bad; 2132 } 2133 BUG_ON(!newmap); 2134 if (newmap != osdc->osdmap) { 2135 ceph_osdmap_destroy(osdc->osdmap); 2136 osdc->osdmap = newmap; 2137 } 2138 was_full = was_full || 2139 ceph_osdmap_flag(osdc->osdmap, 2140 CEPH_OSDMAP_FULL); 2141 kick_requests(osdc, 0, was_full); 2142 } else { 2143 dout("ignoring incremental map %u len %d\n", 2144 epoch, maplen); 2145 } 2146 p = next; 2147 nr_maps--; 2148 } 2149 if (newmap) 2150 goto done; 2151 2152 /* 
full maps */ 2153 ceph_decode_32_safe(&p, end, nr_maps, bad); 2154 dout(" %d full maps\n", nr_maps); 2155 while (nr_maps) { 2156 ceph_decode_need(&p, end, 2*sizeof(u32), bad); 2157 epoch = ceph_decode_32(&p); 2158 maplen = ceph_decode_32(&p); 2159 ceph_decode_need(&p, end, maplen, bad); 2160 if (nr_maps > 1) { 2161 dout("skipping non-latest full map %u len %d\n", 2162 epoch, maplen); 2163 } else if (osdc->osdmap && osdc->osdmap->epoch >= epoch) { 2164 dout("skipping full map %u len %d, " 2165 "older than our %u\n", epoch, maplen, 2166 osdc->osdmap->epoch); 2167 } else { 2168 int skipped_map = 0; 2169 2170 dout("taking full map %u len %d\n", epoch, maplen); 2171 newmap = ceph_osdmap_decode(&p, p+maplen); 2172 if (IS_ERR(newmap)) { 2173 err = PTR_ERR(newmap); 2174 goto bad; 2175 } 2176 BUG_ON(!newmap); 2177 oldmap = osdc->osdmap; 2178 osdc->osdmap = newmap; 2179 if (oldmap) { 2180 if (oldmap->epoch + 1 < newmap->epoch) 2181 skipped_map = 1; 2182 ceph_osdmap_destroy(oldmap); 2183 } 2184 was_full = was_full || 2185 ceph_osdmap_flag(osdc->osdmap, 2186 CEPH_OSDMAP_FULL); 2187 kick_requests(osdc, skipped_map, was_full); 2188 } 2189 p += maplen; 2190 nr_maps--; 2191 } 2192 2193 if (!osdc->osdmap) 2194 goto bad; 2195 done: 2196 downgrade_write(&osdc->map_sem); 2197 ceph_monc_got_osdmap(&osdc->client->monc, osdc->osdmap->epoch); 2198 2199 /* 2200 * subscribe to subsequent osdmap updates if full to ensure 2201 * we find out when we are no longer full and stop returning 2202 * ENOSPC. 
2203 */ 2204 if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) || 2205 ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD) || 2206 ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR)) 2207 ceph_monc_request_next_osdmap(&osdc->client->monc); 2208 2209 mutex_lock(&osdc->request_mutex); 2210 __send_queued(osdc); 2211 mutex_unlock(&osdc->request_mutex); 2212 up_read(&osdc->map_sem); 2213 wake_up_all(&osdc->client->auth_wq); 2214 return; 2215 2216 bad: 2217 pr_err("osdc handle_map corrupt msg\n"); 2218 ceph_msg_dump(msg); 2219 up_write(&osdc->map_sem); 2220 } 2221 2222 /* 2223 * watch/notify callback event infrastructure 2224 * 2225 * These callbacks are used both for watch and notify operations. 2226 */ 2227 static void __release_event(struct kref *kref) 2228 { 2229 struct ceph_osd_event *event = 2230 container_of(kref, struct ceph_osd_event, kref); 2231 2232 dout("__release_event %p\n", event); 2233 kfree(event); 2234 } 2235 2236 static void get_event(struct ceph_osd_event *event) 2237 { 2238 kref_get(&event->kref); 2239 } 2240 2241 void ceph_osdc_put_event(struct ceph_osd_event *event) 2242 { 2243 kref_put(&event->kref, __release_event); 2244 } 2245 EXPORT_SYMBOL(ceph_osdc_put_event); 2246 2247 static void __insert_event(struct ceph_osd_client *osdc, 2248 struct ceph_osd_event *new) 2249 { 2250 struct rb_node **p = &osdc->event_tree.rb_node; 2251 struct rb_node *parent = NULL; 2252 struct ceph_osd_event *event = NULL; 2253 2254 while (*p) { 2255 parent = *p; 2256 event = rb_entry(parent, struct ceph_osd_event, node); 2257 if (new->cookie < event->cookie) 2258 p = &(*p)->rb_left; 2259 else if (new->cookie > event->cookie) 2260 p = &(*p)->rb_right; 2261 else 2262 BUG(); 2263 } 2264 2265 rb_link_node(&new->node, parent, p); 2266 rb_insert_color(&new->node, &osdc->event_tree); 2267 } 2268 2269 static struct ceph_osd_event *__find_event(struct ceph_osd_client *osdc, 2270 u64 cookie) 2271 { 2272 struct rb_node **p = &osdc->event_tree.rb_node; 2273 struct rb_node *parent 
= NULL; 2274 struct ceph_osd_event *event = NULL; 2275 2276 while (*p) { 2277 parent = *p; 2278 event = rb_entry(parent, struct ceph_osd_event, node); 2279 if (cookie < event->cookie) 2280 p = &(*p)->rb_left; 2281 else if (cookie > event->cookie) 2282 p = &(*p)->rb_right; 2283 else 2284 return event; 2285 } 2286 return NULL; 2287 } 2288 2289 static void __remove_event(struct ceph_osd_event *event) 2290 { 2291 struct ceph_osd_client *osdc = event->osdc; 2292 2293 if (!RB_EMPTY_NODE(&event->node)) { 2294 dout("__remove_event removed %p\n", event); 2295 rb_erase(&event->node, &osdc->event_tree); 2296 ceph_osdc_put_event(event); 2297 } else { 2298 dout("__remove_event didn't remove %p\n", event); 2299 } 2300 } 2301 2302 int ceph_osdc_create_event(struct ceph_osd_client *osdc, 2303 void (*event_cb)(u64, u64, u8, void *), 2304 void *data, struct ceph_osd_event **pevent) 2305 { 2306 struct ceph_osd_event *event; 2307 2308 event = kmalloc(sizeof(*event), GFP_NOIO); 2309 if (!event) 2310 return -ENOMEM; 2311 2312 dout("create_event %p\n", event); 2313 event->cb = event_cb; 2314 event->one_shot = 0; 2315 event->data = data; 2316 event->osdc = osdc; 2317 INIT_LIST_HEAD(&event->osd_node); 2318 RB_CLEAR_NODE(&event->node); 2319 kref_init(&event->kref); /* one ref for us */ 2320 kref_get(&event->kref); /* one ref for the caller */ 2321 2322 spin_lock(&osdc->event_lock); 2323 event->cookie = ++osdc->event_count; 2324 __insert_event(osdc, event); 2325 spin_unlock(&osdc->event_lock); 2326 2327 *pevent = event; 2328 return 0; 2329 } 2330 EXPORT_SYMBOL(ceph_osdc_create_event); 2331 2332 void ceph_osdc_cancel_event(struct ceph_osd_event *event) 2333 { 2334 struct ceph_osd_client *osdc = event->osdc; 2335 2336 dout("cancel_event %p\n", event); 2337 spin_lock(&osdc->event_lock); 2338 __remove_event(event); 2339 spin_unlock(&osdc->event_lock); 2340 ceph_osdc_put_event(event); /* caller's */ 2341 } 2342 EXPORT_SYMBOL(ceph_osdc_cancel_event); 2343 2344 2345 static void do_event_work(struct 
work_struct *work) 2346 { 2347 struct ceph_osd_event_work *event_work = 2348 container_of(work, struct ceph_osd_event_work, work); 2349 struct ceph_osd_event *event = event_work->event; 2350 u64 ver = event_work->ver; 2351 u64 notify_id = event_work->notify_id; 2352 u8 opcode = event_work->opcode; 2353 2354 dout("do_event_work completing %p\n", event); 2355 event->cb(ver, notify_id, opcode, event->data); 2356 dout("do_event_work completed %p\n", event); 2357 ceph_osdc_put_event(event); 2358 kfree(event_work); 2359 } 2360 2361 2362 /* 2363 * Process osd watch notifications 2364 */ 2365 static void handle_watch_notify(struct ceph_osd_client *osdc, 2366 struct ceph_msg *msg) 2367 { 2368 void *p, *end; 2369 u8 proto_ver; 2370 u64 cookie, ver, notify_id; 2371 u8 opcode; 2372 struct ceph_osd_event *event; 2373 struct ceph_osd_event_work *event_work; 2374 2375 p = msg->front.iov_base; 2376 end = p + msg->front.iov_len; 2377 2378 ceph_decode_8_safe(&p, end, proto_ver, bad); 2379 ceph_decode_8_safe(&p, end, opcode, bad); 2380 ceph_decode_64_safe(&p, end, cookie, bad); 2381 ceph_decode_64_safe(&p, end, ver, bad); 2382 ceph_decode_64_safe(&p, end, notify_id, bad); 2383 2384 spin_lock(&osdc->event_lock); 2385 event = __find_event(osdc, cookie); 2386 if (event) { 2387 BUG_ON(event->one_shot); 2388 get_event(event); 2389 } 2390 spin_unlock(&osdc->event_lock); 2391 dout("handle_watch_notify cookie %lld ver %lld event %p\n", 2392 cookie, ver, event); 2393 if (event) { 2394 event_work = kmalloc(sizeof(*event_work), GFP_NOIO); 2395 if (!event_work) { 2396 dout("ERROR: could not allocate event_work\n"); 2397 goto done_err; 2398 } 2399 INIT_WORK(&event_work->work, do_event_work); 2400 event_work->event = event; 2401 event_work->ver = ver; 2402 event_work->notify_id = notify_id; 2403 event_work->opcode = opcode; 2404 if (!queue_work(osdc->notify_wq, &event_work->work)) { 2405 dout("WARNING: failed to queue notify event work\n"); 2406 goto done_err; 2407 } 2408 } 2409 2410 return; 2411 
2412 done_err: 2413 ceph_osdc_put_event(event); 2414 return; 2415 2416 bad: 2417 pr_err("osdc handle_watch_notify corrupt msg\n"); 2418 } 2419 2420 /* 2421 * build new request AND message 2422 * 2423 */ 2424 void ceph_osdc_build_request(struct ceph_osd_request *req, u64 off, 2425 struct ceph_snap_context *snapc, u64 snap_id, 2426 struct timespec *mtime) 2427 { 2428 struct ceph_msg *msg = req->r_request; 2429 void *p; 2430 size_t msg_size; 2431 int flags = req->r_flags; 2432 u64 data_len; 2433 unsigned int i; 2434 2435 req->r_snapid = snap_id; 2436 req->r_snapc = ceph_get_snap_context(snapc); 2437 2438 /* encode request */ 2439 msg->hdr.version = cpu_to_le16(4); 2440 2441 p = msg->front.iov_base; 2442 ceph_encode_32(&p, 1); /* client_inc is always 1 */ 2443 req->r_request_osdmap_epoch = p; 2444 p += 4; 2445 req->r_request_flags = p; 2446 p += 4; 2447 if (req->r_flags & CEPH_OSD_FLAG_WRITE) 2448 ceph_encode_timespec(p, mtime); 2449 p += sizeof(struct ceph_timespec); 2450 req->r_request_reassert_version = p; 2451 p += sizeof(struct ceph_eversion); /* will get filled in */ 2452 2453 /* oloc */ 2454 ceph_encode_8(&p, 4); 2455 ceph_encode_8(&p, 4); 2456 ceph_encode_32(&p, 8 + 4 + 4); 2457 req->r_request_pool = p; 2458 p += 8; 2459 ceph_encode_32(&p, -1); /* preferred */ 2460 ceph_encode_32(&p, 0); /* key len */ 2461 2462 ceph_encode_8(&p, 1); 2463 req->r_request_pgid = p; 2464 p += 8 + 4; 2465 ceph_encode_32(&p, -1); /* preferred */ 2466 2467 /* oid */ 2468 ceph_encode_32(&p, req->r_base_oid.name_len); 2469 memcpy(p, req->r_base_oid.name, req->r_base_oid.name_len); 2470 dout("oid '%.*s' len %d\n", req->r_base_oid.name_len, 2471 req->r_base_oid.name, req->r_base_oid.name_len); 2472 p += req->r_base_oid.name_len; 2473 2474 /* ops--can imply data */ 2475 ceph_encode_16(&p, (u16)req->r_num_ops); 2476 data_len = 0; 2477 for (i = 0; i < req->r_num_ops; i++) { 2478 data_len += osd_req_encode_op(req, p, i); 2479 p += sizeof(struct ceph_osd_op); 2480 } 2481 2482 /* snaps */ 2483 
ceph_encode_64(&p, req->r_snapid); 2484 ceph_encode_64(&p, req->r_snapc ? req->r_snapc->seq : 0); 2485 ceph_encode_32(&p, req->r_snapc ? req->r_snapc->num_snaps : 0); 2486 if (req->r_snapc) { 2487 for (i = 0; i < snapc->num_snaps; i++) { 2488 ceph_encode_64(&p, req->r_snapc->snaps[i]); 2489 } 2490 } 2491 2492 req->r_request_attempts = p; 2493 p += 4; 2494 2495 /* data */ 2496 if (flags & CEPH_OSD_FLAG_WRITE) { 2497 u16 data_off; 2498 2499 /* 2500 * The header "data_off" is a hint to the receiver 2501 * allowing it to align received data into its 2502 * buffers such that there's no need to re-copy 2503 * it before writing it to disk (direct I/O). 2504 */ 2505 data_off = (u16) (off & 0xffff); 2506 req->r_request->hdr.data_off = cpu_to_le16(data_off); 2507 } 2508 req->r_request->hdr.data_len = cpu_to_le32(data_len); 2509 2510 BUG_ON(p > msg->front.iov_base + msg->front.iov_len); 2511 msg_size = p - msg->front.iov_base; 2512 msg->front.iov_len = msg_size; 2513 msg->hdr.front_len = cpu_to_le32(msg_size); 2514 2515 dout("build_request msg_size was %d\n", (int)msg_size); 2516 } 2517 EXPORT_SYMBOL(ceph_osdc_build_request); 2518 2519 /* 2520 * Register request, send initial attempt. 2521 */ 2522 int ceph_osdc_start_request(struct ceph_osd_client *osdc, 2523 struct ceph_osd_request *req, 2524 bool nofail) 2525 { 2526 int rc; 2527 2528 down_read(&osdc->map_sem); 2529 mutex_lock(&osdc->request_mutex); 2530 2531 rc = __ceph_osdc_start_request(osdc, req, nofail); 2532 2533 mutex_unlock(&osdc->request_mutex); 2534 up_read(&osdc->map_sem); 2535 2536 return rc; 2537 } 2538 EXPORT_SYMBOL(ceph_osdc_start_request); 2539 2540 /* 2541 * Unregister a registered request. The request is not completed (i.e. 2542 * no callbacks or wakeups) - higher layers are supposed to know what 2543 * they are canceling. 
2544 */ 2545 void ceph_osdc_cancel_request(struct ceph_osd_request *req) 2546 { 2547 struct ceph_osd_client *osdc = req->r_osdc; 2548 2549 mutex_lock(&osdc->request_mutex); 2550 if (req->r_linger) 2551 __unregister_linger_request(osdc, req); 2552 __unregister_request(osdc, req); 2553 mutex_unlock(&osdc->request_mutex); 2554 2555 dout("%s %p tid %llu canceled\n", __func__, req, req->r_tid); 2556 } 2557 EXPORT_SYMBOL(ceph_osdc_cancel_request); 2558 2559 /* 2560 * wait for a request to complete 2561 */ 2562 int ceph_osdc_wait_request(struct ceph_osd_client *osdc, 2563 struct ceph_osd_request *req) 2564 { 2565 int rc; 2566 2567 dout("%s %p tid %llu\n", __func__, req, req->r_tid); 2568 2569 rc = wait_for_completion_interruptible(&req->r_completion); 2570 if (rc < 0) { 2571 dout("%s %p tid %llu interrupted\n", __func__, req, req->r_tid); 2572 ceph_osdc_cancel_request(req); 2573 complete_request(req); 2574 return rc; 2575 } 2576 2577 dout("%s %p tid %llu result %d\n", __func__, req, req->r_tid, 2578 req->r_result); 2579 return req->r_result; 2580 } 2581 EXPORT_SYMBOL(ceph_osdc_wait_request); 2582 2583 /* 2584 * sync - wait for all in-flight requests to flush. avoid starvation. 
2585 */ 2586 void ceph_osdc_sync(struct ceph_osd_client *osdc) 2587 { 2588 struct ceph_osd_request *req; 2589 u64 last_tid, next_tid = 0; 2590 2591 mutex_lock(&osdc->request_mutex); 2592 last_tid = osdc->last_tid; 2593 while (1) { 2594 req = __lookup_request_ge(osdc, next_tid); 2595 if (!req) 2596 break; 2597 if (req->r_tid > last_tid) 2598 break; 2599 2600 next_tid = req->r_tid + 1; 2601 if ((req->r_flags & CEPH_OSD_FLAG_WRITE) == 0) 2602 continue; 2603 2604 ceph_osdc_get_request(req); 2605 mutex_unlock(&osdc->request_mutex); 2606 dout("sync waiting on tid %llu (last is %llu)\n", 2607 req->r_tid, last_tid); 2608 wait_for_completion(&req->r_safe_completion); 2609 mutex_lock(&osdc->request_mutex); 2610 ceph_osdc_put_request(req); 2611 } 2612 mutex_unlock(&osdc->request_mutex); 2613 dout("sync done (thru tid %llu)\n", last_tid); 2614 } 2615 EXPORT_SYMBOL(ceph_osdc_sync); 2616 2617 /* 2618 * Call all pending notify callbacks - for use after a watch is 2619 * unregistered, to make sure no more callbacks for it will be invoked 2620 */ 2621 void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc) 2622 { 2623 flush_workqueue(osdc->notify_wq); 2624 } 2625 EXPORT_SYMBOL(ceph_osdc_flush_notifies); 2626 2627 2628 /* 2629 * init, shutdown 2630 */ 2631 int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client) 2632 { 2633 int err; 2634 2635 dout("init\n"); 2636 osdc->client = client; 2637 osdc->osdmap = NULL; 2638 init_rwsem(&osdc->map_sem); 2639 init_completion(&osdc->map_waiters); 2640 osdc->last_requested_map = 0; 2641 mutex_init(&osdc->request_mutex); 2642 osdc->last_tid = 0; 2643 osdc->osds = RB_ROOT; 2644 INIT_LIST_HEAD(&osdc->osd_lru); 2645 osdc->requests = RB_ROOT; 2646 INIT_LIST_HEAD(&osdc->req_lru); 2647 INIT_LIST_HEAD(&osdc->req_unsent); 2648 INIT_LIST_HEAD(&osdc->req_notarget); 2649 INIT_LIST_HEAD(&osdc->req_linger); 2650 osdc->num_requests = 0; 2651 INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout); 2652 
INIT_DELAYED_WORK(&osdc->osds_timeout_work, handle_osds_timeout); 2653 spin_lock_init(&osdc->event_lock); 2654 osdc->event_tree = RB_ROOT; 2655 osdc->event_count = 0; 2656 2657 schedule_delayed_work(&osdc->osds_timeout_work, 2658 round_jiffies_relative(osdc->client->options->osd_idle_ttl * HZ)); 2659 2660 err = -ENOMEM; 2661 osdc->req_mempool = mempool_create_kmalloc_pool(10, 2662 sizeof(struct ceph_osd_request)); 2663 if (!osdc->req_mempool) 2664 goto out; 2665 2666 err = ceph_msgpool_init(&osdc->msgpool_op, CEPH_MSG_OSD_OP, 2667 OSD_OP_FRONT_LEN, 10, true, 2668 "osd_op"); 2669 if (err < 0) 2670 goto out_mempool; 2671 err = ceph_msgpool_init(&osdc->msgpool_op_reply, CEPH_MSG_OSD_OPREPLY, 2672 OSD_OPREPLY_FRONT_LEN, 10, true, 2673 "osd_op_reply"); 2674 if (err < 0) 2675 goto out_msgpool; 2676 2677 err = -ENOMEM; 2678 osdc->notify_wq = create_singlethread_workqueue("ceph-watch-notify"); 2679 if (!osdc->notify_wq) 2680 goto out_msgpool_reply; 2681 2682 return 0; 2683 2684 out_msgpool_reply: 2685 ceph_msgpool_destroy(&osdc->msgpool_op_reply); 2686 out_msgpool: 2687 ceph_msgpool_destroy(&osdc->msgpool_op); 2688 out_mempool: 2689 mempool_destroy(osdc->req_mempool); 2690 out: 2691 return err; 2692 } 2693 2694 void ceph_osdc_stop(struct ceph_osd_client *osdc) 2695 { 2696 flush_workqueue(osdc->notify_wq); 2697 destroy_workqueue(osdc->notify_wq); 2698 cancel_delayed_work_sync(&osdc->timeout_work); 2699 cancel_delayed_work_sync(&osdc->osds_timeout_work); 2700 if (osdc->osdmap) { 2701 ceph_osdmap_destroy(osdc->osdmap); 2702 osdc->osdmap = NULL; 2703 } 2704 remove_all_osds(osdc); 2705 mempool_destroy(osdc->req_mempool); 2706 ceph_msgpool_destroy(&osdc->msgpool_op); 2707 ceph_msgpool_destroy(&osdc->msgpool_op_reply); 2708 } 2709 2710 /* 2711 * Read some contiguous pages. If we cross a stripe boundary, shorten 2712 * *plen. Return number of bytes read, or error. 
2713 */ 2714 int ceph_osdc_readpages(struct ceph_osd_client *osdc, 2715 struct ceph_vino vino, struct ceph_file_layout *layout, 2716 u64 off, u64 *plen, 2717 u32 truncate_seq, u64 truncate_size, 2718 struct page **pages, int num_pages, int page_align) 2719 { 2720 struct ceph_osd_request *req; 2721 int rc = 0; 2722 2723 dout("readpages on ino %llx.%llx on %llu~%llu\n", vino.ino, 2724 vino.snap, off, *plen); 2725 req = ceph_osdc_new_request(osdc, layout, vino, off, plen, 1, 2726 CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ, 2727 NULL, truncate_seq, truncate_size, 2728 false); 2729 if (IS_ERR(req)) 2730 return PTR_ERR(req); 2731 2732 /* it may be a short read due to an object boundary */ 2733 2734 osd_req_op_extent_osd_data_pages(req, 0, 2735 pages, *plen, page_align, false, false); 2736 2737 dout("readpages final extent is %llu~%llu (%llu bytes align %d)\n", 2738 off, *plen, *plen, page_align); 2739 2740 ceph_osdc_build_request(req, off, NULL, vino.snap, NULL); 2741 2742 rc = ceph_osdc_start_request(osdc, req, false); 2743 if (!rc) 2744 rc = ceph_osdc_wait_request(osdc, req); 2745 2746 ceph_osdc_put_request(req); 2747 dout("readpages result %d\n", rc); 2748 return rc; 2749 } 2750 EXPORT_SYMBOL(ceph_osdc_readpages); 2751 2752 /* 2753 * do a synchronous write on N pages 2754 */ 2755 int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino, 2756 struct ceph_file_layout *layout, 2757 struct ceph_snap_context *snapc, 2758 u64 off, u64 len, 2759 u32 truncate_seq, u64 truncate_size, 2760 struct timespec *mtime, 2761 struct page **pages, int num_pages) 2762 { 2763 struct ceph_osd_request *req; 2764 int rc = 0; 2765 int page_align = off & ~PAGE_MASK; 2766 2767 BUG_ON(vino.snap != CEPH_NOSNAP); /* snapshots aren't writeable */ 2768 req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 1, 2769 CEPH_OSD_OP_WRITE, 2770 CEPH_OSD_FLAG_ONDISK | CEPH_OSD_FLAG_WRITE, 2771 snapc, truncate_seq, truncate_size, 2772 true); 2773 if (IS_ERR(req)) 2774 return PTR_ERR(req); 
2775 2776 /* it may be a short write due to an object boundary */ 2777 osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_align, 2778 false, false); 2779 dout("writepages %llu~%llu (%llu bytes)\n", off, len, len); 2780 2781 ceph_osdc_build_request(req, off, snapc, CEPH_NOSNAP, mtime); 2782 2783 rc = ceph_osdc_start_request(osdc, req, true); 2784 if (!rc) 2785 rc = ceph_osdc_wait_request(osdc, req); 2786 2787 ceph_osdc_put_request(req); 2788 if (rc == 0) 2789 rc = len; 2790 dout("writepages result %d\n", rc); 2791 return rc; 2792 } 2793 EXPORT_SYMBOL(ceph_osdc_writepages); 2794 2795 int ceph_osdc_setup(void) 2796 { 2797 BUG_ON(ceph_osd_request_cache); 2798 ceph_osd_request_cache = kmem_cache_create("ceph_osd_request", 2799 sizeof (struct ceph_osd_request), 2800 __alignof__(struct ceph_osd_request), 2801 0, NULL); 2802 2803 return ceph_osd_request_cache ? 0 : -ENOMEM; 2804 } 2805 EXPORT_SYMBOL(ceph_osdc_setup); 2806 2807 void ceph_osdc_cleanup(void) 2808 { 2809 BUG_ON(!ceph_osd_request_cache); 2810 kmem_cache_destroy(ceph_osd_request_cache); 2811 ceph_osd_request_cache = NULL; 2812 } 2813 EXPORT_SYMBOL(ceph_osdc_cleanup); 2814 2815 /* 2816 * handle incoming message 2817 */ 2818 static void dispatch(struct ceph_connection *con, struct ceph_msg *msg) 2819 { 2820 struct ceph_osd *osd = con->private; 2821 struct ceph_osd_client *osdc; 2822 int type = le16_to_cpu(msg->hdr.type); 2823 2824 if (!osd) 2825 goto out; 2826 osdc = osd->o_osdc; 2827 2828 switch (type) { 2829 case CEPH_MSG_OSD_MAP: 2830 ceph_osdc_handle_map(osdc, msg); 2831 break; 2832 case CEPH_MSG_OSD_OPREPLY: 2833 handle_reply(osdc, msg, con); 2834 break; 2835 case CEPH_MSG_WATCH_NOTIFY: 2836 handle_watch_notify(osdc, msg); 2837 break; 2838 2839 default: 2840 pr_err("received unknown message type %d %s\n", type, 2841 ceph_msg_type_name(type)); 2842 } 2843 out: 2844 ceph_msg_put(msg); 2845 } 2846 2847 /* 2848 * lookup and return message for incoming reply. set up reply message 2849 * pages. 
2850 */ 2851 static struct ceph_msg *get_reply(struct ceph_connection *con, 2852 struct ceph_msg_header *hdr, 2853 int *skip) 2854 { 2855 struct ceph_osd *osd = con->private; 2856 struct ceph_osd_client *osdc = osd->o_osdc; 2857 struct ceph_msg *m; 2858 struct ceph_osd_request *req; 2859 int front_len = le32_to_cpu(hdr->front_len); 2860 int data_len = le32_to_cpu(hdr->data_len); 2861 u64 tid; 2862 2863 tid = le64_to_cpu(hdr->tid); 2864 mutex_lock(&osdc->request_mutex); 2865 req = __lookup_request(osdc, tid); 2866 if (!req) { 2867 *skip = 1; 2868 m = NULL; 2869 dout("get_reply unknown tid %llu from osd%d\n", tid, 2870 osd->o_osd); 2871 goto out; 2872 } 2873 2874 if (req->r_reply->con) 2875 dout("%s revoking msg %p from old con %p\n", __func__, 2876 req->r_reply, req->r_reply->con); 2877 ceph_msg_revoke_incoming(req->r_reply); 2878 2879 if (front_len > req->r_reply->front_alloc_len) { 2880 pr_warning("get_reply front %d > preallocated %d (%u#%llu)\n", 2881 front_len, req->r_reply->front_alloc_len, 2882 (unsigned int)con->peer_name.type, 2883 le64_to_cpu(con->peer_name.num)); 2884 m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front_len, GFP_NOFS, 2885 false); 2886 if (!m) 2887 goto out; 2888 ceph_msg_put(req->r_reply); 2889 req->r_reply = m; 2890 } 2891 m = ceph_msg_get(req->r_reply); 2892 2893 if (data_len > 0) { 2894 struct ceph_osd_data *osd_data; 2895 2896 /* 2897 * XXX This is assuming there is only one op containing 2898 * XXX page data. Probably OK for reads, but this 2899 * XXX ought to be done more generally. 
2900 */ 2901 osd_data = osd_req_op_extent_osd_data(req, 0); 2902 if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) { 2903 if (osd_data->pages && 2904 unlikely(osd_data->length < data_len)) { 2905 2906 pr_warning("tid %lld reply has %d bytes " 2907 "we had only %llu bytes ready\n", 2908 tid, data_len, osd_data->length); 2909 *skip = 1; 2910 ceph_msg_put(m); 2911 m = NULL; 2912 goto out; 2913 } 2914 } 2915 } 2916 *skip = 0; 2917 dout("get_reply tid %lld %p\n", tid, m); 2918 2919 out: 2920 mutex_unlock(&osdc->request_mutex); 2921 return m; 2922 2923 } 2924 2925 static struct ceph_msg *alloc_msg(struct ceph_connection *con, 2926 struct ceph_msg_header *hdr, 2927 int *skip) 2928 { 2929 struct ceph_osd *osd = con->private; 2930 int type = le16_to_cpu(hdr->type); 2931 int front = le32_to_cpu(hdr->front_len); 2932 2933 *skip = 0; 2934 switch (type) { 2935 case CEPH_MSG_OSD_MAP: 2936 case CEPH_MSG_WATCH_NOTIFY: 2937 return ceph_msg_new(type, front, GFP_NOFS, false); 2938 case CEPH_MSG_OSD_OPREPLY: 2939 return get_reply(con, hdr, skip); 2940 default: 2941 pr_info("alloc_msg unexpected msg type %d from osd%d\n", type, 2942 osd->o_osd); 2943 *skip = 1; 2944 return NULL; 2945 } 2946 } 2947 2948 /* 2949 * Wrappers to refcount containing ceph_osd struct 2950 */ 2951 static struct ceph_connection *get_osd_con(struct ceph_connection *con) 2952 { 2953 struct ceph_osd *osd = con->private; 2954 if (get_osd(osd)) 2955 return con; 2956 return NULL; 2957 } 2958 2959 static void put_osd_con(struct ceph_connection *con) 2960 { 2961 struct ceph_osd *osd = con->private; 2962 put_osd(osd); 2963 } 2964 2965 /* 2966 * authentication 2967 */ 2968 /* 2969 * Note: returned pointer is the address of a structure that's 2970 * managed separately. Caller must *not* attempt to free it. 
2971 */ 2972 static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con, 2973 int *proto, int force_new) 2974 { 2975 struct ceph_osd *o = con->private; 2976 struct ceph_osd_client *osdc = o->o_osdc; 2977 struct ceph_auth_client *ac = osdc->client->monc.auth; 2978 struct ceph_auth_handshake *auth = &o->o_auth; 2979 2980 if (force_new && auth->authorizer) { 2981 ceph_auth_destroy_authorizer(ac, auth->authorizer); 2982 auth->authorizer = NULL; 2983 } 2984 if (!auth->authorizer) { 2985 int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_OSD, 2986 auth); 2987 if (ret) 2988 return ERR_PTR(ret); 2989 } else { 2990 int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_OSD, 2991 auth); 2992 if (ret) 2993 return ERR_PTR(ret); 2994 } 2995 *proto = ac->protocol; 2996 2997 return auth; 2998 } 2999 3000 3001 static int verify_authorizer_reply(struct ceph_connection *con, int len) 3002 { 3003 struct ceph_osd *o = con->private; 3004 struct ceph_osd_client *osdc = o->o_osdc; 3005 struct ceph_auth_client *ac = osdc->client->monc.auth; 3006 3007 return ceph_auth_verify_authorizer_reply(ac, o->o_auth.authorizer, len); 3008 } 3009 3010 static int invalidate_authorizer(struct ceph_connection *con) 3011 { 3012 struct ceph_osd *o = con->private; 3013 struct ceph_osd_client *osdc = o->o_osdc; 3014 struct ceph_auth_client *ac = osdc->client->monc.auth; 3015 3016 ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD); 3017 return ceph_monc_validate_auth(&osdc->client->monc); 3018 } 3019 3020 static const struct ceph_connection_operations osd_con_ops = { 3021 .get = get_osd_con, 3022 .put = put_osd_con, 3023 .dispatch = dispatch, 3024 .get_authorizer = get_authorizer, 3025 .verify_authorizer_reply = verify_authorizer_reply, 3026 .invalidate_authorizer = invalidate_authorizer, 3027 .alloc_msg = alloc_msg, 3028 .fault = osd_reset, 3029 }; 3030 3031 3032 3033 3034 3035 /* LDV_COMMENT_BEGIN_MAIN */ 3036 #ifdef LDV_MAIN6_sequence_infinite_withcheck_stateful 3037 
3038 /*###########################################################################*/ 3039 3040 /*############## Driver Environment Generator 0.2 output ####################*/ 3041 3042 /*###########################################################################*/ 3043 3044 3045 3046 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test if all kernel resources are correctly released by driver before driver will be unloaded. */ 3047 void ldv_check_final_state(void); 3048 3049 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result. */ 3050 void ldv_check_return_value(int res); 3051 3052 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result of probe() function. */ 3053 void ldv_check_return_value_probe(int res); 3054 3055 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Initializes the model. */ 3056 void ldv_initialize(void); 3057 3058 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Reinitializes the model between distinct model function calls. */ 3059 void ldv_handler_precall(void); 3060 3061 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Returns arbitrary interger value. */ 3062 int nondet_int(void); 3063 3064 /* LDV_COMMENT_VAR_DECLARE_LDV Special variable for LDV verifier. */ 3065 int LDV_IN_INTERRUPT; 3066 3067 /* LDV_COMMENT_FUNCTION_MAIN Main function for LDV verifier. 
*/ 3068 void ldv_main6_sequence_infinite_withcheck_stateful(void) { 3069 3070 3071 3072 /* LDV_COMMENT_BEGIN_VARIABLE_DECLARATION_PART */ 3073 /*============================= VARIABLE DECLARATION PART =============================*/ 3074 /** STRUCT: struct type: ceph_connection_operations, struct name: osd_con_ops **/ 3075 /* content: static struct ceph_connection *get_osd_con(struct ceph_connection *con)*/ 3076 /* LDV_COMMENT_BEGIN_PREP */ 3077 #ifdef CONFIG_BLOCK 3078 #endif 3079 #define OSD_OP_FRONT_LEN 4096 3080 #define OSD_OPREPLY_FRONT_LEN 512 3081 #ifdef CONFIG_BLOCK 3082 #endif 3083 #define osd_req_op_data(oreq, whch, typ, fld) \ 3084 ({ \ 3085 BUG_ON(whch >= (oreq)->r_num_ops); \ 3086 &(oreq)->r_ops[whch].typ.fld; \ 3087 }) 3088 #ifdef CONFIG_BLOCK 3089 #endif 3090 #ifdef CONFIG_BLOCK 3091 #endif 3092 #ifdef CONFIG_BLOCK 3093 #endif 3094 /* LDV_COMMENT_END_PREP */ 3095 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "get_osd_con" */ 3096 struct ceph_connection * var_group1; 3097 /* content: static void put_osd_con(struct ceph_connection *con)*/ 3098 /* LDV_COMMENT_BEGIN_PREP */ 3099 #ifdef CONFIG_BLOCK 3100 #endif 3101 #define OSD_OP_FRONT_LEN 4096 3102 #define OSD_OPREPLY_FRONT_LEN 512 3103 #ifdef CONFIG_BLOCK 3104 #endif 3105 #define osd_req_op_data(oreq, whch, typ, fld) \ 3106 ({ \ 3107 BUG_ON(whch >= (oreq)->r_num_ops); \ 3108 &(oreq)->r_ops[whch].typ.fld; \ 3109 }) 3110 #ifdef CONFIG_BLOCK 3111 #endif 3112 #ifdef CONFIG_BLOCK 3113 #endif 3114 #ifdef CONFIG_BLOCK 3115 #endif 3116 /* LDV_COMMENT_END_PREP */ 3117 /* content: static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)*/ 3118 /* LDV_COMMENT_BEGIN_PREP */ 3119 #ifdef CONFIG_BLOCK 3120 #endif 3121 #define OSD_OP_FRONT_LEN 4096 3122 #define OSD_OPREPLY_FRONT_LEN 512 3123 #ifdef CONFIG_BLOCK 3124 #endif 3125 #define osd_req_op_data(oreq, whch, typ, fld) \ 3126 ({ \ 3127 BUG_ON(whch >= (oreq)->r_num_ops); \ 3128 &(oreq)->r_ops[whch].typ.fld; \ 3129 }) 3130 #ifdef 
CONFIG_BLOCK 3131 #endif 3132 #ifdef CONFIG_BLOCK 3133 #endif 3134 #ifdef CONFIG_BLOCK 3135 #endif 3136 /* LDV_COMMENT_END_PREP */ 3137 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "dispatch" */ 3138 struct ceph_msg * var_group2; 3139 /* content: static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con, int *proto, int force_new)*/ 3140 /* LDV_COMMENT_BEGIN_PREP */ 3141 #ifdef CONFIG_BLOCK 3142 #endif 3143 #define OSD_OP_FRONT_LEN 4096 3144 #define OSD_OPREPLY_FRONT_LEN 512 3145 #ifdef CONFIG_BLOCK 3146 #endif 3147 #define osd_req_op_data(oreq, whch, typ, fld) \ 3148 ({ \ 3149 BUG_ON(whch >= (oreq)->r_num_ops); \ 3150 &(oreq)->r_ops[whch].typ.fld; \ 3151 }) 3152 #ifdef CONFIG_BLOCK 3153 #endif 3154 #ifdef CONFIG_BLOCK 3155 #endif 3156 #ifdef CONFIG_BLOCK 3157 #endif 3158 /* LDV_COMMENT_END_PREP */ 3159 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "get_authorizer" */ 3160 int * var_get_authorizer_100_p1; 3161 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "get_authorizer" */ 3162 int var_get_authorizer_100_p2; 3163 /* content: static int verify_authorizer_reply(struct ceph_connection *con, int len)*/ 3164 /* LDV_COMMENT_BEGIN_PREP */ 3165 #ifdef CONFIG_BLOCK 3166 #endif 3167 #define OSD_OP_FRONT_LEN 4096 3168 #define OSD_OPREPLY_FRONT_LEN 512 3169 #ifdef CONFIG_BLOCK 3170 #endif 3171 #define osd_req_op_data(oreq, whch, typ, fld) \ 3172 ({ \ 3173 BUG_ON(whch >= (oreq)->r_num_ops); \ 3174 &(oreq)->r_ops[whch].typ.fld; \ 3175 }) 3176 #ifdef CONFIG_BLOCK 3177 #endif 3178 #ifdef CONFIG_BLOCK 3179 #endif 3180 #ifdef CONFIG_BLOCK 3181 #endif 3182 /* LDV_COMMENT_END_PREP */ 3183 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "verify_authorizer_reply" */ 3184 int var_verify_authorizer_reply_101_p1; 3185 /* content: static int invalidate_authorizer(struct ceph_connection *con)*/ 3186 /* LDV_COMMENT_BEGIN_PREP */ 3187 #ifdef CONFIG_BLOCK 3188 #endif 3189 #define OSD_OP_FRONT_LEN 4096 3190 #define 
OSD_OPREPLY_FRONT_LEN 512 3191 #ifdef CONFIG_BLOCK 3192 #endif 3193 #define osd_req_op_data(oreq, whch, typ, fld) \ 3194 ({ \ 3195 BUG_ON(whch >= (oreq)->r_num_ops); \ 3196 &(oreq)->r_ops[whch].typ.fld; \ 3197 }) 3198 #ifdef CONFIG_BLOCK 3199 #endif 3200 #ifdef CONFIG_BLOCK 3201 #endif 3202 #ifdef CONFIG_BLOCK 3203 #endif 3204 /* LDV_COMMENT_END_PREP */ 3205 /* content: static struct ceph_msg *alloc_msg(struct ceph_connection *con, struct ceph_msg_header *hdr, int *skip)*/ 3206 /* LDV_COMMENT_BEGIN_PREP */ 3207 #ifdef CONFIG_BLOCK 3208 #endif 3209 #define OSD_OP_FRONT_LEN 4096 3210 #define OSD_OPREPLY_FRONT_LEN 512 3211 #ifdef CONFIG_BLOCK 3212 #endif 3213 #define osd_req_op_data(oreq, whch, typ, fld) \ 3214 ({ \ 3215 BUG_ON(whch >= (oreq)->r_num_ops); \ 3216 &(oreq)->r_ops[whch].typ.fld; \ 3217 }) 3218 #ifdef CONFIG_BLOCK 3219 #endif 3220 #ifdef CONFIG_BLOCK 3221 #endif 3222 #ifdef CONFIG_BLOCK 3223 #endif 3224 /* LDV_COMMENT_END_PREP */ 3225 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "alloc_msg" */ 3226 struct ceph_msg_header * var_group3; 3227 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "alloc_msg" */ 3228 int * var_alloc_msg_97_p2; 3229 /* content: static void osd_reset(struct ceph_connection *con)*/ 3230 /* LDV_COMMENT_BEGIN_PREP */ 3231 #ifdef CONFIG_BLOCK 3232 #endif 3233 #define OSD_OP_FRONT_LEN 4096 3234 #define OSD_OPREPLY_FRONT_LEN 512 3235 #ifdef CONFIG_BLOCK 3236 #endif 3237 #define osd_req_op_data(oreq, whch, typ, fld) \ 3238 ({ \ 3239 BUG_ON(whch >= (oreq)->r_num_ops); \ 3240 &(oreq)->r_ops[whch].typ.fld; \ 3241 }) 3242 #ifdef CONFIG_BLOCK 3243 #endif 3244 #ifdef CONFIG_BLOCK 3245 #endif 3246 #ifdef CONFIG_BLOCK 3247 #endif 3248 /* LDV_COMMENT_END_PREP */ 3249 3250 3251 3252 3253 /* LDV_COMMENT_END_VARIABLE_DECLARATION_PART */ 3254 /* LDV_COMMENT_BEGIN_VARIABLE_INITIALIZING_PART */ 3255 /*============================= VARIABLE INITIALIZING PART =============================*/ 3256 LDV_IN_INTERRUPT=1; 3257 3258 3259 
3260 3261 /* LDV_COMMENT_END_VARIABLE_INITIALIZING_PART */ 3262 /* LDV_COMMENT_BEGIN_FUNCTION_CALL_SECTION */ 3263 /*============================= FUNCTION CALL SECTION =============================*/ 3264 /* LDV_COMMENT_FUNCTION_CALL Initialize LDV model. */ 3265 ldv_initialize(); 3266 3267 3268 3269 while( nondet_int() 3270 ) { 3271 3272 switch(nondet_int()) { 3273 3274 case 0: { 3275 3276 /** STRUCT: struct type: ceph_connection_operations, struct name: osd_con_ops **/ 3277 3278 3279 /* content: static struct ceph_connection *get_osd_con(struct ceph_connection *con)*/ 3280 /* LDV_COMMENT_BEGIN_PREP */ 3281 #ifdef CONFIG_BLOCK 3282 #endif 3283 #define OSD_OP_FRONT_LEN 4096 3284 #define OSD_OPREPLY_FRONT_LEN 512 3285 #ifdef CONFIG_BLOCK 3286 #endif 3287 #define osd_req_op_data(oreq, whch, typ, fld) \ 3288 ({ \ 3289 BUG_ON(whch >= (oreq)->r_num_ops); \ 3290 &(oreq)->r_ops[whch].typ.fld; \ 3291 }) 3292 #ifdef CONFIG_BLOCK 3293 #endif 3294 #ifdef CONFIG_BLOCK 3295 #endif 3296 #ifdef CONFIG_BLOCK 3297 #endif 3298 /* LDV_COMMENT_END_PREP */ 3299 /* LDV_COMMENT_FUNCTION_CALL Function from field "get" from driver structure with callbacks "osd_con_ops" */ 3300 ldv_handler_precall(); 3301 get_osd_con( var_group1); 3302 3303 3304 3305 3306 } 3307 3308 break; 3309 case 1: { 3310 3311 /** STRUCT: struct type: ceph_connection_operations, struct name: osd_con_ops **/ 3312 3313 3314 /* content: static void put_osd_con(struct ceph_connection *con)*/ 3315 /* LDV_COMMENT_BEGIN_PREP */ 3316 #ifdef CONFIG_BLOCK 3317 #endif 3318 #define OSD_OP_FRONT_LEN 4096 3319 #define OSD_OPREPLY_FRONT_LEN 512 3320 #ifdef CONFIG_BLOCK 3321 #endif 3322 #define osd_req_op_data(oreq, whch, typ, fld) \ 3323 ({ \ 3324 BUG_ON(whch >= (oreq)->r_num_ops); \ 3325 &(oreq)->r_ops[whch].typ.fld; \ 3326 }) 3327 #ifdef CONFIG_BLOCK 3328 #endif 3329 #ifdef CONFIG_BLOCK 3330 #endif 3331 #ifdef CONFIG_BLOCK 3332 #endif 3333 /* LDV_COMMENT_END_PREP */ 3334 /* LDV_COMMENT_FUNCTION_CALL Function from field "put" from 
driver structure with callbacks "osd_con_ops" */ 3335 ldv_handler_precall(); 3336 put_osd_con( var_group1); 3337 3338 3339 3340 3341 } 3342 3343 break; 3344 case 2: { 3345 3346 /** STRUCT: struct type: ceph_connection_operations, struct name: osd_con_ops **/ 3347 3348 3349 /* content: static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)*/ 3350 /* LDV_COMMENT_BEGIN_PREP */ 3351 #ifdef CONFIG_BLOCK 3352 #endif 3353 #define OSD_OP_FRONT_LEN 4096 3354 #define OSD_OPREPLY_FRONT_LEN 512 3355 #ifdef CONFIG_BLOCK 3356 #endif 3357 #define osd_req_op_data(oreq, whch, typ, fld) \ 3358 ({ \ 3359 BUG_ON(whch >= (oreq)->r_num_ops); \ 3360 &(oreq)->r_ops[whch].typ.fld; \ 3361 }) 3362 #ifdef CONFIG_BLOCK 3363 #endif 3364 #ifdef CONFIG_BLOCK 3365 #endif 3366 #ifdef CONFIG_BLOCK 3367 #endif 3368 /* LDV_COMMENT_END_PREP */ 3369 /* LDV_COMMENT_FUNCTION_CALL Function from field "dispatch" from driver structure with callbacks "osd_con_ops" */ 3370 ldv_handler_precall(); 3371 dispatch( var_group1, var_group2); 3372 3373 3374 3375 3376 } 3377 3378 break; 3379 case 3: { 3380 3381 /** STRUCT: struct type: ceph_connection_operations, struct name: osd_con_ops **/ 3382 3383 3384 /* content: static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con, int *proto, int force_new)*/ 3385 /* LDV_COMMENT_BEGIN_PREP */ 3386 #ifdef CONFIG_BLOCK 3387 #endif 3388 #define OSD_OP_FRONT_LEN 4096 3389 #define OSD_OPREPLY_FRONT_LEN 512 3390 #ifdef CONFIG_BLOCK 3391 #endif 3392 #define osd_req_op_data(oreq, whch, typ, fld) \ 3393 ({ \ 3394 BUG_ON(whch >= (oreq)->r_num_ops); \ 3395 &(oreq)->r_ops[whch].typ.fld; \ 3396 }) 3397 #ifdef CONFIG_BLOCK 3398 #endif 3399 #ifdef CONFIG_BLOCK 3400 #endif 3401 #ifdef CONFIG_BLOCK 3402 #endif 3403 /* LDV_COMMENT_END_PREP */ 3404 /* LDV_COMMENT_FUNCTION_CALL Function from field "get_authorizer" from driver structure with callbacks "osd_con_ops" */ 3405 ldv_handler_precall(); 3406 get_authorizer( var_group1, var_get_authorizer_100_p1, 
var_get_authorizer_100_p2); 3407 3408 3409 3410 3411 } 3412 3413 break; 3414 case 4: { 3415 3416 /** STRUCT: struct type: ceph_connection_operations, struct name: osd_con_ops **/ 3417 3418 3419 /* content: static int verify_authorizer_reply(struct ceph_connection *con, int len)*/ 3420 /* LDV_COMMENT_BEGIN_PREP */ 3421 #ifdef CONFIG_BLOCK 3422 #endif 3423 #define OSD_OP_FRONT_LEN 4096 3424 #define OSD_OPREPLY_FRONT_LEN 512 3425 #ifdef CONFIG_BLOCK 3426 #endif 3427 #define osd_req_op_data(oreq, whch, typ, fld) \ 3428 ({ \ 3429 BUG_ON(whch >= (oreq)->r_num_ops); \ 3430 &(oreq)->r_ops[whch].typ.fld; \ 3431 }) 3432 #ifdef CONFIG_BLOCK 3433 #endif 3434 #ifdef CONFIG_BLOCK 3435 #endif 3436 #ifdef CONFIG_BLOCK 3437 #endif 3438 /* LDV_COMMENT_END_PREP */ 3439 /* LDV_COMMENT_FUNCTION_CALL Function from field "verify_authorizer_reply" from driver structure with callbacks "osd_con_ops" */ 3440 ldv_handler_precall(); 3441 verify_authorizer_reply( var_group1, var_verify_authorizer_reply_101_p1); 3442 3443 3444 3445 3446 } 3447 3448 break; 3449 case 5: { 3450 3451 /** STRUCT: struct type: ceph_connection_operations, struct name: osd_con_ops **/ 3452 3453 3454 /* content: static int invalidate_authorizer(struct ceph_connection *con)*/ 3455 /* LDV_COMMENT_BEGIN_PREP */ 3456 #ifdef CONFIG_BLOCK 3457 #endif 3458 #define OSD_OP_FRONT_LEN 4096 3459 #define OSD_OPREPLY_FRONT_LEN 512 3460 #ifdef CONFIG_BLOCK 3461 #endif 3462 #define osd_req_op_data(oreq, whch, typ, fld) \ 3463 ({ \ 3464 BUG_ON(whch >= (oreq)->r_num_ops); \ 3465 &(oreq)->r_ops[whch].typ.fld; \ 3466 }) 3467 #ifdef CONFIG_BLOCK 3468 #endif 3469 #ifdef CONFIG_BLOCK 3470 #endif 3471 #ifdef CONFIG_BLOCK 3472 #endif 3473 /* LDV_COMMENT_END_PREP */ 3474 /* LDV_COMMENT_FUNCTION_CALL Function from field "invalidate_authorizer" from driver structure with callbacks "osd_con_ops" */ 3475 ldv_handler_precall(); 3476 invalidate_authorizer( var_group1); 3477 3478 3479 3480 3481 } 3482 3483 break; 3484 case 6: { 3485 3486 /** STRUCT: 
struct type: ceph_connection_operations, struct name: osd_con_ops **/ 3487 3488 3489 /* content: static struct ceph_msg *alloc_msg(struct ceph_connection *con, struct ceph_msg_header *hdr, int *skip)*/ 3490 /* LDV_COMMENT_BEGIN_PREP */ 3491 #ifdef CONFIG_BLOCK 3492 #endif 3493 #define OSD_OP_FRONT_LEN 4096 3494 #define OSD_OPREPLY_FRONT_LEN 512 3495 #ifdef CONFIG_BLOCK 3496 #endif 3497 #define osd_req_op_data(oreq, whch, typ, fld) \ 3498 ({ \ 3499 BUG_ON(whch >= (oreq)->r_num_ops); \ 3500 &(oreq)->r_ops[whch].typ.fld; \ 3501 }) 3502 #ifdef CONFIG_BLOCK 3503 #endif 3504 #ifdef CONFIG_BLOCK 3505 #endif 3506 #ifdef CONFIG_BLOCK 3507 #endif 3508 /* LDV_COMMENT_END_PREP */ 3509 /* LDV_COMMENT_FUNCTION_CALL Function from field "alloc_msg" from driver structure with callbacks "osd_con_ops" */ 3510 ldv_handler_precall(); 3511 alloc_msg( var_group1, var_group3, var_alloc_msg_97_p2); 3512 3513 3514 3515 3516 } 3517 3518 break; 3519 case 7: { 3520 3521 /** STRUCT: struct type: ceph_connection_operations, struct name: osd_con_ops **/ 3522 3523 3524 /* content: static void osd_reset(struct ceph_connection *con)*/ 3525 /* LDV_COMMENT_BEGIN_PREP */ 3526 #ifdef CONFIG_BLOCK 3527 #endif 3528 #define OSD_OP_FRONT_LEN 4096 3529 #define OSD_OPREPLY_FRONT_LEN 512 3530 #ifdef CONFIG_BLOCK 3531 #endif 3532 #define osd_req_op_data(oreq, whch, typ, fld) \ 3533 ({ \ 3534 BUG_ON(whch >= (oreq)->r_num_ops); \ 3535 &(oreq)->r_ops[whch].typ.fld; \ 3536 }) 3537 #ifdef CONFIG_BLOCK 3538 #endif 3539 #ifdef CONFIG_BLOCK 3540 #endif 3541 #ifdef CONFIG_BLOCK 3542 #endif 3543 /* LDV_COMMENT_END_PREP */ 3544 /* LDV_COMMENT_FUNCTION_CALL Function from field "fault" from driver structure with callbacks "osd_con_ops" */ 3545 ldv_handler_precall(); 3546 osd_reset( var_group1); 3547 3548 3549 3550 3551 } 3552 3553 break; 3554 default: break; 3555 3556 } 3557 3558 } 3559 3560 ldv_module_exit: 3561 3562 /* LDV_COMMENT_FUNCTION_CALL Checks that all resources and locks are correctly released before the driver 
will be unloaded. */ 3563 ldv_final: ldv_check_final_state(); 3564 3565 /* LDV_COMMENT_END_FUNCTION_CALL_SECTION */ 3566 return; 3567 3568 } 3569 #endif 3570 3571 /* LDV_COMMENT_END_MAIN */
1 2 3 #include <linux/kernel.h> 4 #include <linux/mutex.h> 5 #include <linux/errno.h> 6 #include <verifier/rcv.h> 7 8 static int ldv_mutex_crush_scratch_mutex_of_ceph_osdmap; 9 10 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_lock_interruptible_crush_scratch_mutex_of_ceph_osdmap') Check that mutex 'crush_scratch_mutex_of_ceph_osdmap' was unlocked and nondeterministically lock it. Return the corresponding error code on fails */ 11 int ldv_mutex_lock_interruptible_crush_scratch_mutex_of_ceph_osdmap(struct mutex *lock) 12 { 13 int nondetermined; 14 15 /* LDV_COMMENT_ASSERT Mutex 'crush_scratch_mutex_of_ceph_osdmap' must be unlocked */ 16 ldv_assert(ldv_mutex_crush_scratch_mutex_of_ceph_osdmap == 1); 17 18 /* LDV_COMMENT_OTHER Construct nondetermined result*/ 19 nondetermined = ldv_undef_int(); 20 21 /* LDV_COMMENT_ASSERT Nondeterministically lock mutex 'crush_scratch_mutex_of_ceph_osdmap' */ 22 if (nondetermined) 23 { 24 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'crush_scratch_mutex_of_ceph_osdmap' */ 25 ldv_mutex_crush_scratch_mutex_of_ceph_osdmap = 2; 26 /* LDV_COMMENT_RETURN Finish with success */ 27 return 0; 28 } 29 else 30 { 31 /* LDV_COMMENT_RETURN Finish with fail. Mutex 'crush_scratch_mutex_of_ceph_osdmap' is keeped unlocked */ 32 return -EINTR; 33 } 34 } 35 36 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_lock_killable_crush_scratch_mutex_of_ceph_osdmap') Check that mutex 'crush_scratch_mutex_of_ceph_osdmap' wasn unlocked and nondeterministically lock it. 
Return the corresponding error code on fails*/ 37 int ldv_mutex_lock_killable_crush_scratch_mutex_of_ceph_osdmap(struct mutex *lock) 38 { 39 int nondetermined; 40 41 /* LDV_COMMENT_ASSERT Mutex 'crush_scratch_mutex_of_ceph_osdmap' must be unlocked */ 42 ldv_assert(ldv_mutex_crush_scratch_mutex_of_ceph_osdmap == 1); 43 44 /* LDV_COMMENT_OTHER Construct nondetermined result */ 45 nondetermined = ldv_undef_int(); 46 47 /* LDV_COMMENT_ASSERT Nondeterministically lock mutex 'crush_scratch_mutex_of_ceph_osdmap' */ 48 if (nondetermined) 49 { 50 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'crush_scratch_mutex_of_ceph_osdmap' */ 51 ldv_mutex_crush_scratch_mutex_of_ceph_osdmap = 2; 52 /* LDV_COMMENT_RETURN Finish with success*/ 53 return 0; 54 } 55 else 56 { 57 /* LDV_COMMENT_RETURN Finish with the fail. Mutex 'crush_scratch_mutex_of_ceph_osdmap' is keeped unlocked */ 58 return -EINTR; 59 } 60 } 61 62 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_lock_crush_scratch_mutex_of_ceph_osdmap') Check that mutex 'crush_scratch_mutex_of_ceph_osdmap' was not locked and lock it */ 63 void ldv_mutex_lock_crush_scratch_mutex_of_ceph_osdmap(struct mutex *lock) 64 { 65 /* LDV_COMMENT_ASSERT Mutex 'crush_scratch_mutex_of_ceph_osdmap' must be unlocked */ 66 ldv_assert(ldv_mutex_crush_scratch_mutex_of_ceph_osdmap == 1); 67 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'crush_scratch_mutex_of_ceph_osdmap' */ 68 ldv_mutex_crush_scratch_mutex_of_ceph_osdmap = 2; 69 } 70 71 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_trylock_crush_scratch_mutex_of_ceph_osdmap') Check that mutex 'crush_scratch_mutex_of_ceph_osdmap' was not locked and nondeterministically lock it. 
Return 0 on fails */ 72 int ldv_mutex_trylock_crush_scratch_mutex_of_ceph_osdmap(struct mutex *lock) 73 { 74 int is_mutex_held_by_another_thread; 75 76 /* LDV_COMMENT_ASSERT It may be an error if mutex 'crush_scratch_mutex_of_ceph_osdmap' is locked at this point */ 77 ldv_assert(ldv_mutex_crush_scratch_mutex_of_ceph_osdmap == 1); 78 79 /* LDV_COMMENT_OTHER Construct nondetermined result */ 80 is_mutex_held_by_another_thread = ldv_undef_int(); 81 82 /* LDV_COMMENT_ASSERT Nondeterministically lock mutex 'crush_scratch_mutex_of_ceph_osdmap' */ 83 if (is_mutex_held_by_another_thread) 84 { 85 /* LDV_COMMENT_RETURN Finish with fail */ 86 return 0; 87 } 88 else 89 { 90 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'crush_scratch_mutex_of_ceph_osdmap' */ 91 ldv_mutex_crush_scratch_mutex_of_ceph_osdmap = 2; 92 /* LDV_COMMENT_RETURN Finish with success */ 93 return 1; 94 } 95 } 96 97 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_atomic_dec_and_mutex_lock_crush_scratch_mutex_of_ceph_osdmap') Lock mutex 'crush_scratch_mutex_of_ceph_osdmap' if atomic decrement result is zero */ 98 int ldv_atomic_dec_and_mutex_lock_crush_scratch_mutex_of_ceph_osdmap(atomic_t *cnt, struct mutex *lock) 99 { 100 int atomic_value_after_dec; 101 102 /* LDV_COMMENT_ASSERT Mutex 'crush_scratch_mutex_of_ceph_osdmap' must be unlocked (since we may lock it in this function) */ 103 ldv_assert(ldv_mutex_crush_scratch_mutex_of_ceph_osdmap == 1); 104 105 /* LDV_COMMENT_OTHER Assign the result of atomic decrement */ 106 atomic_value_after_dec = ldv_undef_int(); 107 108 /* LDV_COMMENT_ASSERT Check if atomic decrement returns zero */ 109 if (atomic_value_after_dec == 0) 110 { 111 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'crush_scratch_mutex_of_ceph_osdmap', as atomic has decremented to zero */ 112 ldv_mutex_crush_scratch_mutex_of_ceph_osdmap = 2; 113 /* LDV_COMMENT_RETURN Return 1 with locked mutex 'crush_scratch_mutex_of_ceph_osdmap' */ 114 return 1; 115 } 116 117 /* LDV_COMMENT_RETURN Atomic decrement is still not 
zero, return 0 without locking mutex 'crush_scratch_mutex_of_ceph_osdmap' */ 118 return 0; 119 } 120 121 /* TODO Syncronize with 39_7a ldv_spin_is_locked! */ 122 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_is_locked_crush_scratch_mutex_of_ceph_osdmap') Check whether mutex 'crush_scratch_mutex_of_ceph_osdmap' was locked */ 123 int ldv_mutex_is_locked_crush_scratch_mutex_of_ceph_osdmap(struct mutex *lock) 124 { 125 int nondetermined; 126 127 if(ldv_mutex_crush_scratch_mutex_of_ceph_osdmap == 1) 128 { 129 /* LDV_COMMENT_OTHER Construct nondetermined result */ 130 nondetermined = ldv_undef_int(); 131 132 /* LDV_COMMENT_ASSERT Nondeterministically understand whether mutex 'crush_scratch_mutex_of_ceph_osdmap' was locked */ 133 if(nondetermined) 134 { 135 /* LDV_COMMENT_RETURN Mutex 'crush_scratch_mutex_of_ceph_osdmap' was unlocked */ 136 return 0; 137 } 138 else 139 { 140 /* LDV_COMMENT_RETURN Mutex 'crush_scratch_mutex_of_ceph_osdmap' was locked */ 141 return 1; 142 } 143 } 144 else 145 { 146 /* LDV_COMMENT_RETURN Mutex 'crush_scratch_mutex_of_ceph_osdmap' was locked */ 147 return 1; 148 } 149 } 150 151 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_unlock_crush_scratch_mutex_of_ceph_osdmap') Check that mutex 'crush_scratch_mutex_of_ceph_osdmap' was locked and unlock it */ 152 void ldv_mutex_unlock_crush_scratch_mutex_of_ceph_osdmap(struct mutex *lock) 153 { 154 /* LDV_COMMENT_ASSERT Mutex 'crush_scratch_mutex_of_ceph_osdmap' must be locked */ 155 ldv_assert(ldv_mutex_crush_scratch_mutex_of_ceph_osdmap == 2); 156 /* LDV_COMMENT_CHANGE_STATE Unlock mutex 'crush_scratch_mutex_of_ceph_osdmap' */ 157 ldv_mutex_crush_scratch_mutex_of_ceph_osdmap = 1; 158 } 159 160 static int ldv_mutex_i_mutex_of_inode; 161 162 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_lock_interruptible_i_mutex_of_inode') Check that mutex 'i_mutex_of_inode' was unlocked and nondeterministically lock it. 
Return the corresponding error code on fails */ 163 int ldv_mutex_lock_interruptible_i_mutex_of_inode(struct mutex *lock) 164 { 165 int nondetermined; 166 167 /* LDV_COMMENT_ASSERT Mutex 'i_mutex_of_inode' must be unlocked */ 168 ldv_assert(ldv_mutex_i_mutex_of_inode == 1); 169 170 /* LDV_COMMENT_OTHER Construct nondetermined result*/ 171 nondetermined = ldv_undef_int(); 172 173 /* LDV_COMMENT_ASSERT Nondeterministically lock mutex 'i_mutex_of_inode' */ 174 if (nondetermined) 175 { 176 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'i_mutex_of_inode' */ 177 ldv_mutex_i_mutex_of_inode = 2; 178 /* LDV_COMMENT_RETURN Finish with success */ 179 return 0; 180 } 181 else 182 { 183 /* LDV_COMMENT_RETURN Finish with fail. Mutex 'i_mutex_of_inode' is keeped unlocked */ 184 return -EINTR; 185 } 186 } 187 188 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_lock_killable_i_mutex_of_inode') Check that mutex 'i_mutex_of_inode' wasn unlocked and nondeterministically lock it. Return the corresponding error code on fails*/ 189 int ldv_mutex_lock_killable_i_mutex_of_inode(struct mutex *lock) 190 { 191 int nondetermined; 192 193 /* LDV_COMMENT_ASSERT Mutex 'i_mutex_of_inode' must be unlocked */ 194 ldv_assert(ldv_mutex_i_mutex_of_inode == 1); 195 196 /* LDV_COMMENT_OTHER Construct nondetermined result */ 197 nondetermined = ldv_undef_int(); 198 199 /* LDV_COMMENT_ASSERT Nondeterministically lock mutex 'i_mutex_of_inode' */ 200 if (nondetermined) 201 { 202 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'i_mutex_of_inode' */ 203 ldv_mutex_i_mutex_of_inode = 2; 204 /* LDV_COMMENT_RETURN Finish with success*/ 205 return 0; 206 } 207 else 208 { 209 /* LDV_COMMENT_RETURN Finish with the fail. 
Mutex 'i_mutex_of_inode' is keeped unlocked */ 210 return -EINTR; 211 } 212 } 213 214 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_lock_i_mutex_of_inode') Check that mutex 'i_mutex_of_inode' was not locked and lock it */ 215 void ldv_mutex_lock_i_mutex_of_inode(struct mutex *lock) 216 { 217 /* LDV_COMMENT_ASSERT Mutex 'i_mutex_of_inode' must be unlocked */ 218 ldv_assert(ldv_mutex_i_mutex_of_inode == 1); 219 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'i_mutex_of_inode' */ 220 ldv_mutex_i_mutex_of_inode = 2; 221 } 222 223 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_trylock_i_mutex_of_inode') Check that mutex 'i_mutex_of_inode' was not locked and nondeterministically lock it. Return 0 on fails */ 224 int ldv_mutex_trylock_i_mutex_of_inode(struct mutex *lock) 225 { 226 int is_mutex_held_by_another_thread; 227 228 /* LDV_COMMENT_ASSERT It may be an error if mutex 'i_mutex_of_inode' is locked at this point */ 229 ldv_assert(ldv_mutex_i_mutex_of_inode == 1); 230 231 /* LDV_COMMENT_OTHER Construct nondetermined result */ 232 is_mutex_held_by_another_thread = ldv_undef_int(); 233 234 /* LDV_COMMENT_ASSERT Nondeterministically lock mutex 'i_mutex_of_inode' */ 235 if (is_mutex_held_by_another_thread) 236 { 237 /* LDV_COMMENT_RETURN Finish with fail */ 238 return 0; 239 } 240 else 241 { 242 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'i_mutex_of_inode' */ 243 ldv_mutex_i_mutex_of_inode = 2; 244 /* LDV_COMMENT_RETURN Finish with success */ 245 return 1; 246 } 247 } 248 249 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_atomic_dec_and_mutex_lock_i_mutex_of_inode') Lock mutex 'i_mutex_of_inode' if atomic decrement result is zero */ 250 int ldv_atomic_dec_and_mutex_lock_i_mutex_of_inode(atomic_t *cnt, struct mutex *lock) 251 { 252 int atomic_value_after_dec; 253 254 /* LDV_COMMENT_ASSERT Mutex 'i_mutex_of_inode' must be unlocked (since we may lock it in this function) */ 255 ldv_assert(ldv_mutex_i_mutex_of_inode == 1); 256 257 /* LDV_COMMENT_OTHER Assign the 
result of atomic decrement */ 258 atomic_value_after_dec = ldv_undef_int(); 259 260 /* LDV_COMMENT_ASSERT Check if atomic decrement returns zero */ 261 if (atomic_value_after_dec == 0) 262 { 263 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'i_mutex_of_inode', as atomic has decremented to zero */ 264 ldv_mutex_i_mutex_of_inode = 2; 265 /* LDV_COMMENT_RETURN Return 1 with locked mutex 'i_mutex_of_inode' */ 266 return 1; 267 } 268 269 /* LDV_COMMENT_RETURN Atomic decrement is still not zero, return 0 without locking mutex 'i_mutex_of_inode' */ 270 return 0; 271 } 272 273 /* TODO Syncronize with 39_7a ldv_spin_is_locked! */ 274 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_is_locked_i_mutex_of_inode') Check whether mutex 'i_mutex_of_inode' was locked */ 275 int ldv_mutex_is_locked_i_mutex_of_inode(struct mutex *lock) 276 { 277 int nondetermined; 278 279 if(ldv_mutex_i_mutex_of_inode == 1) 280 { 281 /* LDV_COMMENT_OTHER Construct nondetermined result */ 282 nondetermined = ldv_undef_int(); 283 284 /* LDV_COMMENT_ASSERT Nondeterministically understand whether mutex 'i_mutex_of_inode' was locked */ 285 if(nondetermined) 286 { 287 /* LDV_COMMENT_RETURN Mutex 'i_mutex_of_inode' was unlocked */ 288 return 0; 289 } 290 else 291 { 292 /* LDV_COMMENT_RETURN Mutex 'i_mutex_of_inode' was locked */ 293 return 1; 294 } 295 } 296 else 297 { 298 /* LDV_COMMENT_RETURN Mutex 'i_mutex_of_inode' was locked */ 299 return 1; 300 } 301 } 302 303 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_unlock_i_mutex_of_inode') Check that mutex 'i_mutex_of_inode' was locked and unlock it */ 304 void ldv_mutex_unlock_i_mutex_of_inode(struct mutex *lock) 305 { 306 /* LDV_COMMENT_ASSERT Mutex 'i_mutex_of_inode' must be locked */ 307 ldv_assert(ldv_mutex_i_mutex_of_inode == 2); 308 /* LDV_COMMENT_CHANGE_STATE Unlock mutex 'i_mutex_of_inode' */ 309 ldv_mutex_i_mutex_of_inode = 1; 310 } 311 312 static int ldv_mutex_lock; 313 314 /* 
LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_lock_interruptible_lock') Check that mutex 'lock' was unlocked and nondeterministically lock it. Return the corresponding error code on fails */ 315 int ldv_mutex_lock_interruptible_lock(struct mutex *lock) 316 { 317 int nondetermined; 318 319 /* LDV_COMMENT_ASSERT Mutex 'lock' must be unlocked */ 320 ldv_assert(ldv_mutex_lock == 1); 321 322 /* LDV_COMMENT_OTHER Construct nondetermined result*/ 323 nondetermined = ldv_undef_int(); 324 325 /* LDV_COMMENT_ASSERT Nondeterministically lock mutex 'lock' */ 326 if (nondetermined) 327 { 328 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'lock' */ 329 ldv_mutex_lock = 2; 330 /* LDV_COMMENT_RETURN Finish with success */ 331 return 0; 332 } 333 else 334 { 335 /* LDV_COMMENT_RETURN Finish with fail. Mutex 'lock' is keeped unlocked */ 336 return -EINTR; 337 } 338 } 339 340 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_lock_killable_lock') Check that mutex 'lock' wasn unlocked and nondeterministically lock it. Return the corresponding error code on fails*/ 341 int ldv_mutex_lock_killable_lock(struct mutex *lock) 342 { 343 int nondetermined; 344 345 /* LDV_COMMENT_ASSERT Mutex 'lock' must be unlocked */ 346 ldv_assert(ldv_mutex_lock == 1); 347 348 /* LDV_COMMENT_OTHER Construct nondetermined result */ 349 nondetermined = ldv_undef_int(); 350 351 /* LDV_COMMENT_ASSERT Nondeterministically lock mutex 'lock' */ 352 if (nondetermined) 353 { 354 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'lock' */ 355 ldv_mutex_lock = 2; 356 /* LDV_COMMENT_RETURN Finish with success*/ 357 return 0; 358 } 359 else 360 { 361 /* LDV_COMMENT_RETURN Finish with the fail. 
Mutex 'lock' is keeped unlocked */ 362 return -EINTR; 363 } 364 } 365 366 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_lock_lock') Check that mutex 'lock' was not locked and lock it */ 367 void ldv_mutex_lock_lock(struct mutex *lock) 368 { 369 /* LDV_COMMENT_ASSERT Mutex 'lock' must be unlocked */ 370 ldv_assert(ldv_mutex_lock == 1); 371 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'lock' */ 372 ldv_mutex_lock = 2; 373 } 374 375 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_trylock_lock') Check that mutex 'lock' was not locked and nondeterministically lock it. Return 0 on fails */ 376 int ldv_mutex_trylock_lock(struct mutex *lock) 377 { 378 int is_mutex_held_by_another_thread; 379 380 /* LDV_COMMENT_ASSERT It may be an error if mutex 'lock' is locked at this point */ 381 ldv_assert(ldv_mutex_lock == 1); 382 383 /* LDV_COMMENT_OTHER Construct nondetermined result */ 384 is_mutex_held_by_another_thread = ldv_undef_int(); 385 386 /* LDV_COMMENT_ASSERT Nondeterministically lock mutex 'lock' */ 387 if (is_mutex_held_by_another_thread) 388 { 389 /* LDV_COMMENT_RETURN Finish with fail */ 390 return 0; 391 } 392 else 393 { 394 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'lock' */ 395 ldv_mutex_lock = 2; 396 /* LDV_COMMENT_RETURN Finish with success */ 397 return 1; 398 } 399 } 400 401 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_atomic_dec_and_mutex_lock_lock') Lock mutex 'lock' if atomic decrement result is zero */ 402 int ldv_atomic_dec_and_mutex_lock_lock(atomic_t *cnt, struct mutex *lock) 403 { 404 int atomic_value_after_dec; 405 406 /* LDV_COMMENT_ASSERT Mutex 'lock' must be unlocked (since we may lock it in this function) */ 407 ldv_assert(ldv_mutex_lock == 1); 408 409 /* LDV_COMMENT_OTHER Assign the result of atomic decrement */ 410 atomic_value_after_dec = ldv_undef_int(); 411 412 /* LDV_COMMENT_ASSERT Check if atomic decrement returns zero */ 413 if (atomic_value_after_dec == 0) 414 { 415 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'lock', as atomic 
has decremented to zero */ 416 ldv_mutex_lock = 2; 417 /* LDV_COMMENT_RETURN Return 1 with locked mutex 'lock' */ 418 return 1; 419 } 420 421 /* LDV_COMMENT_RETURN Atomic decrement is still not zero, return 0 without locking mutex 'lock' */ 422 return 0; 423 } 424 425 /* TODO Syncronize with 39_7a ldv_spin_is_locked! */ 426 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_is_locked_lock') Check whether mutex 'lock' was locked */ 427 int ldv_mutex_is_locked_lock(struct mutex *lock) 428 { 429 int nondetermined; 430 431 if(ldv_mutex_lock == 1) 432 { 433 /* LDV_COMMENT_OTHER Construct nondetermined result */ 434 nondetermined = ldv_undef_int(); 435 436 /* LDV_COMMENT_ASSERT Nondeterministically understand whether mutex 'lock' was locked */ 437 if(nondetermined) 438 { 439 /* LDV_COMMENT_RETURN Mutex 'lock' was unlocked */ 440 return 0; 441 } 442 else 443 { 444 /* LDV_COMMENT_RETURN Mutex 'lock' was locked */ 445 return 1; 446 } 447 } 448 else 449 { 450 /* LDV_COMMENT_RETURN Mutex 'lock' was locked */ 451 return 1; 452 } 453 } 454 455 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_unlock_lock') Check that mutex 'lock' was locked and unlock it */ 456 void ldv_mutex_unlock_lock(struct mutex *lock) 457 { 458 /* LDV_COMMENT_ASSERT Mutex 'lock' must be locked */ 459 ldv_assert(ldv_mutex_lock == 2); 460 /* LDV_COMMENT_CHANGE_STATE Unlock mutex 'lock' */ 461 ldv_mutex_lock = 1; 462 } 463 464 static int ldv_mutex_mount_mutex_of_ceph_client; 465 466 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_lock_interruptible_mount_mutex_of_ceph_client') Check that mutex 'mount_mutex_of_ceph_client' was unlocked and nondeterministically lock it. 
Return the corresponding error code on fails */ 467 int ldv_mutex_lock_interruptible_mount_mutex_of_ceph_client(struct mutex *lock) 468 { 469 int nondetermined; 470 471 /* LDV_COMMENT_ASSERT Mutex 'mount_mutex_of_ceph_client' must be unlocked */ 472 ldv_assert(ldv_mutex_mount_mutex_of_ceph_client == 1); 473 474 /* LDV_COMMENT_OTHER Construct nondetermined result*/ 475 nondetermined = ldv_undef_int(); 476 477 /* LDV_COMMENT_ASSERT Nondeterministically lock mutex 'mount_mutex_of_ceph_client' */ 478 if (nondetermined) 479 { 480 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'mount_mutex_of_ceph_client' */ 481 ldv_mutex_mount_mutex_of_ceph_client = 2; 482 /* LDV_COMMENT_RETURN Finish with success */ 483 return 0; 484 } 485 else 486 { 487 /* LDV_COMMENT_RETURN Finish with fail. Mutex 'mount_mutex_of_ceph_client' is keeped unlocked */ 488 return -EINTR; 489 } 490 } 491 492 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_lock_killable_mount_mutex_of_ceph_client') Check that mutex 'mount_mutex_of_ceph_client' wasn unlocked and nondeterministically lock it. Return the corresponding error code on fails*/ 493 int ldv_mutex_lock_killable_mount_mutex_of_ceph_client(struct mutex *lock) 494 { 495 int nondetermined; 496 497 /* LDV_COMMENT_ASSERT Mutex 'mount_mutex_of_ceph_client' must be unlocked */ 498 ldv_assert(ldv_mutex_mount_mutex_of_ceph_client == 1); 499 500 /* LDV_COMMENT_OTHER Construct nondetermined result */ 501 nondetermined = ldv_undef_int(); 502 503 /* LDV_COMMENT_ASSERT Nondeterministically lock mutex 'mount_mutex_of_ceph_client' */ 504 if (nondetermined) 505 { 506 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'mount_mutex_of_ceph_client' */ 507 ldv_mutex_mount_mutex_of_ceph_client = 2; 508 /* LDV_COMMENT_RETURN Finish with success*/ 509 return 0; 510 } 511 else 512 { 513 /* LDV_COMMENT_RETURN Finish with the fail. 
Mutex 'mount_mutex_of_ceph_client' is keeped unlocked */ 514 return -EINTR; 515 } 516 } 517 518 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_lock_mount_mutex_of_ceph_client') Check that mutex 'mount_mutex_of_ceph_client' was not locked and lock it */ 519 void ldv_mutex_lock_mount_mutex_of_ceph_client(struct mutex *lock) 520 { 521 /* LDV_COMMENT_ASSERT Mutex 'mount_mutex_of_ceph_client' must be unlocked */ 522 ldv_assert(ldv_mutex_mount_mutex_of_ceph_client == 1); 523 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'mount_mutex_of_ceph_client' */ 524 ldv_mutex_mount_mutex_of_ceph_client = 2; 525 } 526 527 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_trylock_mount_mutex_of_ceph_client') Check that mutex 'mount_mutex_of_ceph_client' was not locked and nondeterministically lock it. Return 0 on fails */ 528 int ldv_mutex_trylock_mount_mutex_of_ceph_client(struct mutex *lock) 529 { 530 int is_mutex_held_by_another_thread; 531 532 /* LDV_COMMENT_ASSERT It may be an error if mutex 'mount_mutex_of_ceph_client' is locked at this point */ 533 ldv_assert(ldv_mutex_mount_mutex_of_ceph_client == 1); 534 535 /* LDV_COMMENT_OTHER Construct nondetermined result */ 536 is_mutex_held_by_another_thread = ldv_undef_int(); 537 538 /* LDV_COMMENT_ASSERT Nondeterministically lock mutex 'mount_mutex_of_ceph_client' */ 539 if (is_mutex_held_by_another_thread) 540 { 541 /* LDV_COMMENT_RETURN Finish with fail */ 542 return 0; 543 } 544 else 545 { 546 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'mount_mutex_of_ceph_client' */ 547 ldv_mutex_mount_mutex_of_ceph_client = 2; 548 /* LDV_COMMENT_RETURN Finish with success */ 549 return 1; 550 } 551 } 552 553 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_atomic_dec_and_mutex_lock_mount_mutex_of_ceph_client') Lock mutex 'mount_mutex_of_ceph_client' if atomic decrement result is zero */ 554 int ldv_atomic_dec_and_mutex_lock_mount_mutex_of_ceph_client(atomic_t *cnt, struct mutex *lock) 555 { 556 int atomic_value_after_dec; 557 558 /* 
LDV_COMMENT_ASSERT Mutex 'mount_mutex_of_ceph_client' must be unlocked (since we may lock it in this function) */ 559 ldv_assert(ldv_mutex_mount_mutex_of_ceph_client == 1); 560 561 /* LDV_COMMENT_OTHER Assign the result of atomic decrement */ 562 atomic_value_after_dec = ldv_undef_int(); 563 564 /* LDV_COMMENT_ASSERT Check if atomic decrement returns zero */ 565 if (atomic_value_after_dec == 0) 566 { 567 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'mount_mutex_of_ceph_client', as atomic has decremented to zero */ 568 ldv_mutex_mount_mutex_of_ceph_client = 2; 569 /* LDV_COMMENT_RETURN Return 1 with locked mutex 'mount_mutex_of_ceph_client' */ 570 return 1; 571 } 572 573 /* LDV_COMMENT_RETURN Atomic decrement is still not zero, return 0 without locking mutex 'mount_mutex_of_ceph_client' */ 574 return 0; 575 } 576 577 /* TODO Syncronize with 39_7a ldv_spin_is_locked! */ 578 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_is_locked_mount_mutex_of_ceph_client') Check whether mutex 'mount_mutex_of_ceph_client' was locked */ 579 int ldv_mutex_is_locked_mount_mutex_of_ceph_client(struct mutex *lock) 580 { 581 int nondetermined; 582 583 if(ldv_mutex_mount_mutex_of_ceph_client == 1) 584 { 585 /* LDV_COMMENT_OTHER Construct nondetermined result */ 586 nondetermined = ldv_undef_int(); 587 588 /* LDV_COMMENT_ASSERT Nondeterministically understand whether mutex 'mount_mutex_of_ceph_client' was locked */ 589 if(nondetermined) 590 { 591 /* LDV_COMMENT_RETURN Mutex 'mount_mutex_of_ceph_client' was unlocked */ 592 return 0; 593 } 594 else 595 { 596 /* LDV_COMMENT_RETURN Mutex 'mount_mutex_of_ceph_client' was locked */ 597 return 1; 598 } 599 } 600 else 601 { 602 /* LDV_COMMENT_RETURN Mutex 'mount_mutex_of_ceph_client' was locked */ 603 return 1; 604 } 605 } 606 607 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_unlock_mount_mutex_of_ceph_client') Check that mutex 'mount_mutex_of_ceph_client' was locked and unlock it */ 608 void 
ldv_mutex_unlock_mount_mutex_of_ceph_client(struct mutex *lock) 609 { 610 /* LDV_COMMENT_ASSERT Mutex 'mount_mutex_of_ceph_client' must be locked */ 611 ldv_assert(ldv_mutex_mount_mutex_of_ceph_client == 2); 612 /* LDV_COMMENT_CHANGE_STATE Unlock mutex 'mount_mutex_of_ceph_client' */ 613 ldv_mutex_mount_mutex_of_ceph_client = 1; 614 } 615 616 static int ldv_mutex_mutex_of_ceph_auth_client; 617 618 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_lock_interruptible_mutex_of_ceph_auth_client') Check that mutex 'mutex_of_ceph_auth_client' was unlocked and nondeterministically lock it. Return the corresponding error code on fails */ 619 int ldv_mutex_lock_interruptible_mutex_of_ceph_auth_client(struct mutex *lock) 620 { 621 int nondetermined; 622 623 /* LDV_COMMENT_ASSERT Mutex 'mutex_of_ceph_auth_client' must be unlocked */ 624 ldv_assert(ldv_mutex_mutex_of_ceph_auth_client == 1); 625 626 /* LDV_COMMENT_OTHER Construct nondetermined result*/ 627 nondetermined = ldv_undef_int(); 628 629 /* LDV_COMMENT_ASSERT Nondeterministically lock mutex 'mutex_of_ceph_auth_client' */ 630 if (nondetermined) 631 { 632 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'mutex_of_ceph_auth_client' */ 633 ldv_mutex_mutex_of_ceph_auth_client = 2; 634 /* LDV_COMMENT_RETURN Finish with success */ 635 return 0; 636 } 637 else 638 { 639 /* LDV_COMMENT_RETURN Finish with fail. Mutex 'mutex_of_ceph_auth_client' is keeped unlocked */ 640 return -EINTR; 641 } 642 } 643 644 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_lock_killable_mutex_of_ceph_auth_client') Check that mutex 'mutex_of_ceph_auth_client' wasn unlocked and nondeterministically lock it. 
Return the corresponding error code on fails*/ 645 int ldv_mutex_lock_killable_mutex_of_ceph_auth_client(struct mutex *lock) 646 { 647 int nondetermined; 648 649 /* LDV_COMMENT_ASSERT Mutex 'mutex_of_ceph_auth_client' must be unlocked */ 650 ldv_assert(ldv_mutex_mutex_of_ceph_auth_client == 1); 651 652 /* LDV_COMMENT_OTHER Construct nondetermined result */ 653 nondetermined = ldv_undef_int(); 654 655 /* LDV_COMMENT_ASSERT Nondeterministically lock mutex 'mutex_of_ceph_auth_client' */ 656 if (nondetermined) 657 { 658 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'mutex_of_ceph_auth_client' */ 659 ldv_mutex_mutex_of_ceph_auth_client = 2; 660 /* LDV_COMMENT_RETURN Finish with success*/ 661 return 0; 662 } 663 else 664 { 665 /* LDV_COMMENT_RETURN Finish with the fail. Mutex 'mutex_of_ceph_auth_client' is keeped unlocked */ 666 return -EINTR; 667 } 668 } 669 670 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_lock_mutex_of_ceph_auth_client') Check that mutex 'mutex_of_ceph_auth_client' was not locked and lock it */ 671 void ldv_mutex_lock_mutex_of_ceph_auth_client(struct mutex *lock) 672 { 673 /* LDV_COMMENT_ASSERT Mutex 'mutex_of_ceph_auth_client' must be unlocked */ 674 ldv_assert(ldv_mutex_mutex_of_ceph_auth_client == 1); 675 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'mutex_of_ceph_auth_client' */ 676 ldv_mutex_mutex_of_ceph_auth_client = 2; 677 } 678 679 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_trylock_mutex_of_ceph_auth_client') Check that mutex 'mutex_of_ceph_auth_client' was not locked and nondeterministically lock it. 
Return 0 on fails */ 680 int ldv_mutex_trylock_mutex_of_ceph_auth_client(struct mutex *lock) 681 { 682 int is_mutex_held_by_another_thread; 683 684 /* LDV_COMMENT_ASSERT It may be an error if mutex 'mutex_of_ceph_auth_client' is locked at this point */ 685 ldv_assert(ldv_mutex_mutex_of_ceph_auth_client == 1); 686 687 /* LDV_COMMENT_OTHER Construct nondetermined result */ 688 is_mutex_held_by_another_thread = ldv_undef_int(); 689 690 /* LDV_COMMENT_ASSERT Nondeterministically lock mutex 'mutex_of_ceph_auth_client' */ 691 if (is_mutex_held_by_another_thread) 692 { 693 /* LDV_COMMENT_RETURN Finish with fail */ 694 return 0; 695 } 696 else 697 { 698 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'mutex_of_ceph_auth_client' */ 699 ldv_mutex_mutex_of_ceph_auth_client = 2; 700 /* LDV_COMMENT_RETURN Finish with success */ 701 return 1; 702 } 703 } 704 705 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_atomic_dec_and_mutex_lock_mutex_of_ceph_auth_client') Lock mutex 'mutex_of_ceph_auth_client' if atomic decrement result is zero */ 706 int ldv_atomic_dec_and_mutex_lock_mutex_of_ceph_auth_client(atomic_t *cnt, struct mutex *lock) 707 { 708 int atomic_value_after_dec; 709 710 /* LDV_COMMENT_ASSERT Mutex 'mutex_of_ceph_auth_client' must be unlocked (since we may lock it in this function) */ 711 ldv_assert(ldv_mutex_mutex_of_ceph_auth_client == 1); 712 713 /* LDV_COMMENT_OTHER Assign the result of atomic decrement */ 714 atomic_value_after_dec = ldv_undef_int(); 715 716 /* LDV_COMMENT_ASSERT Check if atomic decrement returns zero */ 717 if (atomic_value_after_dec == 0) 718 { 719 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'mutex_of_ceph_auth_client', as atomic has decremented to zero */ 720 ldv_mutex_mutex_of_ceph_auth_client = 2; 721 /* LDV_COMMENT_RETURN Return 1 with locked mutex 'mutex_of_ceph_auth_client' */ 722 return 1; 723 } 724 725 /* LDV_COMMENT_RETURN Atomic decrement is still not zero, return 0 without locking mutex 'mutex_of_ceph_auth_client' */ 726 return 0; 727 } 728 729 
/* TODO Syncronize with 39_7a ldv_spin_is_locked! */ 730 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_is_locked_mutex_of_ceph_auth_client') Check whether mutex 'mutex_of_ceph_auth_client' was locked */ 731 int ldv_mutex_is_locked_mutex_of_ceph_auth_client(struct mutex *lock) 732 { 733 int nondetermined; 734 735 if(ldv_mutex_mutex_of_ceph_auth_client == 1) 736 { 737 /* LDV_COMMENT_OTHER Construct nondetermined result */ 738 nondetermined = ldv_undef_int(); 739 740 /* LDV_COMMENT_ASSERT Nondeterministically understand whether mutex 'mutex_of_ceph_auth_client' was locked */ 741 if(nondetermined) 742 { 743 /* LDV_COMMENT_RETURN Mutex 'mutex_of_ceph_auth_client' was unlocked */ 744 return 0; 745 } 746 else 747 { 748 /* LDV_COMMENT_RETURN Mutex 'mutex_of_ceph_auth_client' was locked */ 749 return 1; 750 } 751 } 752 else 753 { 754 /* LDV_COMMENT_RETURN Mutex 'mutex_of_ceph_auth_client' was locked */ 755 return 1; 756 } 757 } 758 759 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_unlock_mutex_of_ceph_auth_client') Check that mutex 'mutex_of_ceph_auth_client' was locked and unlock it */ 760 void ldv_mutex_unlock_mutex_of_ceph_auth_client(struct mutex *lock) 761 { 762 /* LDV_COMMENT_ASSERT Mutex 'mutex_of_ceph_auth_client' must be locked */ 763 ldv_assert(ldv_mutex_mutex_of_ceph_auth_client == 2); 764 /* LDV_COMMENT_CHANGE_STATE Unlock mutex 'mutex_of_ceph_auth_client' */ 765 ldv_mutex_mutex_of_ceph_auth_client = 1; 766 } 767 768 static int ldv_mutex_mutex_of_ceph_connection; 769 770 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_lock_interruptible_mutex_of_ceph_connection') Check that mutex 'mutex_of_ceph_connection' was unlocked and nondeterministically lock it. 
Return the corresponding error code on fails */ 771 int ldv_mutex_lock_interruptible_mutex_of_ceph_connection(struct mutex *lock) 772 { 773 int nondetermined; 774 775 /* LDV_COMMENT_ASSERT Mutex 'mutex_of_ceph_connection' must be unlocked */ 776 ldv_assert(ldv_mutex_mutex_of_ceph_connection == 1); 777 778 /* LDV_COMMENT_OTHER Construct nondetermined result*/ 779 nondetermined = ldv_undef_int(); 780 781 /* LDV_COMMENT_ASSERT Nondeterministically lock mutex 'mutex_of_ceph_connection' */ 782 if (nondetermined) 783 { 784 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'mutex_of_ceph_connection' */ 785 ldv_mutex_mutex_of_ceph_connection = 2; 786 /* LDV_COMMENT_RETURN Finish with success */ 787 return 0; 788 } 789 else 790 { 791 /* LDV_COMMENT_RETURN Finish with fail. Mutex 'mutex_of_ceph_connection' is keeped unlocked */ 792 return -EINTR; 793 } 794 } 795 796 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_lock_killable_mutex_of_ceph_connection') Check that mutex 'mutex_of_ceph_connection' wasn unlocked and nondeterministically lock it. Return the corresponding error code on fails*/ 797 int ldv_mutex_lock_killable_mutex_of_ceph_connection(struct mutex *lock) 798 { 799 int nondetermined; 800 801 /* LDV_COMMENT_ASSERT Mutex 'mutex_of_ceph_connection' must be unlocked */ 802 ldv_assert(ldv_mutex_mutex_of_ceph_connection == 1); 803 804 /* LDV_COMMENT_OTHER Construct nondetermined result */ 805 nondetermined = ldv_undef_int(); 806 807 /* LDV_COMMENT_ASSERT Nondeterministically lock mutex 'mutex_of_ceph_connection' */ 808 if (nondetermined) 809 { 810 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'mutex_of_ceph_connection' */ 811 ldv_mutex_mutex_of_ceph_connection = 2; 812 /* LDV_COMMENT_RETURN Finish with success*/ 813 return 0; 814 } 815 else 816 { 817 /* LDV_COMMENT_RETURN Finish with the fail. 
Mutex 'mutex_of_ceph_connection' is keeped unlocked */ 818 return -EINTR; 819 } 820 } 821 822 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_lock_mutex_of_ceph_connection') Check that mutex 'mutex_of_ceph_connection' was not locked and lock it */ 823 void ldv_mutex_lock_mutex_of_ceph_connection(struct mutex *lock) 824 { 825 /* LDV_COMMENT_ASSERT Mutex 'mutex_of_ceph_connection' must be unlocked */ 826 ldv_assert(ldv_mutex_mutex_of_ceph_connection == 1); 827 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'mutex_of_ceph_connection' */ 828 ldv_mutex_mutex_of_ceph_connection = 2; 829 } 830 831 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_trylock_mutex_of_ceph_connection') Check that mutex 'mutex_of_ceph_connection' was not locked and nondeterministically lock it. Return 0 on fails */ 832 int ldv_mutex_trylock_mutex_of_ceph_connection(struct mutex *lock) 833 { 834 int is_mutex_held_by_another_thread; 835 836 /* LDV_COMMENT_ASSERT It may be an error if mutex 'mutex_of_ceph_connection' is locked at this point */ 837 ldv_assert(ldv_mutex_mutex_of_ceph_connection == 1); 838 839 /* LDV_COMMENT_OTHER Construct nondetermined result */ 840 is_mutex_held_by_another_thread = ldv_undef_int(); 841 842 /* LDV_COMMENT_ASSERT Nondeterministically lock mutex 'mutex_of_ceph_connection' */ 843 if (is_mutex_held_by_another_thread) 844 { 845 /* LDV_COMMENT_RETURN Finish with fail */ 846 return 0; 847 } 848 else 849 { 850 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'mutex_of_ceph_connection' */ 851 ldv_mutex_mutex_of_ceph_connection = 2; 852 /* LDV_COMMENT_RETURN Finish with success */ 853 return 1; 854 } 855 } 856 857 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_atomic_dec_and_mutex_lock_mutex_of_ceph_connection') Lock mutex 'mutex_of_ceph_connection' if atomic decrement result is zero */ 858 int ldv_atomic_dec_and_mutex_lock_mutex_of_ceph_connection(atomic_t *cnt, struct mutex *lock) 859 { 860 int atomic_value_after_dec; 861 862 /* LDV_COMMENT_ASSERT Mutex 
'mutex_of_ceph_connection' must be unlocked (since we may lock it in this function) */ 863 ldv_assert(ldv_mutex_mutex_of_ceph_connection == 1); 864 865 /* LDV_COMMENT_OTHER Assign the result of atomic decrement */ 866 atomic_value_after_dec = ldv_undef_int(); 867 868 /* LDV_COMMENT_ASSERT Check if atomic decrement returns zero */ 869 if (atomic_value_after_dec == 0) 870 { 871 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'mutex_of_ceph_connection', as atomic has decremented to zero */ 872 ldv_mutex_mutex_of_ceph_connection = 2; 873 /* LDV_COMMENT_RETURN Return 1 with locked mutex 'mutex_of_ceph_connection' */ 874 return 1; 875 } 876 877 /* LDV_COMMENT_RETURN Atomic decrement is still not zero, return 0 without locking mutex 'mutex_of_ceph_connection' */ 878 return 0; 879 } 880 881 /* TODO Syncronize with 39_7a ldv_spin_is_locked! */ 882 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_is_locked_mutex_of_ceph_connection') Check whether mutex 'mutex_of_ceph_connection' was locked */ 883 int ldv_mutex_is_locked_mutex_of_ceph_connection(struct mutex *lock) 884 { 885 int nondetermined; 886 887 if(ldv_mutex_mutex_of_ceph_connection == 1) 888 { 889 /* LDV_COMMENT_OTHER Construct nondetermined result */ 890 nondetermined = ldv_undef_int(); 891 892 /* LDV_COMMENT_ASSERT Nondeterministically understand whether mutex 'mutex_of_ceph_connection' was locked */ 893 if(nondetermined) 894 { 895 /* LDV_COMMENT_RETURN Mutex 'mutex_of_ceph_connection' was unlocked */ 896 return 0; 897 } 898 else 899 { 900 /* LDV_COMMENT_RETURN Mutex 'mutex_of_ceph_connection' was locked */ 901 return 1; 902 } 903 } 904 else 905 { 906 /* LDV_COMMENT_RETURN Mutex 'mutex_of_ceph_connection' was locked */ 907 return 1; 908 } 909 } 910 911 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_unlock_mutex_of_ceph_connection') Check that mutex 'mutex_of_ceph_connection' was locked and unlock it */ 912 void ldv_mutex_unlock_mutex_of_ceph_connection(struct mutex *lock) 913 { 914 /* LDV_COMMENT_ASSERT 
Mutex 'mutex_of_ceph_connection' must be locked */ 915 ldv_assert(ldv_mutex_mutex_of_ceph_connection == 2); 916 /* LDV_COMMENT_CHANGE_STATE Unlock mutex 'mutex_of_ceph_connection' */ 917 ldv_mutex_mutex_of_ceph_connection = 1; 918 } 919 920 static int ldv_mutex_mutex_of_ceph_mon_client; 921 922 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_lock_interruptible_mutex_of_ceph_mon_client') Check that mutex 'mutex_of_ceph_mon_client' was unlocked and nondeterministically lock it. Return the corresponding error code on fails */ 923 int ldv_mutex_lock_interruptible_mutex_of_ceph_mon_client(struct mutex *lock) 924 { 925 int nondetermined; 926 927 /* LDV_COMMENT_ASSERT Mutex 'mutex_of_ceph_mon_client' must be unlocked */ 928 ldv_assert(ldv_mutex_mutex_of_ceph_mon_client == 1); 929 930 /* LDV_COMMENT_OTHER Construct nondetermined result*/ 931 nondetermined = ldv_undef_int(); 932 933 /* LDV_COMMENT_ASSERT Nondeterministically lock mutex 'mutex_of_ceph_mon_client' */ 934 if (nondetermined) 935 { 936 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'mutex_of_ceph_mon_client' */ 937 ldv_mutex_mutex_of_ceph_mon_client = 2; 938 /* LDV_COMMENT_RETURN Finish with success */ 939 return 0; 940 } 941 else 942 { 943 /* LDV_COMMENT_RETURN Finish with fail. Mutex 'mutex_of_ceph_mon_client' is keeped unlocked */ 944 return -EINTR; 945 } 946 } 947 948 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_lock_killable_mutex_of_ceph_mon_client') Check that mutex 'mutex_of_ceph_mon_client' wasn unlocked and nondeterministically lock it. 
Return the corresponding error code on fails*/ 949 int ldv_mutex_lock_killable_mutex_of_ceph_mon_client(struct mutex *lock) 950 { 951 int nondetermined; 952 953 /* LDV_COMMENT_ASSERT Mutex 'mutex_of_ceph_mon_client' must be unlocked */ 954 ldv_assert(ldv_mutex_mutex_of_ceph_mon_client == 1); 955 956 /* LDV_COMMENT_OTHER Construct nondetermined result */ 957 nondetermined = ldv_undef_int(); 958 959 /* LDV_COMMENT_ASSERT Nondeterministically lock mutex 'mutex_of_ceph_mon_client' */ 960 if (nondetermined) 961 { 962 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'mutex_of_ceph_mon_client' */ 963 ldv_mutex_mutex_of_ceph_mon_client = 2; 964 /* LDV_COMMENT_RETURN Finish with success*/ 965 return 0; 966 } 967 else 968 { 969 /* LDV_COMMENT_RETURN Finish with the fail. Mutex 'mutex_of_ceph_mon_client' is keeped unlocked */ 970 return -EINTR; 971 } 972 } 973 974 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_lock_mutex_of_ceph_mon_client') Check that mutex 'mutex_of_ceph_mon_client' was not locked and lock it */ 975 void ldv_mutex_lock_mutex_of_ceph_mon_client(struct mutex *lock) 976 { 977 /* LDV_COMMENT_ASSERT Mutex 'mutex_of_ceph_mon_client' must be unlocked */ 978 ldv_assert(ldv_mutex_mutex_of_ceph_mon_client == 1); 979 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'mutex_of_ceph_mon_client' */ 980 ldv_mutex_mutex_of_ceph_mon_client = 2; 981 } 982 983 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_trylock_mutex_of_ceph_mon_client') Check that mutex 'mutex_of_ceph_mon_client' was not locked and nondeterministically lock it. 
Return 0 on fails */ 984 int ldv_mutex_trylock_mutex_of_ceph_mon_client(struct mutex *lock) 985 { 986 int is_mutex_held_by_another_thread; 987 988 /* LDV_COMMENT_ASSERT It may be an error if mutex 'mutex_of_ceph_mon_client' is locked at this point */ 989 ldv_assert(ldv_mutex_mutex_of_ceph_mon_client == 1); 990 991 /* LDV_COMMENT_OTHER Construct nondetermined result */ 992 is_mutex_held_by_another_thread = ldv_undef_int(); 993 994 /* LDV_COMMENT_ASSERT Nondeterministically lock mutex 'mutex_of_ceph_mon_client' */ 995 if (is_mutex_held_by_another_thread) 996 { 997 /* LDV_COMMENT_RETURN Finish with fail */ 998 return 0; 999 } 1000 else 1001 { 1002 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'mutex_of_ceph_mon_client' */ 1003 ldv_mutex_mutex_of_ceph_mon_client = 2; 1004 /* LDV_COMMENT_RETURN Finish with success */ 1005 return 1; 1006 } 1007 } 1008 1009 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_atomic_dec_and_mutex_lock_mutex_of_ceph_mon_client') Lock mutex 'mutex_of_ceph_mon_client' if atomic decrement result is zero */ 1010 int ldv_atomic_dec_and_mutex_lock_mutex_of_ceph_mon_client(atomic_t *cnt, struct mutex *lock) 1011 { 1012 int atomic_value_after_dec; 1013 1014 /* LDV_COMMENT_ASSERT Mutex 'mutex_of_ceph_mon_client' must be unlocked (since we may lock it in this function) */ 1015 ldv_assert(ldv_mutex_mutex_of_ceph_mon_client == 1); 1016 1017 /* LDV_COMMENT_OTHER Assign the result of atomic decrement */ 1018 atomic_value_after_dec = ldv_undef_int(); 1019 1020 /* LDV_COMMENT_ASSERT Check if atomic decrement returns zero */ 1021 if (atomic_value_after_dec == 0) 1022 { 1023 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'mutex_of_ceph_mon_client', as atomic has decremented to zero */ 1024 ldv_mutex_mutex_of_ceph_mon_client = 2; 1025 /* LDV_COMMENT_RETURN Return 1 with locked mutex 'mutex_of_ceph_mon_client' */ 1026 return 1; 1027 } 1028 1029 /* LDV_COMMENT_RETURN Atomic decrement is still not zero, return 0 without locking mutex 'mutex_of_ceph_mon_client' */ 1030 return 0; 
1031 } 1032 1033 /* TODO Syncronize with 39_7a ldv_spin_is_locked! */ 1034 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_is_locked_mutex_of_ceph_mon_client') Check whether mutex 'mutex_of_ceph_mon_client' was locked */ 1035 int ldv_mutex_is_locked_mutex_of_ceph_mon_client(struct mutex *lock) 1036 { 1037 int nondetermined; 1038 1039 if(ldv_mutex_mutex_of_ceph_mon_client == 1) 1040 { 1041 /* LDV_COMMENT_OTHER Construct nondetermined result */ 1042 nondetermined = ldv_undef_int(); 1043 1044 /* LDV_COMMENT_ASSERT Nondeterministically understand whether mutex 'mutex_of_ceph_mon_client' was locked */ 1045 if(nondetermined) 1046 { 1047 /* LDV_COMMENT_RETURN Mutex 'mutex_of_ceph_mon_client' was unlocked */ 1048 return 0; 1049 } 1050 else 1051 { 1052 /* LDV_COMMENT_RETURN Mutex 'mutex_of_ceph_mon_client' was locked */ 1053 return 1; 1054 } 1055 } 1056 else 1057 { 1058 /* LDV_COMMENT_RETURN Mutex 'mutex_of_ceph_mon_client' was locked */ 1059 return 1; 1060 } 1061 } 1062 1063 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_unlock_mutex_of_ceph_mon_client') Check that mutex 'mutex_of_ceph_mon_client' was locked and unlock it */ 1064 void ldv_mutex_unlock_mutex_of_ceph_mon_client(struct mutex *lock) 1065 { 1066 /* LDV_COMMENT_ASSERT Mutex 'mutex_of_ceph_mon_client' must be locked */ 1067 ldv_assert(ldv_mutex_mutex_of_ceph_mon_client == 2); 1068 /* LDV_COMMENT_CHANGE_STATE Unlock mutex 'mutex_of_ceph_mon_client' */ 1069 ldv_mutex_mutex_of_ceph_mon_client = 1; 1070 } 1071 1072 static int ldv_mutex_mutex_of_device; 1073 1074 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_lock_interruptible_mutex_of_device') Check that mutex 'mutex_of_device' was unlocked and nondeterministically lock it. 
Return the corresponding error code on fails */ 1075 int ldv_mutex_lock_interruptible_mutex_of_device(struct mutex *lock) 1076 { 1077 int nondetermined; 1078 1079 /* LDV_COMMENT_ASSERT Mutex 'mutex_of_device' must be unlocked */ 1080 ldv_assert(ldv_mutex_mutex_of_device == 1); 1081 1082 /* LDV_COMMENT_OTHER Construct nondetermined result*/ 1083 nondetermined = ldv_undef_int(); 1084 1085 /* LDV_COMMENT_ASSERT Nondeterministically lock mutex 'mutex_of_device' */ 1086 if (nondetermined) 1087 { 1088 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'mutex_of_device' */ 1089 ldv_mutex_mutex_of_device = 2; 1090 /* LDV_COMMENT_RETURN Finish with success */ 1091 return 0; 1092 } 1093 else 1094 { 1095 /* LDV_COMMENT_RETURN Finish with fail. Mutex 'mutex_of_device' is keeped unlocked */ 1096 return -EINTR; 1097 } 1098 } 1099 1100 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_lock_killable_mutex_of_device') Check that mutex 'mutex_of_device' wasn unlocked and nondeterministically lock it. Return the corresponding error code on fails*/ 1101 int ldv_mutex_lock_killable_mutex_of_device(struct mutex *lock) 1102 { 1103 int nondetermined; 1104 1105 /* LDV_COMMENT_ASSERT Mutex 'mutex_of_device' must be unlocked */ 1106 ldv_assert(ldv_mutex_mutex_of_device == 1); 1107 1108 /* LDV_COMMENT_OTHER Construct nondetermined result */ 1109 nondetermined = ldv_undef_int(); 1110 1111 /* LDV_COMMENT_ASSERT Nondeterministically lock mutex 'mutex_of_device' */ 1112 if (nondetermined) 1113 { 1114 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'mutex_of_device' */ 1115 ldv_mutex_mutex_of_device = 2; 1116 /* LDV_COMMENT_RETURN Finish with success*/ 1117 return 0; 1118 } 1119 else 1120 { 1121 /* LDV_COMMENT_RETURN Finish with the fail. 
Mutex 'mutex_of_device' is keeped unlocked */ 1122 return -EINTR; 1123 } 1124 } 1125 1126 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_lock_mutex_of_device') Check that mutex 'mutex_of_device' was not locked and lock it */ 1127 void ldv_mutex_lock_mutex_of_device(struct mutex *lock) 1128 { 1129 /* LDV_COMMENT_ASSERT Mutex 'mutex_of_device' must be unlocked */ 1130 ldv_assert(ldv_mutex_mutex_of_device == 1); 1131 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'mutex_of_device' */ 1132 ldv_mutex_mutex_of_device = 2; 1133 } 1134 1135 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_trylock_mutex_of_device') Check that mutex 'mutex_of_device' was not locked and nondeterministically lock it. Return 0 on fails */ 1136 int ldv_mutex_trylock_mutex_of_device(struct mutex *lock) 1137 { 1138 int is_mutex_held_by_another_thread; 1139 1140 /* LDV_COMMENT_ASSERT It may be an error if mutex 'mutex_of_device' is locked at this point */ 1141 ldv_assert(ldv_mutex_mutex_of_device == 1); 1142 1143 /* LDV_COMMENT_OTHER Construct nondetermined result */ 1144 is_mutex_held_by_another_thread = ldv_undef_int(); 1145 1146 /* LDV_COMMENT_ASSERT Nondeterministically lock mutex 'mutex_of_device' */ 1147 if (is_mutex_held_by_another_thread) 1148 { 1149 /* LDV_COMMENT_RETURN Finish with fail */ 1150 return 0; 1151 } 1152 else 1153 { 1154 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'mutex_of_device' */ 1155 ldv_mutex_mutex_of_device = 2; 1156 /* LDV_COMMENT_RETURN Finish with success */ 1157 return 1; 1158 } 1159 } 1160 1161 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_atomic_dec_and_mutex_lock_mutex_of_device') Lock mutex 'mutex_of_device' if atomic decrement result is zero */ 1162 int ldv_atomic_dec_and_mutex_lock_mutex_of_device(atomic_t *cnt, struct mutex *lock) 1163 { 1164 int atomic_value_after_dec; 1165 1166 /* LDV_COMMENT_ASSERT Mutex 'mutex_of_device' must be unlocked (since we may lock it in this function) */ 1167 ldv_assert(ldv_mutex_mutex_of_device == 1); 1168 1169 /* 
LDV_COMMENT_OTHER Assign the result of atomic decrement */ 1170 atomic_value_after_dec = ldv_undef_int(); 1171 1172 /* LDV_COMMENT_ASSERT Check if atomic decrement returns zero */ 1173 if (atomic_value_after_dec == 0) 1174 { 1175 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'mutex_of_device', as atomic has decremented to zero */ 1176 ldv_mutex_mutex_of_device = 2; 1177 /* LDV_COMMENT_RETURN Return 1 with locked mutex 'mutex_of_device' */ 1178 return 1; 1179 } 1180 1181 /* LDV_COMMENT_RETURN Atomic decrement is still not zero, return 0 without locking mutex 'mutex_of_device' */ 1182 return 0; 1183 } 1184 1185 /* TODO Syncronize with 39_7a ldv_spin_is_locked! */ 1186 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_is_locked_mutex_of_device') Check whether mutex 'mutex_of_device' was locked */ 1187 int ldv_mutex_is_locked_mutex_of_device(struct mutex *lock) 1188 { 1189 int nondetermined; 1190 1191 if(ldv_mutex_mutex_of_device == 1) 1192 { 1193 /* LDV_COMMENT_OTHER Construct nondetermined result */ 1194 nondetermined = ldv_undef_int(); 1195 1196 /* LDV_COMMENT_ASSERT Nondeterministically understand whether mutex 'mutex_of_device' was locked */ 1197 if(nondetermined) 1198 { 1199 /* LDV_COMMENT_RETURN Mutex 'mutex_of_device' was unlocked */ 1200 return 0; 1201 } 1202 else 1203 { 1204 /* LDV_COMMENT_RETURN Mutex 'mutex_of_device' was locked */ 1205 return 1; 1206 } 1207 } 1208 else 1209 { 1210 /* LDV_COMMENT_RETURN Mutex 'mutex_of_device' was locked */ 1211 return 1; 1212 } 1213 } 1214 1215 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_unlock_mutex_of_device') Check that mutex 'mutex_of_device' was locked and unlock it */ 1216 void ldv_mutex_unlock_mutex_of_device(struct mutex *lock) 1217 { 1218 /* LDV_COMMENT_ASSERT Mutex 'mutex_of_device' must be locked */ 1219 ldv_assert(ldv_mutex_mutex_of_device == 2); 1220 /* LDV_COMMENT_CHANGE_STATE Unlock mutex 'mutex_of_device' */ 1221 ldv_mutex_mutex_of_device = 1; 1222 } 1223 1224 static int 
ldv_mutex_request_mutex_of_ceph_osd_client; 1225 1226 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_lock_interruptible_request_mutex_of_ceph_osd_client') Check that mutex 'request_mutex_of_ceph_osd_client' was unlocked and nondeterministically lock it. Return the corresponding error code on fails */ 1227 int ldv_mutex_lock_interruptible_request_mutex_of_ceph_osd_client(struct mutex *lock) 1228 { 1229 int nondetermined; 1230 1231 /* LDV_COMMENT_ASSERT Mutex 'request_mutex_of_ceph_osd_client' must be unlocked */ 1232 ldv_assert(ldv_mutex_request_mutex_of_ceph_osd_client == 1); 1233 1234 /* LDV_COMMENT_OTHER Construct nondetermined result*/ 1235 nondetermined = ldv_undef_int(); 1236 1237 /* LDV_COMMENT_ASSERT Nondeterministically lock mutex 'request_mutex_of_ceph_osd_client' */ 1238 if (nondetermined) 1239 { 1240 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'request_mutex_of_ceph_osd_client' */ 1241 ldv_mutex_request_mutex_of_ceph_osd_client = 2; 1242 /* LDV_COMMENT_RETURN Finish with success */ 1243 return 0; 1244 } 1245 else 1246 { 1247 /* LDV_COMMENT_RETURN Finish with fail. Mutex 'request_mutex_of_ceph_osd_client' is keeped unlocked */ 1248 return -EINTR; 1249 } 1250 } 1251 1252 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_lock_killable_request_mutex_of_ceph_osd_client') Check that mutex 'request_mutex_of_ceph_osd_client' wasn unlocked and nondeterministically lock it. 
Return the corresponding error code on fails*/ 1253 int ldv_mutex_lock_killable_request_mutex_of_ceph_osd_client(struct mutex *lock) 1254 { 1255 int nondetermined; 1256 1257 /* LDV_COMMENT_ASSERT Mutex 'request_mutex_of_ceph_osd_client' must be unlocked */ 1258 ldv_assert(ldv_mutex_request_mutex_of_ceph_osd_client == 1); 1259 1260 /* LDV_COMMENT_OTHER Construct nondetermined result */ 1261 nondetermined = ldv_undef_int(); 1262 1263 /* LDV_COMMENT_ASSERT Nondeterministically lock mutex 'request_mutex_of_ceph_osd_client' */ 1264 if (nondetermined) 1265 { 1266 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'request_mutex_of_ceph_osd_client' */ 1267 ldv_mutex_request_mutex_of_ceph_osd_client = 2; 1268 /* LDV_COMMENT_RETURN Finish with success*/ 1269 return 0; 1270 } 1271 else 1272 { 1273 /* LDV_COMMENT_RETURN Finish with the fail. Mutex 'request_mutex_of_ceph_osd_client' is keeped unlocked */ 1274 return -EINTR; 1275 } 1276 } 1277 1278 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_lock_request_mutex_of_ceph_osd_client') Check that mutex 'request_mutex_of_ceph_osd_client' was not locked and lock it */ 1279 void ldv_mutex_lock_request_mutex_of_ceph_osd_client(struct mutex *lock) 1280 { 1281 /* LDV_COMMENT_ASSERT Mutex 'request_mutex_of_ceph_osd_client' must be unlocked */ 1282 ldv_assert(ldv_mutex_request_mutex_of_ceph_osd_client == 1); 1283 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'request_mutex_of_ceph_osd_client' */ 1284 ldv_mutex_request_mutex_of_ceph_osd_client = 2; 1285 } 1286 1287 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_trylock_request_mutex_of_ceph_osd_client') Check that mutex 'request_mutex_of_ceph_osd_client' was not locked and nondeterministically lock it. 
Return 0 on fails */ 1288 int ldv_mutex_trylock_request_mutex_of_ceph_osd_client(struct mutex *lock) 1289 { 1290 int is_mutex_held_by_another_thread; 1291 1292 /* LDV_COMMENT_ASSERT It may be an error if mutex 'request_mutex_of_ceph_osd_client' is locked at this point */ 1293 ldv_assert(ldv_mutex_request_mutex_of_ceph_osd_client == 1); 1294 1295 /* LDV_COMMENT_OTHER Construct nondetermined result */ 1296 is_mutex_held_by_another_thread = ldv_undef_int(); 1297 1298 /* LDV_COMMENT_ASSERT Nondeterministically lock mutex 'request_mutex_of_ceph_osd_client' */ 1299 if (is_mutex_held_by_another_thread) 1300 { 1301 /* LDV_COMMENT_RETURN Finish with fail */ 1302 return 0; 1303 } 1304 else 1305 { 1306 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'request_mutex_of_ceph_osd_client' */ 1307 ldv_mutex_request_mutex_of_ceph_osd_client = 2; 1308 /* LDV_COMMENT_RETURN Finish with success */ 1309 return 1; 1310 } 1311 } 1312 1313 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_atomic_dec_and_mutex_lock_request_mutex_of_ceph_osd_client') Lock mutex 'request_mutex_of_ceph_osd_client' if atomic decrement result is zero */ 1314 int ldv_atomic_dec_and_mutex_lock_request_mutex_of_ceph_osd_client(atomic_t *cnt, struct mutex *lock) 1315 { 1316 int atomic_value_after_dec; 1317 1318 /* LDV_COMMENT_ASSERT Mutex 'request_mutex_of_ceph_osd_client' must be unlocked (since we may lock it in this function) */ 1319 ldv_assert(ldv_mutex_request_mutex_of_ceph_osd_client == 1); 1320 1321 /* LDV_COMMENT_OTHER Assign the result of atomic decrement */ 1322 atomic_value_after_dec = ldv_undef_int(); 1323 1324 /* LDV_COMMENT_ASSERT Check if atomic decrement returns zero */ 1325 if (atomic_value_after_dec == 0) 1326 { 1327 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'request_mutex_of_ceph_osd_client', as atomic has decremented to zero */ 1328 ldv_mutex_request_mutex_of_ceph_osd_client = 2; 1329 /* LDV_COMMENT_RETURN Return 1 with locked mutex 'request_mutex_of_ceph_osd_client' */ 1330 return 1; 1331 } 1332 1333 /* 
LDV_COMMENT_RETURN Atomic decrement is still not zero, return 0 without locking mutex 'request_mutex_of_ceph_osd_client' */ 1334 return 0; 1335 } 1336 1337 /* TODO Syncronize with 39_7a ldv_spin_is_locked! */ 1338 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_is_locked_request_mutex_of_ceph_osd_client') Check whether mutex 'request_mutex_of_ceph_osd_client' was locked */ 1339 int ldv_mutex_is_locked_request_mutex_of_ceph_osd_client(struct mutex *lock) 1340 { 1341 int nondetermined; 1342 1343 if(ldv_mutex_request_mutex_of_ceph_osd_client == 1) 1344 { 1345 /* LDV_COMMENT_OTHER Construct nondetermined result */ 1346 nondetermined = ldv_undef_int(); 1347 1348 /* LDV_COMMENT_ASSERT Nondeterministically understand whether mutex 'request_mutex_of_ceph_osd_client' was locked */ 1349 if(nondetermined) 1350 { 1351 /* LDV_COMMENT_RETURN Mutex 'request_mutex_of_ceph_osd_client' was unlocked */ 1352 return 0; 1353 } 1354 else 1355 { 1356 /* LDV_COMMENT_RETURN Mutex 'request_mutex_of_ceph_osd_client' was locked */ 1357 return 1; 1358 } 1359 } 1360 else 1361 { 1362 /* LDV_COMMENT_RETURN Mutex 'request_mutex_of_ceph_osd_client' was locked */ 1363 return 1; 1364 } 1365 } 1366 1367 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_unlock_request_mutex_of_ceph_osd_client') Check that mutex 'request_mutex_of_ceph_osd_client' was locked and unlock it */ 1368 void ldv_mutex_unlock_request_mutex_of_ceph_osd_client(struct mutex *lock) 1369 { 1370 /* LDV_COMMENT_ASSERT Mutex 'request_mutex_of_ceph_osd_client' must be locked */ 1371 ldv_assert(ldv_mutex_request_mutex_of_ceph_osd_client == 2); 1372 /* LDV_COMMENT_CHANGE_STATE Unlock mutex 'request_mutex_of_ceph_osd_client' */ 1373 ldv_mutex_request_mutex_of_ceph_osd_client = 1; 1374 } 1375 1376 1377 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_initialize') Make all mutexes unlocked at the beginning */ 1378 void ldv_initialize(void) 1379 { 1380 /* LDV_COMMENT_CHANGE_STATE Make mutex 'crush_scratch_mutex_of_ceph_osdmap' 
unlocked at the beginning */ 1381 ldv_mutex_crush_scratch_mutex_of_ceph_osdmap = 1; 1382 /* LDV_COMMENT_CHANGE_STATE Make mutex 'i_mutex_of_inode' unlocked at the beginning */ 1383 ldv_mutex_i_mutex_of_inode = 1; 1384 /* LDV_COMMENT_CHANGE_STATE Make mutex 'lock' unlocked at the beginning */ 1385 ldv_mutex_lock = 1; 1386 /* LDV_COMMENT_CHANGE_STATE Make mutex 'mount_mutex_of_ceph_client' unlocked at the beginning */ 1387 ldv_mutex_mount_mutex_of_ceph_client = 1; 1388 /* LDV_COMMENT_CHANGE_STATE Make mutex 'mutex_of_ceph_auth_client' unlocked at the beginning */ 1389 ldv_mutex_mutex_of_ceph_auth_client = 1; 1390 /* LDV_COMMENT_CHANGE_STATE Make mutex 'mutex_of_ceph_connection' unlocked at the beginning */ 1391 ldv_mutex_mutex_of_ceph_connection = 1; 1392 /* LDV_COMMENT_CHANGE_STATE Make mutex 'mutex_of_ceph_mon_client' unlocked at the beginning */ 1393 ldv_mutex_mutex_of_ceph_mon_client = 1; 1394 /* LDV_COMMENT_CHANGE_STATE Make mutex 'mutex_of_device' unlocked at the beginning */ 1395 ldv_mutex_mutex_of_device = 1; 1396 /* LDV_COMMENT_CHANGE_STATE Make mutex 'request_mutex_of_ceph_osd_client' unlocked at the beginning */ 1397 ldv_mutex_request_mutex_of_ceph_osd_client = 1; 1398 } 1399 1400 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_check_final_state') Check that all mutexes are unlocked at the end */ 1401 void ldv_check_final_state(void) 1402 { 1403 /* LDV_COMMENT_ASSERT Mutex 'crush_scratch_mutex_of_ceph_osdmap' must be unlocked at the end */ 1404 ldv_assert(ldv_mutex_crush_scratch_mutex_of_ceph_osdmap == 1); 1405 /* LDV_COMMENT_ASSERT Mutex 'i_mutex_of_inode' must be unlocked at the end */ 1406 ldv_assert(ldv_mutex_i_mutex_of_inode == 1); 1407 /* LDV_COMMENT_ASSERT Mutex 'lock' must be unlocked at the end */ 1408 ldv_assert(ldv_mutex_lock == 1); 1409 /* LDV_COMMENT_ASSERT Mutex 'mount_mutex_of_ceph_client' must be unlocked at the end */ 1410 ldv_assert(ldv_mutex_mount_mutex_of_ceph_client == 1); 1411 /* LDV_COMMENT_ASSERT Mutex 'mutex_of_ceph_auth_client' 
must be unlocked at the end */ 1412 ldv_assert(ldv_mutex_mutex_of_ceph_auth_client == 1); 1413 /* LDV_COMMENT_ASSERT Mutex 'mutex_of_ceph_connection' must be unlocked at the end */ 1414 ldv_assert(ldv_mutex_mutex_of_ceph_connection == 1); 1415 /* LDV_COMMENT_ASSERT Mutex 'mutex_of_ceph_mon_client' must be unlocked at the end */ 1416 ldv_assert(ldv_mutex_mutex_of_ceph_mon_client == 1); 1417 /* LDV_COMMENT_ASSERT Mutex 'mutex_of_device' must be unlocked at the end */ 1418 ldv_assert(ldv_mutex_mutex_of_device == 1); 1419 /* LDV_COMMENT_ASSERT Mutex 'request_mutex_of_ceph_osd_client' must be unlocked at the end */ 1420 ldv_assert(ldv_mutex_request_mutex_of_ceph_osd_client == 1); 1421 }
1 #ifndef _LDV_RCV_H_ 2 #define _LDV_RCV_H_ 3 4 /* If expr evaluates to zero, ldv_assert() causes a program to reach the error 5 label like the standard assert(). */ 6 #define ldv_assert(expr) ((expr) ? 0 : ldv_error()) 7 8 /* The error label wrapper. It is used because of some static verifiers (like 9 BLAST) don't accept multiple error labels through a program. */ 10 static inline void ldv_error(void) 11 { 12 LDV_ERROR: goto LDV_ERROR; 13 } 14 15 /* If expr evaluates to zero, ldv_assume() causes an infinite loop that is 16 avoided by verifiers. */ 17 #define ldv_assume(expr) ((expr) ? 0 : ldv_stop()) 18 19 /* Infinite loop, that causes verifiers to skip such paths. */ 20 static inline void ldv_stop(void) { 21 LDV_STOP: goto LDV_STOP; 22 } 23 24 /* Special nondeterministic functions. */ 25 int ldv_undef_int(void); 26 void *ldv_undef_ptr(void); 27 unsigned long ldv_undef_ulong(void); 28 /* Return nondeterministic negative integer number. */ 29 static inline int ldv_undef_int_negative(void) 30 { 31 int ret = ldv_undef_int(); 32 33 ldv_assume(ret < 0); 34 35 return ret; 36 } 37 /* Return nondeterministic nonpositive integer number. */ 38 static inline int ldv_undef_int_nonpositive(void) 39 { 40 int ret = ldv_undef_int(); 41 42 ldv_assume(ret <= 0); 43 44 return ret; 45 } 46 47 /* Add explicit model for __builin_expect GCC function. Without the model a 48 return value will be treated as nondetermined by verifiers. */ 49 long __builtin_expect(long exp, long c) 50 { 51 return exp; 52 } 53 54 /* This function causes the program to exit abnormally. GCC implements this 55 function by using a target-dependent mechanism (such as intentionally executing 56 an illegal instruction) or by calling abort. The mechanism used may vary from 57 release to release so you should not rely on any particular implementation. 
58 http://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html */ 59 void __builtin_trap(void) 60 { 61 ldv_assert(0); 62 } 63 64 /* The constant is for simulating an error of ldv_undef_ptr() function. */ 65 #define LDV_PTR_MAX 2012 66 67 #endif /* _LDV_RCV_H_ */

Here is an explanation of a rule violation that arose while checking your driver against the corresponding kernel.

Note that it may be a false positive, i.e. there may be no real error at all. Please analyze the given error trace and the related source code to determine whether there is an actual error in your driver.

The Error trace column contains a path along which the given rule is violated. You can expand/collapse entity classes by clicking the corresponding checkboxes in the main menu or in the advanced Others menu. You can also expand/collapse each particular entity by clicking +/-. Hovering over some entities shows tips. The error trace is also linked to the related source code: line numbers may be shown as links on the left, and clicking them opens the corresponding lines in the source code.

The Source code column contains the content of the files related to the error trace: the source code of your driver (note that there are some LDV modifications at the end), kernel headers and the rule model. Tabs show the currently opened file and the other available files. Hovering over a tab shows the full file name; clicking it shows the corresponding file's content.

Kernel Module Rule Verifier Verdict Status Timestamp
linux-3.17-rc1.tar.xz net/ceph/libceph.ko 32_7a CPAchecker Bug Unreported 2014-12-12 13:08:54

Comment

In ceph_build_auth() the "&ac->mutex" mutex is acquired. Then, if "ac->protocol" is not set, this "ac" is passed as a parameter to ceph_auth_build_hello(), which tries to acquire the "&ac->mutex" mutex again. This is a deadlock. It was reported on 29 Jul 2013, but this code has since been reimplemented.

[Home]