Error Trace

Bug #101

Error trace
__CPAchecker_initialize()
{
20 typedef unsigned char __u8;
23 typedef unsigned short __u16;
25 typedef int __s32;
26 typedef unsigned int __u32;
30 typedef unsigned long long __u64;
15 typedef signed char s8;
16 typedef unsigned char u8;
19 typedef unsigned short u16;
21 typedef int s32;
22 typedef unsigned int u32;
24 typedef long long s64;
25 typedef unsigned long long u64;
14 typedef long __kernel_long_t;
15 typedef unsigned long __kernel_ulong_t;
27 typedef int __kernel_pid_t;
48 typedef unsigned int __kernel_uid32_t;
49 typedef unsigned int __kernel_gid32_t;
71 typedef __kernel_ulong_t __kernel_size_t;
72 typedef __kernel_long_t __kernel_ssize_t;
87 typedef long long __kernel_loff_t;
88 typedef __kernel_long_t __kernel_time_t;
89 typedef __kernel_long_t __kernel_clock_t;
90 typedef int __kernel_timer_t;
91 typedef int __kernel_clockid_t;
255 struct kernel_symbol { unsigned long value; const char *name; } ;
33 struct module ;
12 typedef __u32 __kernel_dev_t;
15 typedef __kernel_dev_t dev_t;
18 typedef unsigned short umode_t;
21 typedef __kernel_pid_t pid_t;
26 typedef __kernel_clockid_t clockid_t;
29 typedef _Bool bool;
31 typedef __kernel_uid32_t uid_t;
32 typedef __kernel_gid32_t gid_t;
45 typedef __kernel_loff_t loff_t;
54 typedef __kernel_size_t size_t;
59 typedef __kernel_ssize_t ssize_t;
69 typedef __kernel_time_t time_t;
102 typedef __s32 int32_t;
108 typedef __u32 uint32_t;
133 typedef unsigned long sector_t;
134 typedef unsigned long blkcnt_t;
157 typedef unsigned int gfp_t;
158 typedef unsigned int fmode_t;
159 typedef unsigned int oom_flags_t;
162 typedef u64 phys_addr_t;
167 typedef phys_addr_t resource_size_t;
177 struct __anonstruct_atomic_t_6 { int counter; } ;
177 typedef struct __anonstruct_atomic_t_6 atomic_t;
182 struct __anonstruct_atomic64_t_7 { long counter; } ;
182 typedef struct __anonstruct_atomic64_t_7 atomic64_t;
183 struct list_head { struct list_head *next; struct list_head *prev; } ;
188 struct hlist_node ;
188 struct hlist_head { struct hlist_node *first; } ;
192 struct hlist_node { struct hlist_node *next; struct hlist_node **pprev; } ;
203 struct callback_head { struct callback_head *next; void (*func)(struct callback_head *); } ;
70 struct pt_regs { unsigned long r15; unsigned long r14; unsigned long r13; unsigned long r12; unsigned long bp; unsigned long bx; unsigned long r11; unsigned long r10; unsigned long r9; unsigned long r8; unsigned long ax; unsigned long cx; unsigned long dx; unsigned long si; unsigned long di; unsigned long orig_ax; unsigned long ip; unsigned long cs; unsigned long flags; unsigned long sp; unsigned long ss; } ;
66 struct __anonstruct____missing_field_name_9 { unsigned int a; unsigned int b; } ;
66 struct __anonstruct____missing_field_name_10 { u16 limit0; u16 base0; unsigned char base1; unsigned char type; unsigned char s; unsigned char dpl; unsigned char p; unsigned char limit; unsigned char avl; unsigned char l; unsigned char d; unsigned char g; unsigned char base2; } ;
66 union __anonunion____missing_field_name_8 { struct __anonstruct____missing_field_name_9 __annonCompField4; struct __anonstruct____missing_field_name_10 __annonCompField5; } ;
66 struct desc_struct { union __anonunion____missing_field_name_8 __annonCompField6; } ;
12 typedef unsigned long pteval_t;
13 typedef unsigned long pmdval_t;
15 typedef unsigned long pgdval_t;
16 typedef unsigned long pgprotval_t;
18 struct __anonstruct_pte_t_11 { pteval_t pte; } ;
18 typedef struct __anonstruct_pte_t_11 pte_t;
20 struct pgprot { pgprotval_t pgprot; } ;
218 typedef struct pgprot pgprot_t;
220 struct __anonstruct_pgd_t_12 { pgdval_t pgd; } ;
220 typedef struct __anonstruct_pgd_t_12 pgd_t;
259 struct __anonstruct_pmd_t_14 { pmdval_t pmd; } ;
259 typedef struct __anonstruct_pmd_t_14 pmd_t;
393 struct page ;
393 typedef struct page *pgtable_t;
404 struct file ;
417 struct seq_file ;
455 struct thread_struct ;
457 struct mm_struct ;
458 struct task_struct ;
459 struct cpumask ;
20 struct qspinlock { atomic_t val; } ;
33 typedef struct qspinlock arch_spinlock_t;
34 struct qrwlock { atomic_t cnts; arch_spinlock_t wait_lock; } ;
14 typedef struct qrwlock arch_rwlock_t;
131 typedef void (*ctor_fn_t)();
234 struct _ddebug { const char *modname; const char *function; const char *filename; const char *format; unsigned int lineno; unsigned char flags; } ;
48 struct device ;
420 struct file_operations ;
432 struct completion ;
693 struct lockdep_map ;
19 struct math_emu_info { long ___orig_eip; struct pt_regs *regs; } ;
328 struct bug_entry { int bug_addr_disp; int file_disp; unsigned short line; unsigned short flags; } ;
102 struct cpumask { unsigned long bits[128U]; } ;
15 typedef struct cpumask cpumask_t;
652 typedef struct cpumask *cpumask_var_t;
260 struct fregs_state { u32 cwd; u32 swd; u32 twd; u32 fip; u32 fcs; u32 foo; u32 fos; u32 st_space[20U]; u32 status; } ;
26 struct __anonstruct____missing_field_name_30 { u64 rip; u64 rdp; } ;
26 struct __anonstruct____missing_field_name_31 { u32 fip; u32 fcs; u32 foo; u32 fos; } ;
26 union __anonunion____missing_field_name_29 { struct __anonstruct____missing_field_name_30 __annonCompField12; struct __anonstruct____missing_field_name_31 __annonCompField13; } ;
26 union __anonunion____missing_field_name_32 { u32 padding1[12U]; u32 sw_reserved[12U]; } ;
26 struct fxregs_state { u16 cwd; u16 swd; u16 twd; u16 fop; union __anonunion____missing_field_name_29 __annonCompField14; u32 mxcsr; u32 mxcsr_mask; u32 st_space[32U]; u32 xmm_space[64U]; u32 padding[12U]; union __anonunion____missing_field_name_32 __annonCompField15; } ;
66 struct swregs_state { u32 cwd; u32 swd; u32 twd; u32 fip; u32 fcs; u32 foo; u32 fos; u32 st_space[20U]; u8 ftop; u8 changed; u8 lookahead; u8 no_update; u8 rm; u8 alimit; struct math_emu_info *info; u32 entry_eip; } ;
214 struct xstate_header { u64 xfeatures; u64 xcomp_bv; u64 reserved[6U]; } ;
220 struct xregs_state { struct fxregs_state i387; struct xstate_header header; u8 extended_state_area[0U]; } ;
235 union fpregs_state { struct fregs_state fsave; struct fxregs_state fxsave; struct swregs_state soft; struct xregs_state xsave; u8 __padding[4096U]; } ;
252 struct fpu { unsigned int last_cpu; unsigned char fpstate_active; unsigned char fpregs_active; unsigned char counter; union fpregs_state state; } ;
170 struct seq_operations ;
369 struct perf_event ;
370 struct thread_struct { struct desc_struct tls_array[3U]; unsigned long sp0; unsigned long sp; unsigned short es; unsigned short ds; unsigned short fsindex; unsigned short gsindex; unsigned long fs; unsigned long gs; struct perf_event *ptrace_bps[4U]; unsigned long debugreg6; unsigned long ptrace_dr7; unsigned long cr2; unsigned long trap_nr; unsigned long error_code; unsigned long *io_bitmap_ptr; unsigned long iopl; unsigned int io_bitmap_max; struct fpu fpu; } ;
27 union __anonunion___u_46 { int __val; char __c[1U]; } ;
39 union __anonunion___u_48 { int __val; char __c[1U]; } ;
23 typedef atomic64_t atomic_long_t;
55 struct stack_trace { unsigned int nr_entries; unsigned int max_entries; unsigned long *entries; int skip; } ;
28 struct lockdep_subclass_key { char __one_byte; } ;
53 struct lock_class_key { struct lockdep_subclass_key subkeys[8U]; } ;
59 struct lock_class { struct list_head hash_entry; struct list_head lock_entry; struct lockdep_subclass_key *key; unsigned int subclass; unsigned int dep_gen_id; unsigned long usage_mask; struct stack_trace usage_traces[13U]; struct list_head locks_after; struct list_head locks_before; unsigned int version; unsigned long ops; const char *name; int name_version; unsigned long contention_point[4U]; unsigned long contending_point[4U]; } ;
144 struct lockdep_map { struct lock_class_key *key; struct lock_class *class_cache[2U]; const char *name; int cpu; unsigned long ip; } ;
205 struct held_lock { u64 prev_chain_key; unsigned long acquire_ip; struct lockdep_map *instance; struct lockdep_map *nest_lock; u64 waittime_stamp; u64 holdtime_stamp; unsigned short class_idx; unsigned char irq_context; unsigned char trylock; unsigned char read; unsigned char check; unsigned char hardirqs_off; unsigned short references; unsigned int pin_count; } ;
546 struct raw_spinlock { arch_spinlock_t raw_lock; unsigned int magic; unsigned int owner_cpu; void *owner; struct lockdep_map dep_map; } ;
32 typedef struct raw_spinlock raw_spinlock_t;
33 struct __anonstruct____missing_field_name_58 { u8 __padding[24U]; struct lockdep_map dep_map; } ;
33 union __anonunion____missing_field_name_57 { struct raw_spinlock rlock; struct __anonstruct____missing_field_name_58 __annonCompField18; } ;
33 struct spinlock { union __anonunion____missing_field_name_57 __annonCompField19; } ;
76 typedef struct spinlock spinlock_t;
23 struct __anonstruct_rwlock_t_59 { arch_rwlock_t raw_lock; unsigned int magic; unsigned int owner_cpu; void *owner; struct lockdep_map dep_map; } ;
23 typedef struct __anonstruct_rwlock_t_59 rwlock_t;
13 struct optimistic_spin_queue { atomic_t tail; } ;
39 struct mutex { atomic_t count; spinlock_t wait_lock; struct list_head wait_list; struct task_struct *owner; void *magic; struct lockdep_map dep_map; } ;
67 struct mutex_waiter { struct list_head list; struct task_struct *task; void *magic; } ;
41 struct attribute_group ;
47 struct nvdimm ;
48 struct nvdimm_bus_descriptor ;
51 struct nd_namespace_label ;
52 struct nvdimm_drvdata ;
53 struct nd_mapping { struct nvdimm *nvdimm; struct nd_namespace_label **labels; u64 start; u64 size; struct nvdimm_drvdata *ndd; } ;
69 struct nvdimm_bus_descriptor { const struct attribute_group **attr_groups; unsigned long dsm_mask; char *provider_name; int (*ndctl)(struct nvdimm_bus_descriptor *, struct nvdimm *, unsigned int, void *, unsigned int); } ;
76 struct nd_cmd_desc { int in_num; int out_num; u32 in_sizes[4U]; int out_sizes[4U]; } ;
83 struct nd_interleave_set { u64 cookie; } ;
87 struct resource ;
87 struct nd_region_desc { struct resource *res; struct nd_mapping *nd_mapping; u16 num_mappings; const struct attribute_group **attr_groups; struct nd_interleave_set *nd_set; void *provider_data; int num_lanes; int numa_node; unsigned long flags; } ;
99 struct nvdimm_bus ;
100 struct nd_blk_region ;
101 struct nd_blk_region_desc { int (*enable)(struct nvdimm_bus *, struct device *); void (*disable)(struct nvdimm_bus *, struct device *); int (*do_io)(struct nd_blk_region *, resource_size_t , void *, u64 , int); struct nd_region_desc ndr_desc; } ;
126 struct nd_region ;
135 struct timespec ;
136 struct compat_timespec ;
137 struct __anonstruct_futex_61 { u32 *uaddr; u32 val; u32 flags; u32 bitset; u64 time; u32 *uaddr2; } ;
137 struct __anonstruct_nanosleep_62 { clockid_t clockid; struct timespec *rmtp; struct compat_timespec *compat_rmtp; u64 expires; } ;
137 struct pollfd ;
137 struct __anonstruct_poll_63 { struct pollfd *ufds; int nfds; int has_timeout; unsigned long tv_sec; unsigned long tv_nsec; } ;
137 union __anonunion____missing_field_name_60 { struct __anonstruct_futex_61 futex; struct __anonstruct_nanosleep_62 nanosleep; struct __anonstruct_poll_63 poll; } ;
137 struct restart_block { long int (*fn)(struct restart_block *); union __anonunion____missing_field_name_60 __annonCompField20; } ;
416 struct seqcount { unsigned int sequence; struct lockdep_map dep_map; } ;
52 typedef struct seqcount seqcount_t;
404 struct __anonstruct_seqlock_t_76 { struct seqcount seqcount; spinlock_t lock; } ;
404 typedef struct __anonstruct_seqlock_t_76 seqlock_t;
598 struct timespec { __kernel_time_t tv_sec; long tv_nsec; } ;
83 struct user_namespace ;
22 struct __anonstruct_kuid_t_77 { uid_t val; } ;
22 typedef struct __anonstruct_kuid_t_77 kuid_t;
27 struct __anonstruct_kgid_t_78 { gid_t val; } ;
27 typedef struct __anonstruct_kgid_t_78 kgid_t;
139 struct kstat { u64 ino; dev_t dev; umode_t mode; unsigned int nlink; kuid_t uid; kgid_t gid; dev_t rdev; loff_t size; struct timespec atime; struct timespec mtime; struct timespec ctime; unsigned long blksize; unsigned long long blocks; } ;
36 struct vm_area_struct ;
38 struct __wait_queue_head { spinlock_t lock; struct list_head task_list; } ;
43 typedef struct __wait_queue_head wait_queue_head_t;
95 struct __anonstruct_nodemask_t_79 { unsigned long bits[16U]; } ;
95 typedef struct __anonstruct_nodemask_t_79 nodemask_t;
733 struct rw_semaphore ;
734 struct rw_semaphore { long count; struct list_head wait_list; raw_spinlock_t wait_lock; struct optimistic_spin_queue osq; struct task_struct *owner; struct lockdep_map dep_map; } ;
172 struct completion { unsigned int done; wait_queue_head_t wait; } ;
446 union ktime { s64 tv64; } ;
41 typedef union ktime ktime_t;
1135 struct timer_list { struct hlist_node entry; unsigned long expires; void (*function)(unsigned long); unsigned long data; u32 flags; int slack; int start_pid; void *start_site; char start_comm[16U]; struct lockdep_map lockdep_map; } ;
238 struct hrtimer ;
239 enum hrtimer_restart ;
240 struct rb_node { unsigned long __rb_parent_color; struct rb_node *rb_right; struct rb_node *rb_left; } ;
41 struct rb_root { struct rb_node *rb_node; } ;
838 struct nsproxy ;
259 struct workqueue_struct ;
260 struct work_struct ;
54 struct work_struct { atomic_long_t data; struct list_head entry; void (*func)(struct work_struct *); struct lockdep_map lockdep_map; } ;
107 struct delayed_work { struct work_struct work; struct timer_list timer; struct workqueue_struct *wq; int cpu; } ;
64 struct resource { resource_size_t start; resource_size_t end; const char *name; unsigned long flags; struct resource *parent; struct resource *sibling; struct resource *child; } ;
58 struct pm_message { int event; } ;
64 typedef struct pm_message pm_message_t;
65 struct dev_pm_ops { int (*prepare)(struct device *); void (*complete)(struct device *); int (*suspend)(struct device *); int (*resume)(struct device *); int (*freeze)(struct device *); int (*thaw)(struct device *); int (*poweroff)(struct device *); int (*restore)(struct device *); int (*suspend_late)(struct device *); int (*resume_early)(struct device *); int (*freeze_late)(struct device *); int (*thaw_early)(struct device *); int (*poweroff_late)(struct device *); int (*restore_early)(struct device *); int (*suspend_noirq)(struct device *); int (*resume_noirq)(struct device *); int (*freeze_noirq)(struct device *); int (*thaw_noirq)(struct device *); int (*poweroff_noirq)(struct device *); int (*restore_noirq)(struct device *); int (*runtime_suspend)(struct device *); int (*runtime_resume)(struct device *); int (*runtime_idle)(struct device *); } ;
320 enum rpm_status { RPM_ACTIVE = 0, RPM_RESUMING = 1, RPM_SUSPENDED = 2, RPM_SUSPENDING = 3 } ;
327 enum rpm_request { RPM_REQ_NONE = 0, RPM_REQ_IDLE = 1, RPM_REQ_SUSPEND = 2, RPM_REQ_AUTOSUSPEND = 3, RPM_REQ_RESUME = 4 } ;
335 struct wakeup_source ;
336 struct wake_irq ;
337 struct pm_domain_data ;
338 struct pm_subsys_data { spinlock_t lock; unsigned int refcount; struct list_head clock_list; struct pm_domain_data *domain_data; } ;
556 struct dev_pm_qos ;
556 struct dev_pm_info { pm_message_t power_state; unsigned char can_wakeup; unsigned char async_suspend; bool is_prepared; bool is_suspended; bool is_noirq_suspended; bool is_late_suspended; bool ignore_children; bool early_init; bool direct_complete; spinlock_t lock; struct list_head entry; struct completion completion; struct wakeup_source *wakeup; bool wakeup_path; bool syscore; struct timer_list suspend_timer; unsigned long timer_expires; struct work_struct work; wait_queue_head_t wait_queue; struct wake_irq *wakeirq; atomic_t usage_count; atomic_t child_count; unsigned char disable_depth; unsigned char idle_notification; unsigned char request_pending; unsigned char deferred_resume; unsigned char run_wake; unsigned char runtime_auto; unsigned char no_callbacks; unsigned char irq_safe; unsigned char use_autosuspend; unsigned char timer_autosuspends; unsigned char memalloc_noio; enum rpm_request request; enum rpm_status runtime_status; int runtime_error; int autosuspend_delay; unsigned long last_busy; unsigned long active_jiffies; unsigned long suspended_jiffies; unsigned long accounting_timestamp; struct pm_subsys_data *subsys_data; void (*set_latency_tolerance)(struct device *, s32 ); struct dev_pm_qos *qos; } ;
615 struct dev_pm_domain { struct dev_pm_ops ops; void (*detach)(struct device *, bool ); int (*activate)(struct device *); void (*sync)(struct device *); void (*dismiss)(struct device *); } ;
25 struct ldt_struct ;
25 struct __anonstruct_mm_context_t_148 { struct ldt_struct *ldt; unsigned short ia32_compat; struct mutex lock; void *vdso; atomic_t perf_rdpmc_allowed; } ;
25 typedef struct __anonstruct_mm_context_t_148 mm_context_t;
22 struct bio_vec ;
1218 struct llist_node ;
64 struct llist_node { struct llist_node *next; } ;
37 struct cred ;
19 struct inode ;
58 struct arch_uprobe_task { unsigned long saved_scratch_register; unsigned int saved_trap_nr; unsigned int saved_tf; } ;
66 enum uprobe_task_state { UTASK_RUNNING = 0, UTASK_SSTEP = 1, UTASK_SSTEP_ACK = 2, UTASK_SSTEP_TRAPPED = 3 } ;
73 struct __anonstruct____missing_field_name_184 { struct arch_uprobe_task autask; unsigned long vaddr; } ;
73 struct __anonstruct____missing_field_name_185 { struct callback_head dup_xol_work; unsigned long dup_xol_addr; } ;
73 union __anonunion____missing_field_name_183 { struct __anonstruct____missing_field_name_184 __annonCompField35; struct __anonstruct____missing_field_name_185 __annonCompField36; } ;
73 struct uprobe ;
73 struct return_instance ;
73 struct uprobe_task { enum uprobe_task_state state; union __anonunion____missing_field_name_183 __annonCompField37; struct uprobe *active_uprobe; unsigned long xol_vaddr; struct return_instance *return_instances; unsigned int depth; } ;
94 struct return_instance { struct uprobe *uprobe; unsigned long func; unsigned long stack; unsigned long orig_ret_vaddr; bool chained; struct return_instance *next; } ;
110 struct xol_area ;
111 struct uprobes_state { struct xol_area *xol_area; } ;
150 struct address_space ;
151 struct mem_cgroup ;
152 union __anonunion____missing_field_name_186 { struct address_space *mapping; void *s_mem; } ;
152 union __anonunion____missing_field_name_188 { unsigned long index; void *freelist; } ;
152 struct __anonstruct____missing_field_name_192 { unsigned short inuse; unsigned short objects; unsigned char frozen; } ;
152 union __anonunion____missing_field_name_191 { atomic_t _mapcount; struct __anonstruct____missing_field_name_192 __annonCompField40; int units; } ;
152 struct __anonstruct____missing_field_name_190 { union __anonunion____missing_field_name_191 __annonCompField41; atomic_t _count; } ;
152 union __anonunion____missing_field_name_189 { unsigned long counters; struct __anonstruct____missing_field_name_190 __annonCompField42; unsigned int active; } ;
152 struct __anonstruct____missing_field_name_187 { union __anonunion____missing_field_name_188 __annonCompField39; union __anonunion____missing_field_name_189 __annonCompField43; } ;
152 struct __anonstruct____missing_field_name_194 { struct page *next; int pages; int pobjects; } ;
152 struct __anonstruct____missing_field_name_195 { unsigned long compound_head; unsigned int compound_dtor; unsigned int compound_order; } ;
152 struct __anonstruct____missing_field_name_196 { unsigned long __pad; pgtable_t pmd_huge_pte; } ;
152 union __anonunion____missing_field_name_193 { struct list_head lru; struct __anonstruct____missing_field_name_194 __annonCompField45; struct callback_head callback_head; struct __anonstruct____missing_field_name_195 __annonCompField46; struct __anonstruct____missing_field_name_196 __annonCompField47; } ;
152 struct kmem_cache ;
152 union __anonunion____missing_field_name_197 { unsigned long private; spinlock_t *ptl; struct kmem_cache *slab_cache; } ;
152 struct page { unsigned long flags; union __anonunion____missing_field_name_186 __annonCompField38; struct __anonstruct____missing_field_name_187 __annonCompField44; union __anonunion____missing_field_name_193 __annonCompField48; union __anonunion____missing_field_name_197 __annonCompField49; struct mem_cgroup *mem_cgroup; } ;
194 struct page_frag { struct page *page; __u32 offset; __u32 size; } ;
279 struct userfaultfd_ctx ;
279 struct vm_userfaultfd_ctx { struct userfaultfd_ctx *ctx; } ;
286 struct __anonstruct_shared_198 { struct rb_node rb; unsigned long rb_subtree_last; } ;
286 struct anon_vma ;
286 struct vm_operations_struct ;
286 struct mempolicy ;
286 struct vm_area_struct { unsigned long vm_start; unsigned long vm_end; struct vm_area_struct *vm_next; struct vm_area_struct *vm_prev; struct rb_node vm_rb; unsigned long rb_subtree_gap; struct mm_struct *vm_mm; pgprot_t vm_page_prot; unsigned long vm_flags; struct __anonstruct_shared_198 shared; struct list_head anon_vma_chain; struct anon_vma *anon_vma; const struct vm_operations_struct *vm_ops; unsigned long vm_pgoff; struct file *vm_file; void *vm_private_data; struct mempolicy *vm_policy; struct vm_userfaultfd_ctx vm_userfaultfd_ctx; } ;
359 struct core_thread { struct task_struct *task; struct core_thread *next; } ;
364 struct core_state { atomic_t nr_threads; struct core_thread dumper; struct completion startup; } ;
377 struct task_rss_stat { int events; int count[3U]; } ;
385 struct mm_rss_stat { atomic_long_t count[3U]; } ;
390 struct kioctx_table ;
391 struct linux_binfmt ;
391 struct mmu_notifier_mm ;
391 struct mm_struct { struct vm_area_struct *mmap; struct rb_root mm_rb; u32 vmacache_seqnum; unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); unsigned long mmap_base; unsigned long mmap_legacy_base; unsigned long task_size; unsigned long highest_vm_end; pgd_t *pgd; atomic_t mm_users; atomic_t mm_count; atomic_long_t nr_ptes; atomic_long_t nr_pmds; int map_count; spinlock_t page_table_lock; struct rw_semaphore mmap_sem; struct list_head mmlist; unsigned long hiwater_rss; unsigned long hiwater_vm; unsigned long total_vm; unsigned long locked_vm; unsigned long pinned_vm; unsigned long shared_vm; unsigned long exec_vm; unsigned long stack_vm; unsigned long def_flags; unsigned long start_code; unsigned long end_code; unsigned long start_data; unsigned long end_data; unsigned long start_brk; unsigned long brk; unsigned long start_stack; unsigned long arg_start; unsigned long arg_end; unsigned long env_start; unsigned long env_end; unsigned long saved_auxv[46U]; struct mm_rss_stat rss_stat; struct linux_binfmt *binfmt; cpumask_var_t cpu_vm_mask_var; mm_context_t context; unsigned long flags; struct core_state *core_state; spinlock_t ioctx_lock; struct kioctx_table *ioctx_table; struct task_struct *owner; struct file *exe_file; struct mmu_notifier_mm *mmu_notifier_mm; struct cpumask cpumask_allocation; unsigned long numa_next_scan; unsigned long numa_scan_offset; int numa_scan_seq; bool tlb_flush_pending; struct uprobes_state uprobes_state; void *bd_addr; atomic_long_t hugetlb_usage; } ;
15 typedef __u64 Elf64_Addr;
16 typedef __u16 Elf64_Half;
20 typedef __u32 Elf64_Word;
21 typedef __u64 Elf64_Xword;
190 struct elf64_sym { Elf64_Word st_name; unsigned char st_info; unsigned char st_other; Elf64_Half st_shndx; Elf64_Addr st_value; Elf64_Xword st_size; } ;
198 typedef struct elf64_sym Elf64_Sym;
53 union __anonunion____missing_field_name_203 { unsigned long bitmap[4U]; struct callback_head callback_head; } ;
53 struct idr_layer { int prefix; int layer; struct idr_layer *ary[256U]; int count; union __anonunion____missing_field_name_203 __annonCompField50; } ;
41 struct idr { struct idr_layer *hint; struct idr_layer *top; int layers; int cur; spinlock_t lock; int id_free_cnt; struct idr_layer *id_free; } ;
124 struct ida_bitmap { long nr_busy; unsigned long bitmap[15U]; } ;
153 struct ida { struct idr idr; struct ida_bitmap *free_bitmap; } ;
185 struct dentry ;
186 struct iattr ;
187 struct super_block ;
188 struct file_system_type ;
189 struct kernfs_open_node ;
190 struct kernfs_iattrs ;
213 struct kernfs_root ;
213 struct kernfs_elem_dir { unsigned long subdirs; struct rb_root children; struct kernfs_root *root; } ;
85 struct kernfs_node ;
85 struct kernfs_elem_symlink { struct kernfs_node *target_kn; } ;
89 struct kernfs_ops ;
89 struct kernfs_elem_attr { const struct kernfs_ops *ops; struct kernfs_open_node *open; loff_t size; struct kernfs_node *notify_next; } ;
96 union __anonunion____missing_field_name_208 { struct kernfs_elem_dir dir; struct kernfs_elem_symlink symlink; struct kernfs_elem_attr attr; } ;
96 struct kernfs_node { atomic_t count; atomic_t active; struct lockdep_map dep_map; struct kernfs_node *parent; const char *name; struct rb_node rb; const void *ns; unsigned int hash; union __anonunion____missing_field_name_208 __annonCompField51; void *priv; unsigned short flags; umode_t mode; unsigned int ino; struct kernfs_iattrs *iattr; } ;
138 struct kernfs_syscall_ops { int (*remount_fs)(struct kernfs_root *, int *, char *); int (*show_options)(struct seq_file *, struct kernfs_root *); int (*mkdir)(struct kernfs_node *, const char *, umode_t ); int (*rmdir)(struct kernfs_node *); int (*rename)(struct kernfs_node *, struct kernfs_node *, const char *); } ;
155 struct kernfs_root { struct kernfs_node *kn; unsigned int flags; struct ida ino_ida; struct kernfs_syscall_ops *syscall_ops; struct list_head supers; wait_queue_head_t deactivate_waitq; } ;
171 struct kernfs_open_file { struct kernfs_node *kn; struct file *file; void *priv; struct mutex mutex; int event; struct list_head list; char *prealloc_buf; size_t atomic_write_len; bool mmapped; const struct vm_operations_struct *vm_ops; } ;
188 struct kernfs_ops { int (*seq_show)(struct seq_file *, void *); void * (*seq_start)(struct seq_file *, loff_t *); void * (*seq_next)(struct seq_file *, void *, loff_t *); void (*seq_stop)(struct seq_file *, void *); ssize_t (*read)(struct kernfs_open_file *, char *, size_t , loff_t ); size_t atomic_write_len; bool prealloc; ssize_t (*write)(struct kernfs_open_file *, char *, size_t , loff_t ); int (*mmap)(struct kernfs_open_file *, struct vm_area_struct *); struct lock_class_key lockdep_key; } ;
481 struct sock ;
482 struct kobject ;
483 enum kobj_ns_type { KOBJ_NS_TYPE_NONE = 0, KOBJ_NS_TYPE_NET = 1, KOBJ_NS_TYPES = 2 } ;
489 struct kobj_ns_type_operations { enum kobj_ns_type type; bool (*current_may_mount)(); void * (*grab_current_ns)(); const void * (*netlink_ns)(struct sock *); const void * (*initial_ns)(); void (*drop_ns)(void *); } ;
59 struct bin_attribute ;
60 struct attribute { const char *name; umode_t mode; bool ignore_lockdep; struct lock_class_key *key; struct lock_class_key skey; } ;
37 struct attribute_group { const char *name; umode_t (*is_visible)(struct kobject *, struct attribute *, int); umode_t (*is_bin_visible)(struct kobject *, struct bin_attribute *, int); struct attribute **attrs; struct bin_attribute **bin_attrs; } ;
92 struct bin_attribute { struct attribute attr; size_t size; void *private; ssize_t (*read)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t , size_t ); ssize_t (*write)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t , size_t ); int (*mmap)(struct file *, struct kobject *, struct bin_attribute *, struct vm_area_struct *); } ;
165 struct sysfs_ops { ssize_t (*show)(struct kobject *, struct attribute *, char *); ssize_t (*store)(struct kobject *, struct attribute *, const char *, size_t ); } ;
530 struct kref { atomic_t refcount; } ;
52 struct kset ;
52 struct kobj_type ;
52 struct kobject { const char *name; struct list_head entry; struct kobject *parent; struct kset *kset; struct kobj_type *ktype; struct kernfs_node *sd; struct kref kref; struct delayed_work release; unsigned char state_initialized; unsigned char state_in_sysfs; unsigned char state_add_uevent_sent; unsigned char state_remove_uevent_sent; unsigned char uevent_suppress; } ;
115 struct kobj_type { void (*release)(struct kobject *); const struct sysfs_ops *sysfs_ops; struct attribute **default_attrs; const struct kobj_ns_type_operations * (*child_ns_type)(struct kobject *); const void * (*namespace)(struct kobject *); } ;
123 struct kobj_uevent_env { char *argv[3U]; char *envp[32U]; int envp_idx; char buf[2048U]; int buflen; } ;
131 struct kset_uevent_ops { const int (*filter)(struct kset *, struct kobject *); const const char * (*name)(struct kset *, struct kobject *); const int (*uevent)(struct kset *, struct kobject *, struct kobj_uevent_env *); } ;
148 struct kset { struct list_head list; spinlock_t list_lock; struct kobject kobj; const struct kset_uevent_ops *uevent_ops; } ;
223 struct kernel_param ;
228 struct kernel_param_ops { unsigned int flags; int (*set)(const char *, const struct kernel_param *); int (*get)(char *, const struct kernel_param *); void (*free)(void *); } ;
62 struct kparam_string ;
62 struct kparam_array ;
62 union __anonunion____missing_field_name_209 { void *arg; const struct kparam_string *str; const struct kparam_array *arr; } ;
62 struct kernel_param { const char *name; struct module *mod; const struct kernel_param_ops *ops; const u16 perm; s8 level; u8 flags; union __anonunion____missing_field_name_209 __annonCompField52; } ;
83 struct kparam_string { unsigned int maxlen; char *string; } ;
89 struct kparam_array { unsigned int max; unsigned int elemsize; unsigned int *num; const struct kernel_param_ops *ops; void *elem; } ;
470 struct latch_tree_node { struct rb_node node[2U]; } ;
211 struct mod_arch_specific { } ;
38 struct module_param_attrs ;
38 struct module_kobject { struct kobject kobj; struct module *mod; struct kobject *drivers_dir; struct module_param_attrs *mp; struct completion *kobj_completion; } ;
48 struct module_attribute { struct attribute attr; ssize_t (*show)(struct module_attribute *, struct module_kobject *, char *); ssize_t (*store)(struct module_attribute *, struct module_kobject *, const char *, size_t ); void (*setup)(struct module *, const char *); int (*test)(struct module *); void (*free)(struct module *); } ;
74 struct exception_table_entry ;
290 enum module_state { MODULE_STATE_LIVE = 0, MODULE_STATE_COMING = 1, MODULE_STATE_GOING = 2, MODULE_STATE_UNFORMED = 3 } ;
297 struct mod_tree_node { struct module *mod; struct latch_tree_node node; } ;
304 struct module_sect_attrs ;
304 struct module_notes_attrs ;
304 struct tracepoint ;
304 struct trace_event_call ;
304 struct trace_enum_map ;
304 struct module { enum module_state state; struct list_head list; char name[56U]; struct module_kobject mkobj; struct module_attribute *modinfo_attrs; const char *version; const char *srcversion; struct kobject *holders_dir; const struct kernel_symbol *syms; const unsigned long *crcs; unsigned int num_syms; struct mutex param_lock; struct kernel_param *kp; unsigned int num_kp; unsigned int num_gpl_syms; const struct kernel_symbol *gpl_syms; const unsigned long *gpl_crcs; const struct kernel_symbol *unused_syms; const unsigned long *unused_crcs; unsigned int num_unused_syms; unsigned int num_unused_gpl_syms; const struct kernel_symbol *unused_gpl_syms; const unsigned long *unused_gpl_crcs; bool sig_ok; bool async_probe_requested; const struct kernel_symbol *gpl_future_syms; const unsigned long *gpl_future_crcs; unsigned int num_gpl_future_syms; unsigned int num_exentries; struct exception_table_entry *extable; int (*init)(); void *module_init; void *module_core; unsigned int init_size; unsigned int core_size; unsigned int init_text_size; unsigned int core_text_size; struct mod_tree_node mtn_core; struct mod_tree_node mtn_init; unsigned int init_ro_size; unsigned int core_ro_size; struct mod_arch_specific arch; unsigned int taints; unsigned int num_bugs; struct list_head bug_list; struct bug_entry *bug_table; Elf64_Sym *symtab; Elf64_Sym *core_symtab; unsigned int num_symtab; unsigned int core_num_syms; char *strtab; char *core_strtab; struct module_sect_attrs *sect_attrs; struct module_notes_attrs *notes_attrs; char *args; void *percpu; unsigned int percpu_size; unsigned int num_tracepoints; const struct tracepoint **tracepoints_ptrs; unsigned int num_trace_bprintk_fmt; const char **trace_bprintk_fmt_start; struct trace_event_call **trace_events; unsigned int num_trace_events; struct trace_enum_map **trace_enums; unsigned int num_trace_enums; bool klp_alive; struct list_head source_list; struct list_head target_list; void (*exit)(); atomic_t refcnt; ctor_fn_t (**ctors)(); unsigned int num_ctors; } ;
27 struct nd_cmd_dimm_flags { __u32 status; __u32 flags; } ;
22 struct kernel_cap_struct { __u32 cap[2U]; } ;
25 typedef struct kernel_cap_struct kernel_cap_t;
84 struct plist_node { int prio; struct list_head prio_list; struct list_head node_list; } ;
4 typedef unsigned long cputime_t;
25 struct sem_undo_list ;
25 struct sysv_sem { struct sem_undo_list *undo_list; } ;
78 struct user_struct ;
26 struct sysv_shm { struct list_head shm_clist; } ;
24 struct __anonstruct_sigset_t_219 { unsigned long sig[1U]; } ;
24 typedef struct __anonstruct_sigset_t_219 sigset_t;
25 struct siginfo ;
17 typedef void __signalfn_t(int);
18 typedef __signalfn_t *__sighandler_t;
20 typedef void __restorefn_t();
21 typedef __restorefn_t *__sigrestore_t;
34 union sigval { int sival_int; void *sival_ptr; } ;
10 typedef union sigval sigval_t;
11 struct __anonstruct__kill_221 { __kernel_pid_t _pid; __kernel_uid32_t _uid; } ;
11 struct __anonstruct__timer_222 { __kernel_timer_t _tid; int _overrun; char _pad[0U]; sigval_t _sigval; int _sys_private; } ;
11 struct __anonstruct__rt_223 { __kernel_pid_t _pid; __kernel_uid32_t _uid; sigval_t _sigval; } ;
11 struct __anonstruct__sigchld_224 { __kernel_pid_t _pid; __kernel_uid32_t _uid; int _status; __kernel_clock_t _utime; __kernel_clock_t _stime; } ;
11 struct __anonstruct__addr_bnd_226 { void *_lower; void *_upper; } ;
11 struct __anonstruct__sigfault_225 { void *_addr; short _addr_lsb; struct __anonstruct__addr_bnd_226 _addr_bnd; } ;
11 struct __anonstruct__sigpoll_227 { long _band; int _fd; } ;
11 struct __anonstruct__sigsys_228 { void *_call_addr; int _syscall; unsigned int _arch; } ;
11 union __anonunion__sifields_220 { int _pad[28U]; struct __anonstruct__kill_221 _kill; struct __anonstruct__timer_222 _timer; struct __anonstruct__rt_223 _rt; struct __anonstruct__sigchld_224 _sigchld; struct __anonstruct__sigfault_225 _sigfault; struct __anonstruct__sigpoll_227 _sigpoll; struct __anonstruct__sigsys_228 _sigsys; } ;
11 struct siginfo { int si_signo; int si_errno; int si_code; union __anonunion__sifields_220 _sifields; } ;
113 typedef struct siginfo siginfo_t;
22 struct sigpending { struct list_head list; sigset_t signal; } ;
243 struct sigaction { __sighandler_t sa_handler; unsigned long sa_flags; __sigrestore_t sa_restorer; sigset_t sa_mask; } ;
257 struct k_sigaction { struct sigaction sa; } ;
443 enum pid_type { PIDTYPE_PID = 0, PIDTYPE_PGID = 1, PIDTYPE_SID = 2, PIDTYPE_MAX = 3 } ;
450 struct pid_namespace ;
450 struct upid { int nr; struct pid_namespace *ns; struct hlist_node pid_chain; } ;
56 struct pid { atomic_t count; unsigned int level; struct hlist_head tasks[3U]; struct callback_head rcu; struct upid numbers[1U]; } ;
68 struct pid_link { struct hlist_node node; struct pid *pid; } ;
53 struct seccomp_filter ;
54 struct seccomp { int mode; struct seccomp_filter *filter; } ;
40 struct rt_mutex_waiter ;
41 struct rlimit { __kernel_ulong_t rlim_cur; __kernel_ulong_t rlim_max; } ;
11 struct timerqueue_node { struct rb_node node; ktime_t expires; } ;
12 struct timerqueue_head { struct rb_root head; struct timerqueue_node *next; } ;
50 struct hrtimer_clock_base ;
51 struct hrtimer_cpu_base ;
60 enum hrtimer_restart { HRTIMER_NORESTART = 0, HRTIMER_RESTART = 1 } ;
65 struct hrtimer { struct timerqueue_node node; ktime_t _softexpires; enum hrtimer_restart (*function)(struct hrtimer *); struct hrtimer_clock_base *base; unsigned long state; int start_pid; void *start_site; char start_comm[16U]; } ;
123 struct hrtimer_clock_base { struct hrtimer_cpu_base *cpu_base; int index; clockid_t clockid; struct timerqueue_head active; ktime_t (*get_time)(); ktime_t offset; } ;
156 struct hrtimer_cpu_base { raw_spinlock_t lock; seqcount_t seq; struct hrtimer *running; unsigned int cpu; unsigned int active_bases; unsigned int clock_was_set_seq; bool migration_enabled; bool nohz_active; unsigned char in_hrtirq; unsigned char hres_active; unsigned char hang_detected; ktime_t expires_next; struct hrtimer *next_timer; unsigned int nr_events; unsigned int nr_retries; unsigned int nr_hangs; unsigned int max_hang_time; struct hrtimer_clock_base clock_base[4U]; } ;
466 struct task_io_accounting { u64 rchar; u64 wchar; u64 syscr; u64 syscw; u64 read_bytes; u64 write_bytes; u64 cancelled_write_bytes; } ;
45 struct latency_record { unsigned long backtrace[12U]; unsigned int count; unsigned long time; unsigned long max; } ;
39 struct assoc_array_ptr ;
39 struct assoc_array { struct assoc_array_ptr *root; unsigned long nr_leaves_on_tree; } ;
31 typedef int32_t key_serial_t;
34 typedef uint32_t key_perm_t;
35 struct key ;
36 struct signal_struct ;
37 struct key_type ;
41 struct keyring_index_key { struct key_type *type; const char *description; size_t desc_len; } ;
91 union key_payload { void *rcu_data0; void *data[4U]; } ;
128 union __anonunion____missing_field_name_247 { struct list_head graveyard_link; struct rb_node serial_node; } ;
128 struct key_user ;
128 union __anonunion____missing_field_name_248 { time_t expiry; time_t revoked_at; } ;
128 struct __anonstruct____missing_field_name_250 { struct key_type *type; char *description; } ;
128 union __anonunion____missing_field_name_249 { struct keyring_index_key index_key; struct __anonstruct____missing_field_name_250 __annonCompField55; } ;
128 struct __anonstruct____missing_field_name_252 { struct list_head name_link; struct assoc_array keys; } ;
128 union __anonunion____missing_field_name_251 { union key_payload payload; struct __anonstruct____missing_field_name_252 __annonCompField57; int reject_error; } ;
128 struct key { atomic_t usage; key_serial_t serial; union __anonunion____missing_field_name_247 __annonCompField53; struct rw_semaphore sem; struct key_user *user; void *security; union __anonunion____missing_field_name_248 __annonCompField54; time_t last_used_at; kuid_t uid; kgid_t gid; key_perm_t perm; unsigned short quotalen; unsigned short datalen; unsigned long flags; union __anonunion____missing_field_name_249 __annonCompField56; union __anonunion____missing_field_name_251 __annonCompField58; } ;
353 struct audit_context ;
27 struct group_info { atomic_t usage; int ngroups; int nblocks; kgid_t small_block[32U]; kgid_t *blocks[0U]; } ;
90 struct cred { atomic_t usage; atomic_t subscribers; void *put_addr; unsigned int magic; kuid_t uid; kgid_t gid; kuid_t suid; kgid_t sgid; kuid_t euid; kgid_t egid; kuid_t fsuid; kgid_t fsgid; unsigned int securebits; kernel_cap_t cap_inheritable; kernel_cap_t cap_permitted; kernel_cap_t cap_effective; kernel_cap_t cap_bset; kernel_cap_t cap_ambient; unsigned char jit_keyring; struct key *session_keyring; struct key *process_keyring; struct key *thread_keyring; struct key *request_key_auth; void *security; struct user_struct *user; struct user_namespace *user_ns; struct group_info *group_info; struct callback_head rcu; } ;
377 struct percpu_ref ;
55 typedef void percpu_ref_func_t(struct percpu_ref *);
68 struct percpu_ref { atomic_long_t count; unsigned long percpu_count_ptr; percpu_ref_func_t *release; percpu_ref_func_t *confirm_switch; bool force_atomic; struct callback_head rcu; } ;
327 enum rcu_sync_type { RCU_SYNC = 0, RCU_SCHED_SYNC = 1, RCU_BH_SYNC = 2 } ;
333 struct rcu_sync { int gp_state; int gp_count; wait_queue_head_t gp_wait; int cb_state; struct callback_head cb_head; enum rcu_sync_type gp_type; } ;
65 struct percpu_rw_semaphore { struct rcu_sync rss; unsigned int *fast_read_ctr; struct rw_semaphore rw_sem; atomic_t slow_read_ctr; wait_queue_head_t write_waitq; } ;
54 struct cgroup ;
55 struct cgroup_root ;
56 struct cgroup_subsys ;
57 struct cgroup_taskset ;
105 struct cgroup_file { struct list_head node; struct kernfs_node *kn; } ;
96 struct cgroup_subsys_state { struct cgroup *cgroup; struct cgroup_subsys *ss; struct percpu_ref refcnt; struct cgroup_subsys_state *parent; struct list_head sibling; struct list_head children; int id; unsigned int flags; u64 serial_nr; struct list_head files; struct callback_head callback_head; struct work_struct destroy_work; } ;
144 struct css_set { atomic_t refcount; struct hlist_node hlist; struct list_head tasks; struct list_head mg_tasks; struct list_head cgrp_links; struct cgroup *dfl_cgrp; struct cgroup_subsys_state *subsys[13U]; struct list_head mg_preload_node; struct list_head mg_node; struct cgroup *mg_src_cgrp; struct css_set *mg_dst_cset; struct list_head e_cset_node[13U]; struct list_head task_iters; struct callback_head callback_head; } ;
220 struct cgroup { struct cgroup_subsys_state self; unsigned long flags; int id; int populated_cnt; struct kernfs_node *kn; struct cgroup_file procs_file; struct cgroup_file events_file; unsigned int subtree_control; unsigned int child_subsys_mask; struct cgroup_subsys_state *subsys[13U]; struct cgroup_root *root; struct list_head cset_links; struct list_head e_csets[13U]; struct list_head pidlists; struct mutex pidlist_mutex; wait_queue_head_t offline_waitq; struct work_struct release_agent_work; } ;
293 struct cgroup_root { struct kernfs_root *kf_root; unsigned int subsys_mask; int hierarchy_id; struct cgroup cgrp; atomic_t nr_cgrps; struct list_head root_list; unsigned int flags; struct idr cgroup_idr; char release_agent_path[4096U]; char name[64U]; } ;
329 struct cftype { char name[64U]; unsigned long private; size_t max_write_len; unsigned int flags; unsigned int file_offset; struct cgroup_subsys *ss; struct list_head node; struct kernfs_ops *kf_ops; u64 (*read_u64)(struct cgroup_subsys_state *, struct cftype *); s64 (*read_s64)(struct cgroup_subsys_state *, struct cftype *); int (*seq_show)(struct seq_file *, void *); void * (*seq_start)(struct seq_file *, loff_t *); void * (*seq_next)(struct seq_file *, void *, loff_t *); void (*seq_stop)(struct seq_file *, void *); int (*write_u64)(struct cgroup_subsys_state *, struct cftype *, u64 ); int (*write_s64)(struct cgroup_subsys_state *, struct cftype *, s64 ); ssize_t (*write)(struct kernfs_open_file *, char *, size_t , loff_t ); struct lock_class_key lockdep_key; } ;
414 struct cgroup_subsys { struct cgroup_subsys_state * (*css_alloc)(struct cgroup_subsys_state *); int (*css_online)(struct cgroup_subsys_state *); void (*css_offline)(struct cgroup_subsys_state *); void (*css_released)(struct cgroup_subsys_state *); void (*css_free)(struct cgroup_subsys_state *); void (*css_reset)(struct cgroup_subsys_state *); void (*css_e_css_changed)(struct cgroup_subsys_state *); int (*can_attach)(struct cgroup_subsys_state *, struct cgroup_taskset *); void (*cancel_attach)(struct cgroup_subsys_state *, struct cgroup_taskset *); void (*attach)(struct cgroup_subsys_state *, struct cgroup_taskset *); int (*can_fork)(struct task_struct *, void **); void (*cancel_fork)(struct task_struct *, void *); void (*fork)(struct task_struct *, void *); void (*exit)(struct task_struct *); void (*free)(struct task_struct *); void (*bind)(struct cgroup_subsys_state *); int early_init; bool broken_hierarchy; bool warned_broken_hierarchy; int id; const char *name; const char *legacy_name; struct cgroup_root *root; struct idr css_idr; struct list_head cfts; struct cftype *dfl_cftypes; struct cftype *legacy_cftypes; unsigned int depends_on; } ;
128 struct futex_pi_state ;
129 struct robust_list_head ;
130 struct bio_list ;
131 struct fs_struct ;
132 struct perf_event_context ;
133 struct blk_plug ;
135 struct nameidata ;
188 struct cfs_rq ;
189 struct task_group ;
478 struct sighand_struct { atomic_t count; struct k_sigaction action[64U]; spinlock_t siglock; wait_queue_head_t signalfd_wqh; } ;
519 struct pacct_struct { int ac_flag; long ac_exitcode; unsigned long ac_mem; cputime_t ac_utime; cputime_t ac_stime; unsigned long ac_minflt; unsigned long ac_majflt; } ;
527 struct cpu_itimer { cputime_t expires; cputime_t incr; u32 error; u32 incr_error; } ;
534 struct prev_cputime { cputime_t utime; cputime_t stime; raw_spinlock_t lock; } ;
559 struct task_cputime { cputime_t utime; cputime_t stime; unsigned long long sum_exec_runtime; } ;
575 struct task_cputime_atomic { atomic64_t utime; atomic64_t stime; atomic64_t sum_exec_runtime; } ;
597 struct thread_group_cputimer { struct task_cputime_atomic cputime_atomic; bool running; bool checking_timer; } ;
642 struct autogroup ;
643 struct tty_struct ;
643 struct taskstats ;
643 struct tty_audit_buf ;
643 struct signal_struct { atomic_t sigcnt; atomic_t live; int nr_threads; struct list_head thread_head; wait_queue_head_t wait_chldexit; struct task_struct *curr_target; struct sigpending shared_pending; int group_exit_code; int notify_count; struct task_struct *group_exit_task; int group_stop_count; unsigned int flags; unsigned char is_child_subreaper; unsigned char has_child_subreaper; int posix_timer_id; struct list_head posix_timers; struct hrtimer real_timer; struct pid *leader_pid; ktime_t it_real_incr; struct cpu_itimer it[2U]; struct thread_group_cputimer cputimer; struct task_cputime cputime_expires; struct list_head cpu_timers[3U]; struct pid *tty_old_pgrp; int leader; struct tty_struct *tty; struct autogroup *autogroup; seqlock_t stats_lock; cputime_t utime; cputime_t stime; cputime_t cutime; cputime_t cstime; cputime_t gtime; cputime_t cgtime; struct prev_cputime prev_cputime; unsigned long nvcsw; unsigned long nivcsw; unsigned long cnvcsw; unsigned long cnivcsw; unsigned long min_flt; unsigned long maj_flt; unsigned long cmin_flt; unsigned long cmaj_flt; unsigned long inblock; unsigned long oublock; unsigned long cinblock; unsigned long coublock; unsigned long maxrss; unsigned long cmaxrss; struct task_io_accounting ioac; unsigned long long sum_sched_runtime; struct rlimit rlim[16U]; struct pacct_struct pacct; struct taskstats *stats; unsigned int audit_tty; unsigned int audit_tty_log_passwd; struct tty_audit_buf *tty_audit_buf; oom_flags_t oom_flags; short oom_score_adj; short oom_score_adj_min; struct mutex cred_guard_mutex; } ;
810 struct user_struct { atomic_t __count; atomic_t processes; atomic_t sigpending; atomic_t inotify_watches; atomic_t inotify_devs; atomic_t fanotify_listeners; atomic_long_t epoll_watches; unsigned long mq_bytes; unsigned long locked_shm; struct key *uid_keyring; struct key *session_keyring; struct hlist_node uidhash_node; kuid_t uid; atomic_long_t locked_vm; } ;
853 struct backing_dev_info ;
854 struct reclaim_state ;
855 struct sched_info { unsigned long pcount; unsigned long long run_delay; unsigned long long last_arrival; unsigned long long last_queued; } ;
869 struct task_delay_info { spinlock_t lock; unsigned int flags; u64 blkio_start; u64 blkio_delay; u64 swapin_delay; u32 blkio_count; u32 swapin_count; u64 freepages_start; u64 freepages_delay; u32 freepages_count; } ;
917 struct wake_q_node { struct wake_q_node *next; } ;
1144 struct io_context ;
1178 struct pipe_inode_info ;
1180 struct load_weight { unsigned long weight; u32 inv_weight; } ;
1187 struct sched_avg { u64 last_update_time; u64 load_sum; u32 util_sum; u32 period_contrib; unsigned long load_avg; unsigned long util_avg; } ;
1207 struct sched_statistics { u64 wait_start; u64 wait_max; u64 wait_count; u64 wait_sum; u64 iowait_count; u64 iowait_sum; u64 sleep_start; u64 sleep_max; s64 sum_sleep_runtime; u64 block_start; u64 block_max; u64 exec_max; u64 slice_max; u64 nr_migrations_cold; u64 nr_failed_migrations_affine; u64 nr_failed_migrations_running; u64 nr_failed_migrations_hot; u64 nr_forced_migrations; u64 nr_wakeups; u64 nr_wakeups_sync; u64 nr_wakeups_migrate; u64 nr_wakeups_local; u64 nr_wakeups_remote; u64 nr_wakeups_affine; u64 nr_wakeups_affine_attempts; u64 nr_wakeups_passive; u64 nr_wakeups_idle; } ;
1242 struct sched_entity { struct load_weight load; struct rb_node run_node; struct list_head group_node; unsigned int on_rq; u64 exec_start; u64 sum_exec_runtime; u64 vruntime; u64 prev_sum_exec_runtime; u64 nr_migrations; struct sched_statistics statistics; int depth; struct sched_entity *parent; struct cfs_rq *cfs_rq; struct cfs_rq *my_q; struct sched_avg avg; } ;
1274 struct rt_rq ;
1274 struct sched_rt_entity { struct list_head run_list; unsigned long timeout; unsigned long watchdog_stamp; unsigned int time_slice; struct sched_rt_entity *back; struct sched_rt_entity *parent; struct rt_rq *rt_rq; struct rt_rq *my_q; } ;
1290 struct sched_dl_entity { struct rb_node rb_node; u64 dl_runtime; u64 dl_deadline; u64 dl_period; u64 dl_bw; s64 runtime; u64 deadline; unsigned int flags; int dl_throttled; int dl_new; int dl_boosted; int dl_yielded; struct hrtimer dl_timer; } ;
1358 struct tlbflush_unmap_batch { struct cpumask cpumask; bool flush_required; bool writable; } ;
1377 struct sched_class ;
1377 struct files_struct ;
1377 struct compat_robust_list_head ;
1377 struct numa_group ;
1377 struct task_struct { volatile long state; void *stack; atomic_t usage; unsigned int flags; unsigned int ptrace; struct llist_node wake_entry; int on_cpu; unsigned int wakee_flips; unsigned long wakee_flip_decay_ts; struct task_struct *last_wakee; int wake_cpu; int on_rq; int prio; int static_prio; int normal_prio; unsigned int rt_priority; const struct sched_class *sched_class; struct sched_entity se; struct sched_rt_entity rt; struct task_group *sched_task_group; struct sched_dl_entity dl; struct hlist_head preempt_notifiers; unsigned int policy; int nr_cpus_allowed; cpumask_t cpus_allowed; unsigned long rcu_tasks_nvcsw; bool rcu_tasks_holdout; struct list_head rcu_tasks_holdout_list; int rcu_tasks_idle_cpu; struct sched_info sched_info; struct list_head tasks; struct plist_node pushable_tasks; struct rb_node pushable_dl_tasks; struct mm_struct *mm; struct mm_struct *active_mm; u32 vmacache_seqnum; struct vm_area_struct *vmacache[4U]; struct task_rss_stat rss_stat; int exit_state; int exit_code; int exit_signal; int pdeath_signal; unsigned long jobctl; unsigned int personality; unsigned char in_execve; unsigned char in_iowait; unsigned char sched_reset_on_fork; unsigned char sched_contributes_to_load; unsigned char sched_migrated; unsigned char memcg_may_oom; unsigned char memcg_kmem_skip_account; unsigned char brk_randomized; unsigned long atomic_flags; struct restart_block restart_block; pid_t pid; pid_t tgid; struct task_struct *real_parent; struct task_struct *parent; struct list_head children; struct list_head sibling; struct task_struct *group_leader; struct list_head ptraced; struct list_head ptrace_entry; struct pid_link pids[3U]; struct list_head thread_group; struct list_head thread_node; struct completion *vfork_done; int *set_child_tid; int *clear_child_tid; cputime_t utime; cputime_t stime; cputime_t utimescaled; cputime_t stimescaled; cputime_t gtime; struct prev_cputime prev_cputime; unsigned long nvcsw; unsigned long nivcsw; u64 start_time; u64 real_start_time; unsigned long min_flt; unsigned long maj_flt; struct task_cputime cputime_expires; struct list_head cpu_timers[3U]; const struct cred *real_cred; const struct cred *cred; char comm[16U]; struct nameidata *nameidata; struct sysv_sem sysvsem; struct sysv_shm sysvshm; unsigned long last_switch_count; struct fs_struct *fs; struct files_struct *files; struct nsproxy *nsproxy; struct signal_struct *signal; struct sighand_struct *sighand; sigset_t blocked; sigset_t real_blocked; sigset_t saved_sigmask; struct sigpending pending; unsigned long sas_ss_sp; size_t sas_ss_size; struct callback_head *task_works; struct audit_context *audit_context; kuid_t loginuid; unsigned int sessionid; struct seccomp seccomp; u32 parent_exec_id; u32 self_exec_id; spinlock_t alloc_lock; raw_spinlock_t pi_lock; struct wake_q_node wake_q; struct rb_root pi_waiters; struct rb_node *pi_waiters_leftmost; struct rt_mutex_waiter *pi_blocked_on; struct mutex_waiter *blocked_on; unsigned int irq_events; unsigned long hardirq_enable_ip; unsigned long hardirq_disable_ip; unsigned int hardirq_enable_event; unsigned int hardirq_disable_event; int hardirqs_enabled; int hardirq_context; unsigned long softirq_disable_ip; unsigned long softirq_enable_ip; unsigned int softirq_disable_event; unsigned int softirq_enable_event; int softirqs_enabled; int softirq_context; u64 curr_chain_key; int lockdep_depth; unsigned int lockdep_recursion; struct held_lock held_locks[48U]; gfp_t lockdep_reclaim_gfp; void *journal_info; struct bio_list *bio_list; struct blk_plug *plug; struct reclaim_state *reclaim_state; struct backing_dev_info *backing_dev_info; struct io_context *io_context; unsigned long ptrace_message; siginfo_t *last_siginfo; struct task_io_accounting ioac; u64 acct_rss_mem1; u64 acct_vm_mem1; cputime_t acct_timexpd; nodemask_t mems_allowed; seqcount_t mems_allowed_seq; int cpuset_mem_spread_rotor; int cpuset_slab_spread_rotor; struct css_set *cgroups; struct list_head cg_list; struct robust_list_head *robust_list; struct compat_robust_list_head *compat_robust_list; struct list_head pi_state_list; struct futex_pi_state *pi_state_cache; struct perf_event_context *perf_event_ctxp[2U]; struct mutex perf_event_mutex; struct list_head perf_event_list; struct mempolicy *mempolicy; short il_next; short pref_node_fork; int numa_scan_seq; unsigned int numa_scan_period; unsigned int numa_scan_period_max; int numa_preferred_nid; unsigned long numa_migrate_retry; u64 node_stamp; u64 last_task_numa_placement; u64 last_sum_exec_runtime; struct callback_head numa_work; struct list_head numa_entry; struct numa_group *numa_group; unsigned long *numa_faults; unsigned long total_numa_faults; unsigned long numa_faults_locality[3U]; unsigned long numa_pages_migrated; struct tlbflush_unmap_batch tlb_ubc; struct callback_head rcu; struct pipe_inode_info *splice_pipe; struct page_frag task_frag; struct task_delay_info *delays; int make_it_fail; int nr_dirtied; int nr_dirtied_pause; unsigned long dirty_paused_when; int latency_record_count; struct latency_record latency_record[32U]; unsigned long timer_slack_ns; unsigned long default_timer_slack_ns; unsigned int kasan_depth; unsigned long trace; unsigned long trace_recursion; struct mem_cgroup *memcg_in_oom; gfp_t memcg_oom_gfp_mask; int memcg_oom_order; unsigned int memcg_nr_pages_over_high; struct uprobe_task *utask; unsigned int sequential_io; unsigned int sequential_io_avg; unsigned long task_state_change; int pagefault_disabled; struct thread_struct thread; } ;
70 struct klist_node ;
37 struct klist_node { void *n_klist; struct list_head n_node; struct kref n_ref; } ;
68 struct path ;
69 struct seq_file { char *buf; size_t size; size_t from; size_t count; size_t pad_until; loff_t index; loff_t read_pos; u64 version; struct mutex lock; const struct seq_operations *op; int poll_event; struct user_namespace *user_ns; void *private; } ;
35 struct seq_operations { void * (*start)(struct seq_file *, loff_t *); void (*stop)(struct seq_file *, void *); void * (*next)(struct seq_file *, void *, loff_t *); int (*show)(struct seq_file *, void *); } ;
227 struct pinctrl ;
228 struct pinctrl_state ;
194 struct dev_pin_info { struct pinctrl *p; struct pinctrl_state *default_state; struct pinctrl_state *init_state; struct pinctrl_state *sleep_state; struct pinctrl_state *idle_state; } ;
48 struct dma_map_ops ;
48 struct dev_archdata { struct dma_map_ops *dma_ops; void *iommu; } ;
14 struct device_private ;
15 struct device_driver ;
16 struct driver_private ;
17 struct class ;
18 struct subsys_private ;
19 struct bus_type ;
20 struct device_node ;
21 struct fwnode_handle ;
22 struct iommu_ops ;
23 struct iommu_group ;
61 struct device_attribute ;
61 struct bus_type { const char *name; const char *dev_name; struct device *dev_root; struct device_attribute *dev_attrs; const struct attribute_group **bus_groups; const struct attribute_group **dev_groups; const struct attribute_group **drv_groups; int (*match)(struct device *, struct device_driver *); int (*uevent)(struct device *, struct kobj_uevent_env *); int (*probe)(struct device *); int (*remove)(struct device *); void (*shutdown)(struct device *); int (*online)(struct device *); int (*offline)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct dev_pm_ops *pm; const struct iommu_ops *iommu_ops; struct subsys_private *p; struct lock_class_key lock_key; } ;
139 struct device_type ;
197 enum probe_type { PROBE_DEFAULT_STRATEGY = 0, PROBE_PREFER_ASYNCHRONOUS = 1, PROBE_FORCE_SYNCHRONOUS = 2 } ;
203 struct of_device_id ;
203 struct acpi_device_id ;
203 struct device_driver { const char *name; struct bus_type *bus; struct module *owner; const char *mod_name; bool suppress_bind_attrs; enum probe_type probe_type; const struct of_device_id *of_match_table; const struct acpi_device_id *acpi_match_table; int (*probe)(struct device *); int (*remove)(struct device *); void (*shutdown)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct attribute_group **groups; const struct dev_pm_ops *pm; struct driver_private *p; } ;
353 struct class_attribute ;
353 struct class { const char *name; struct module *owner; struct class_attribute *class_attrs; const struct attribute_group **dev_groups; struct kobject *dev_kobj; int (*dev_uevent)(struct device *, struct kobj_uevent_env *); char * (*devnode)(struct device *, umode_t *); void (*class_release)(struct class *); void (*dev_release)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct kobj_ns_type_operations *ns_type; const void * (*namespace)(struct device *); const struct dev_pm_ops *pm; struct subsys_private *p; } ;
446 struct class_attribute { struct attribute attr; ssize_t (*show)(struct class *, struct class_attribute *, char *); ssize_t (*store)(struct class *, struct class_attribute *, const char *, size_t ); } ;
514 struct device_type { const char *name; const struct attribute_group **groups; int (*uevent)(struct device *, struct kobj_uevent_env *); char * (*devnode)(struct device *, umode_t *, kuid_t *, kgid_t *); void (*release)(struct device *); const struct dev_pm_ops *pm; } ;
542 struct device_attribute { struct attribute attr; ssize_t (*show)(struct device *, struct device_attribute *, char *); ssize_t (*store)(struct device *, struct device_attribute *, const char *, size_t ); } ;
683 struct device_dma_parameters { unsigned int max_segment_size; unsigned long segment_boundary_mask; } ;
692 struct irq_domain ;
692 struct dma_coherent_mem ;
692 struct cma ;
692 struct device { struct device *parent; struct device_private *p; struct kobject kobj; const char *init_name; const struct device_type *type; struct mutex mutex; struct bus_type *bus; struct device_driver *driver; void *platform_data; void *driver_data; struct dev_pm_info power; struct dev_pm_domain *pm_domain; struct irq_domain *msi_domain; struct dev_pin_info *pins; struct list_head msi_list; int numa_node; u64 *dma_mask; u64 coherent_dma_mask; unsigned long dma_pfn_offset; struct device_dma_parameters *dma_parms; struct list_head dma_pools; struct dma_coherent_mem *dma_mem; struct cma *cma_area; struct dev_archdata archdata; struct device_node *of_node; struct fwnode_handle *fwnode; dev_t devt; u32 id; spinlock_t devres_lock; struct list_head devres_head; struct klist_node knode_class; struct class *class; const struct attribute_group **groups; void (*release)(struct device *); struct iommu_group *iommu_group; bool offline_disabled; bool offline; } ;
846 struct wakeup_source { const char *name; struct list_head entry; spinlock_t lock; struct wake_irq *wakeirq; struct timer_list timer; unsigned long timer_expires; ktime_t total_time; ktime_t max_time; ktime_t last_time; ktime_t start_prevent_time; ktime_t prevent_sleep_time; unsigned long event_count; unsigned long active_count; unsigned long relax_count; unsigned long expire_count; unsigned long wakeup_count; bool active; bool autosleep_enabled; } ;
1306 enum fwnode_type { FWNODE_INVALID = 0, FWNODE_OF = 1, FWNODE_ACPI = 2, FWNODE_ACPI_DATA = 3, FWNODE_PDATA = 4, FWNODE_IRQCHIP = 5 } ;
1315 struct fwnode_handle { enum fwnode_type type; struct fwnode_handle *secondary; } ;
161 typedef u64 acpi_size;
162 typedef u64 acpi_io_address;
447 typedef u32 acpi_status;
449 typedef char *acpi_string;
450 typedef void *acpi_handle;
644 typedef u32 acpi_object_type;
901 struct __anonstruct_integer_268 { acpi_object_type type; u64 value; } ;
901 struct __anonstruct_string_269 { acpi_object_type type; u32 length; char *pointer; } ;
901 struct __anonstruct_buffer_270 { acpi_object_type type; u32 length; u8 *pointer; } ;
901 struct __anonstruct_package_271 { acpi_object_type type; u32 count; union acpi_object *elements; } ;
901 struct __anonstruct_reference_272 { acpi_object_type type; acpi_object_type actual_type; acpi_handle handle; } ;
901 struct __anonstruct_processor_273 { acpi_object_type type; u32 proc_id; acpi_io_address pblk_address; u32 pblk_length; } ;
901 struct __anonstruct_power_resource_274 { acpi_object_type type; u32 system_level; u32 resource_order; } ;
901 union acpi_object { acpi_object_type type; struct __anonstruct_integer_268 integer; struct __anonstruct_string_269 string; struct __anonstruct_buffer_270 buffer; struct __anonstruct_package_271 package; struct __anonstruct_reference_272 reference; struct __anonstruct_processor_273 processor; struct __anonstruct_power_resource_274 power_resource; } ;
954 struct acpi_object_list { u32 count; union acpi_object *pointer; } ;
962 struct acpi_buffer { acpi_size length; void *pointer; } ;
77 struct acpi_table_header { char signature[4U]; u32 length; u8 revision; u8 checksum; char oem_id[6U]; char oem_table_id[8U]; u32 oem_revision; char asl_compiler_id[4U]; u32 asl_compiler_revision; } ;
934 struct acpi_table_nfit { struct acpi_table_header header; u32 reserved; } ;
946 struct acpi_nfit_header { u16 type; u16 length; } ;
964 struct acpi_nfit_system_address { struct acpi_nfit_header header; u16 range_index; u16 flags; u32 reserved; u32 proximity_domain; u8 range_guid[16U]; u64 address; u64 length; u64 memory_mapping; } ;
984 struct acpi_nfit_memory_map { struct acpi_nfit_header header; u32 device_handle; u16 physical_id; u16 region_id; u16 range_index; u16 region_index; u64 region_size; u64 region_offset; u64 address; u16 interleave_index; u16 interleave_ways; u16 flags; u16 reserved; } ;
1009 struct acpi_nfit_interleave { struct acpi_nfit_header header; u16 interleave_index; u16 reserved; u32 line_count; u32 line_size; u32 line_offset[1U]; } ;
1037 struct acpi_nfit_control_region { struct acpi_nfit_header header; u16 region_index; u16 vendor_id; u16 device_id; u16 revision_id; u16 subsystem_vendor_id; u16 subsystem_device_id; u16 subsystem_revision_id; u8 reserved[6U]; u32 serial_number; u16 code; u16 windows; u64 window_size; u64 command_offset; u64 command_size; u64 status_offset; u64 status_size; u16 flags; u8 reserved1[6U]; } ;
1061 struct acpi_nfit_data_region { struct acpi_nfit_header header; u16 region_index; u16 windows; u64 offset; u64 size; u64 capacity; u64 start_address; } ;
1077 struct acpi_nfit_flush_address { struct acpi_nfit_header header; u32 device_handle; u16 hint_count; u8 reserved[6U]; u64 hint_address[1U]; } ;
13 typedef unsigned long kernel_ulong_t;
186 struct acpi_device_id { __u8 id[9U]; kernel_ulong_t driver_data; __u32 cls; __u32 cls_msk; } ;
229 struct of_device_id { char name[32U]; char type[32U]; char compatible[128U]; const void *data; } ;
93 struct hlist_bl_node ;
93 struct hlist_bl_head { struct hlist_bl_node *first; } ;
36 struct hlist_bl_node { struct hlist_bl_node *next; struct hlist_bl_node **pprev; } ;
114 struct __anonstruct____missing_field_name_285 { spinlock_t lock; int count; } ;
114 union __anonunion____missing_field_name_284 { struct __anonstruct____missing_field_name_285 __annonCompField62; } ;
114 struct lockref { union __anonunion____missing_field_name_284 __annonCompField63; } ;
50 struct vfsmount ;
51 struct __anonstruct____missing_field_name_287 { u32 hash; u32 len; } ;
51 union __anonunion____missing_field_name_286 { struct __anonstruct____missing_field_name_287 __annonCompField64; u64 hash_len; } ;
51 struct qstr { union __anonunion____missing_field_name_286 __annonCompField65; const unsigned char *name; } ;
90 struct dentry_operations ;
90 union __anonunion_d_u_288 { struct hlist_node d_alias; struct callback_head d_rcu; } ;
90 struct dentry { unsigned int d_flags; seqcount_t d_seq; struct hlist_bl_node d_hash; struct dentry *d_parent; struct qstr d_name; struct inode *d_inode; unsigned char d_iname[32U]; struct lockref d_lockref; const struct dentry_operations *d_op; struct super_block *d_sb; unsigned long d_time; void *d_fsdata; struct list_head d_lru; struct list_head d_child; struct list_head d_subdirs; union __anonunion_d_u_288 d_u; } ;
142 struct dentry_operations { int (*d_revalidate)(struct dentry *, unsigned int); int (*d_weak_revalidate)(struct dentry *, unsigned int); int (*d_hash)(const struct dentry *, struct qstr *); int (*d_compare)(const struct dentry *, const struct dentry *, unsigned int, const char *, const struct qstr *); int (*d_delete)(const struct dentry *); void (*d_release)(struct dentry *); void (*d_prune)(struct dentry *); void (*d_iput)(struct dentry *, struct inode *); char * (*d_dname)(struct dentry *, char *, int); struct vfsmount * (*d_automount)(struct path *); int (*d_manage)(struct dentry *, bool ); struct inode * (*d_select_inode)(struct dentry *, unsigned int); } ;
586 struct path { struct vfsmount *mnt; struct dentry *dentry; } ;
19 struct shrink_control { gfp_t gfp_mask; unsigned long nr_to_scan; int nid; struct mem_cgroup *memcg; } ;
27 struct shrinker { unsigned long int (*count_objects)(struct shrinker *, struct shrink_control *); unsigned long int (*scan_objects)(struct shrinker *, struct shrink_control *); int seeks; long batch; unsigned long flags; struct list_head list; atomic_long_t *nr_deferred; } ;
80 struct list_lru_one { struct list_head list; long nr_items; } ;
32 struct list_lru_memcg { struct list_lru_one *lru[0U]; } ;
37 struct list_lru_node { spinlock_t lock; struct list_lru_one lru; struct list_lru_memcg *memcg_lrus; } ;
47 struct list_lru { struct list_lru_node *node; struct list_head list; } ;
58 struct __anonstruct____missing_field_name_292 { struct radix_tree_node *parent; void *private_data; } ;
58 union __anonunion____missing_field_name_291 { struct __anonstruct____missing_field_name_292 __annonCompField66; struct callback_head callback_head; } ;
58 struct radix_tree_node { unsigned int path; unsigned int count; union __anonunion____missing_field_name_291 __annonCompField67; struct list_head private_list; void *slots[64U]; unsigned long tags[3U][1U]; } ;
105 struct radix_tree_root { unsigned int height; gfp_t gfp_mask; struct radix_tree_node *rnode; } ;
45 struct fiemap_extent { __u64 fe_logical; __u64 fe_physical; __u64 fe_length; __u64 fe_reserved64[2U]; __u32 fe_flags; __u32 fe_reserved[3U]; } ;
38 enum migrate_mode { MIGRATE_ASYNC = 0, MIGRATE_SYNC_LIGHT = 1, MIGRATE_SYNC = 2 } ;
47 struct block_device ;
19 struct bio_vec { struct page *bv_page; unsigned int bv_len; unsigned int bv_offset; } ;
60 struct bdi_writeback ;
61 struct export_operations ;
63 struct iovec ;
64 struct kiocb ;
65 struct poll_table_struct ;
66 struct kstatfs ;
67 struct swap_info_struct ;
68 struct iov_iter ;
75 struct iattr { unsigned int ia_valid; umode_t ia_mode; kuid_t ia_uid; kgid_t ia_gid; loff_t ia_size; struct timespec ia_atime; struct timespec ia_mtime; struct timespec ia_ctime; struct file *ia_file; } ;
212 struct dquot ;
19 typedef __kernel_uid32_t projid_t;
23 struct __anonstruct_kprojid_t_298 { projid_t val; } ;
23 typedef struct __anonstruct_kprojid_t_298 kprojid_t;
166 enum quota_type { USRQUOTA = 0, GRPQUOTA = 1, PRJQUOTA = 2 } ;
66 typedef long long qsize_t;
67 union __anonunion____missing_field_name_299 { kuid_t uid; kgid_t gid; kprojid_t projid; } ;
67 struct kqid { union __anonunion____missing_field_name_299 __annonCompField69; enum quota_type type; } ;
184 struct mem_dqblk { qsize_t dqb_bhardlimit; qsize_t dqb_bsoftlimit; qsize_t dqb_curspace; qsize_t dqb_rsvspace; qsize_t dqb_ihardlimit; qsize_t dqb_isoftlimit; qsize_t dqb_curinodes; time_t dqb_btime; time_t dqb_itime; } ;
206 struct quota_format_type ;
207 struct mem_dqinfo { struct quota_format_type *dqi_format; int dqi_fmt_id; struct list_head dqi_dirty_list; unsigned long dqi_flags; unsigned int dqi_bgrace; unsigned int dqi_igrace; qsize_t dqi_max_spc_limit; qsize_t dqi_max_ino_limit; void *dqi_priv; } ;
272 struct dquot { struct hlist_node dq_hash; struct list_head dq_inuse; struct list_head dq_free; struct list_head dq_dirty; struct mutex dq_lock; atomic_t dq_count; wait_queue_head_t dq_wait_unused; struct super_block *dq_sb; struct kqid dq_id; loff_t dq_off; unsigned long dq_flags; struct mem_dqblk dq_dqb; } ;
299 struct quota_format_ops { int (*check_quota_file)(struct super_block *, int); int (*read_file_info)(struct super_block *, int); int (*write_file_info)(struct super_block *, int); int (*free_file_info)(struct super_block *, int); int (*read_dqblk)(struct dquot *); int (*commit_dqblk)(struct dquot *); int (*release_dqblk)(struct dquot *); } ;
310 struct dquot_operations { int (*write_dquot)(struct dquot *); struct dquot * (*alloc_dquot)(struct super_block *, int); void (*destroy_dquot)(struct dquot *); int (*acquire_dquot)(struct dquot *); int (*release_dquot)(struct dquot *); int (*mark_dirty)(struct dquot *); int (*write_info)(struct super_block *, int); qsize_t * (*get_reserved_space)(struct inode *); int (*get_projid)(struct inode *, kprojid_t *); } ;
325 struct qc_dqblk { int d_fieldmask; u64 d_spc_hardlimit; u64 d_spc_softlimit; u64 d_ino_hardlimit; u64 d_ino_softlimit; u64 d_space; u64 d_ino_count; s64 d_ino_timer; s64 d_spc_timer; int d_ino_warns; int d_spc_warns; u64 d_rt_spc_hardlimit; u64 d_rt_spc_softlimit; u64 d_rt_space; s64 d_rt_spc_timer; int d_rt_spc_warns; } ;
348 struct qc_type_state { unsigned int flags; unsigned int spc_timelimit; unsigned int ino_timelimit; unsigned int rt_spc_timelimit; unsigned int spc_warnlimit; unsigned int ino_warnlimit; unsigned int rt_spc_warnlimit; unsigned long long ino; blkcnt_t blocks; blkcnt_t nextents; } ;
394 struct qc_state { unsigned int s_incoredqs; struct qc_type_state s_state[3U]; } ;
405 struct qc_info { int i_fieldmask; unsigned int i_flags; unsigned int i_spc_timelimit; unsigned int i_ino_timelimit; unsigned int i_rt_spc_timelimit; unsigned int i_spc_warnlimit; unsigned int i_ino_warnlimit; unsigned int i_rt_spc_warnlimit; } ;
418 struct quotactl_ops { int (*quota_on)(struct super_block *, int, int, struct path *); int (*quota_off)(struct super_block *, int); int (*quota_enable)(struct super_block *, unsigned int); int (*quota_disable)(struct super_block *, unsigned int); int (*quota_sync)(struct super_block *, int); int (*set_info)(struct super_block *, int, struct qc_info *); int (*get_dqblk)(struct super_block *, struct kqid , struct qc_dqblk *); int (*set_dqblk)(struct super_block *, struct kqid , struct qc_dqblk *); int (*get_state)(struct super_block *, struct qc_state *); int (*rm_xquota)(struct super_block *, unsigned int); } ;
432 struct quota_format_type { int qf_fmt_id; const struct quota_format_ops *qf_ops; struct module *qf_owner; struct quota_format_type *qf_next; } ;
496 struct quota_info { unsigned int flags; struct mutex dqio_mutex; struct mutex dqonoff_mutex; struct inode *files[3U]; struct mem_dqinfo info[3U]; const struct quota_format_ops *ops[3U]; } ;
526 struct writeback_control ;
527 struct kiocb { struct file *ki_filp; loff_t ki_pos; void (*ki_complete)(struct kiocb *, long, long); void *private; int ki_flags; } ;
366 struct address_space_operations { int (*writepage)(struct page *, struct writeback_control *); int (*readpage)(struct file *, struct page *); int (*writepages)(struct address_space *, struct writeback_control *); int (*set_page_dirty)(struct page *); int (*readpages)(struct file *, struct address_space *, struct list_head *, unsigned int); int (*write_begin)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page **, void **); int (*write_end)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page *, void *); sector_t (*bmap)(struct address_space *, sector_t ); void (*invalidatepage)(struct page *, unsigned int, unsigned int); int (*releasepage)(struct page *, gfp_t ); void (*freepage)(struct page *); ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *, loff_t ); int (*migratepage)(struct address_space *, struct page *, struct page *, enum migrate_mode ); int (*launder_page)(struct page *); int (*is_partially_uptodate)(struct page *, unsigned long, unsigned long); void (*is_dirty_writeback)(struct page *, bool *, bool *); int (*error_remove_page)(struct address_space *, struct page *); int (*swap_activate)(struct swap_info_struct *, struct file *, sector_t *); void (*swap_deactivate)(struct file *); } ;
423 struct address_space { struct inode *host; struct radix_tree_root page_tree; spinlock_t tree_lock; atomic_t i_mmap_writable; struct rb_root i_mmap; struct rw_semaphore i_mmap_rwsem; unsigned long nrpages; unsigned long nrshadows; unsigned long writeback_index; const struct address_space_operations *a_ops; unsigned long flags; spinlock_t private_lock; struct list_head private_list; void *private_data; } ;
443 struct request_queue ;
444 struct hd_struct ;
444 struct gendisk ;
444 struct block_device { dev_t bd_dev; int bd_openers; struct inode *bd_inode; struct super_block *bd_super; struct mutex bd_mutex; struct list_head bd_inodes; void *bd_claiming; void *bd_holder; int bd_holders; bool bd_write_holder; struct list_head bd_holder_disks; struct block_device *bd_contains; unsigned int bd_block_size; struct hd_struct *bd_part; unsigned int bd_part_count; int bd_invalidated; struct gendisk *bd_disk; struct request_queue *bd_queue; struct list_head bd_list; unsigned long bd_private; int bd_fsfreeze_count; struct mutex bd_fsfreeze_mutex; } ;
560 struct posix_acl ;
561 struct inode_operations ;
561 union __anonunion____missing_field_name_302 { const unsigned int i_nlink; unsigned int __i_nlink; } ;
561 union __anonunion____missing_field_name_303 { struct hlist_head i_dentry; struct callback_head i_rcu; } ;
561 struct file_lock_context ;
561 struct cdev ;
561 union __anonunion____missing_field_name_304 { struct pipe_inode_info *i_pipe; struct block_device *i_bdev; struct cdev *i_cdev; char *i_link; } ;
561 struct inode { umode_t i_mode; unsigned short i_opflags; kuid_t i_uid; kgid_t i_gid; unsigned int i_flags; struct posix_acl *i_acl; struct posix_acl *i_default_acl; const struct inode_operations *i_op; struct super_block *i_sb; struct address_space *i_mapping; void *i_security; unsigned long i_ino; union __anonunion____missing_field_name_302 __annonCompField70; dev_t i_rdev; loff_t i_size; struct timespec i_atime; struct timespec i_mtime; struct timespec i_ctime; spinlock_t i_lock; unsigned short i_bytes; unsigned int i_blkbits; blkcnt_t i_blocks; unsigned long i_state; struct mutex i_mutex; unsigned long dirtied_when; unsigned long dirtied_time_when; struct hlist_node i_hash; struct list_head i_io_list; struct bdi_writeback *i_wb; int i_wb_frn_winner; u16 i_wb_frn_avg_time; u16 i_wb_frn_history; struct list_head i_lru; struct list_head i_sb_list; union __anonunion____missing_field_name_303 __annonCompField71; u64 i_version; atomic_t i_count; atomic_t i_dio_count; atomic_t i_writecount; atomic_t i_readcount; const struct file_operations *i_fop; struct file_lock_context *i_flctx; struct address_space i_data; struct list_head i_devices; union __anonunion____missing_field_name_304 __annonCompField72; __u32 i_generation; __u32 i_fsnotify_mask; struct hlist_head i_fsnotify_marks; void *i_private; } ;
807 struct fown_struct { rwlock_t lock; struct pid *pid; enum pid_type pid_type; kuid_t uid; kuid_t euid; int signum; } ;
815 struct file_ra_state { unsigned long start; unsigned int size; unsigned int async_size; unsigned int ra_pages; unsigned int mmap_miss; loff_t prev_pos; } ;
838 union __anonunion_f_u_305 { struct llist_node fu_llist; struct callback_head fu_rcuhead; } ;
838 struct file { union __anonunion_f_u_305 f_u; struct path f_path; struct inode *f_inode; const struct file_operations *f_op; spinlock_t f_lock; atomic_long_t f_count; unsigned int f_flags; fmode_t f_mode; struct mutex f_pos_lock; loff_t f_pos; struct fown_struct f_owner; const struct cred *f_cred; struct file_ra_state f_ra; u64 f_version; void *f_security; void *private_data; struct list_head f_ep_links; struct list_head f_tfile_llink; struct address_space *f_mapping; } ;
923 typedef void *fl_owner_t;
924 struct file_lock ;
925 struct file_lock_operations { void (*fl_copy_lock)(struct file_lock *, struct file_lock *); void (*fl_release_private)(struct file_lock *); } ;
931 struct lock_manager_operations { int (*lm_compare_owner)(struct file_lock *, struct file_lock *); unsigned long int (*lm_owner_key)(struct file_lock *); fl_owner_t (*lm_get_owner)(fl_owner_t ); void (*lm_put_owner)(fl_owner_t ); void (*lm_notify)(struct file_lock *); int (*lm_grant)(struct file_lock *, int); bool (*lm_break)(struct file_lock *); int (*lm_change)(struct file_lock *, int, struct list_head *); void (*lm_setup)(struct file_lock *, void **); } ;
958 struct nlm_lockowner ;
959 struct nfs_lock_info { u32 state; struct nlm_lockowner *owner; struct list_head list; } ;
14 struct nfs4_lock_state ;
15 struct nfs4_lock_info { struct nfs4_lock_state *owner; } ;
19 struct fasync_struct ;
19 struct __anonstruct_afs_307 { struct list_head link; int state; } ;
19 union __anonunion_fl_u_306 { struct nfs_lock_info nfs_fl; struct nfs4_lock_info nfs4_fl; struct __anonstruct_afs_307 afs; } ;
19 struct file_lock { struct file_lock *fl_next; struct list_head fl_list; struct hlist_node fl_link; struct list_head fl_block; fl_owner_t fl_owner; unsigned int fl_flags; unsigned char fl_type; unsigned int fl_pid; int fl_link_cpu; struct pid *fl_nspid; wait_queue_head_t fl_wait; struct file *fl_file; loff_t fl_start; loff_t fl_end; struct fasync_struct *fl_fasync; unsigned long fl_break_time; unsigned long fl_downgrade_time; const struct file_lock_operations *fl_ops; const struct lock_manager_operations *fl_lmops; union __anonunion_fl_u_306 fl_u; } ;
1011 struct file_lock_context { spinlock_t flc_lock; struct list_head flc_flock; struct list_head flc_posix; struct list_head flc_lease; } ;
1214 struct fasync_struct { spinlock_t fa_lock; int magic; int fa_fd; struct fasync_struct *fa_next; struct file *fa_file; struct callback_head fa_rcu; } ;
1249 struct sb_writers { int frozen; wait_queue_head_t wait_unfrozen; struct percpu_rw_semaphore rw_sem[3U]; } ;
1275 struct super_operations ;
1275 struct xattr_handler ;
1275 struct mtd_info ;
1275 struct super_block { struct list_head s_list; dev_t s_dev; unsigned char s_blocksize_bits; unsigned long s_blocksize; loff_t s_maxbytes; struct file_system_type *s_type; const struct super_operations *s_op; const struct dquot_operations *dq_op; const struct quotactl_ops *s_qcop; const struct export_operations *s_export_op; unsigned long s_flags; unsigned long s_iflags; unsigned long s_magic; struct dentry *s_root; struct rw_semaphore s_umount; int s_count; atomic_t s_active; void *s_security; const struct xattr_handler **s_xattr; struct hlist_bl_head s_anon; struct list_head s_mounts; struct block_device *s_bdev; struct backing_dev_info *s_bdi; struct mtd_info *s_mtd; struct hlist_node s_instances; unsigned int s_quota_types; struct quota_info s_dquot; struct sb_writers s_writers; char s_id[32U]; u8 s_uuid[16U]; void *s_fs_info; unsigned int s_max_links; fmode_t s_mode; u32 s_time_gran; struct mutex s_vfs_rename_mutex; char *s_subtype; char *s_options; const struct dentry_operations *s_d_op; int cleancache_poolid; struct shrinker s_shrink; atomic_long_t s_remove_count; int s_readonly_remount; struct workqueue_struct *s_dio_done_wq; struct hlist_head s_pins; struct list_lru s_dentry_lru; struct list_lru s_inode_lru; struct callback_head rcu; struct work_struct destroy_work; struct mutex s_sync_lock; int s_stack_depth; spinlock_t s_inode_list_lock; struct list_head s_inodes; } ;
1524 struct fiemap_extent_info { unsigned int fi_flags; unsigned int fi_extents_mapped; unsigned int fi_extents_max; struct fiemap_extent *fi_extents_start; } ;
1538 struct dir_context ;
1563 struct dir_context { int (*actor)(struct dir_context *, const char *, int, loff_t , u64 , unsigned int); loff_t pos; } ;
1570 struct file_operations { struct module *owner; loff_t (*llseek)(struct file *, loff_t , int); ssize_t (*read)(struct file *, char *, size_t , loff_t *); ssize_t (*write)(struct file *, const char *, size_t , loff_t *); ssize_t (*read_iter)(struct kiocb *, struct iov_iter *); ssize_t (*write_iter)(struct kiocb *, struct iov_iter *); int (*iterate)(struct file *, struct dir_context *); unsigned int (*poll)(struct file *, struct poll_table_struct *); long int (*unlocked_ioctl)(struct file *, unsigned int, unsigned long); long int (*compat_ioctl)(struct file *, unsigned int, unsigned long); int (*mmap)(struct file *, struct vm_area_struct *); int (*open)(struct inode *, struct file *); int (*flush)(struct file *, fl_owner_t ); int (*release)(struct inode *, struct file *); int (*fsync)(struct file *, loff_t , loff_t , int); int (*aio_fsync)(struct kiocb *, int); int (*fasync)(int, struct file *, int); int (*lock)(struct file *, int, struct file_lock *); ssize_t (*sendpage)(struct file *, struct page *, int, size_t , loff_t *, int); unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); int (*check_flags)(int); int (*flock)(struct file *, int, struct file_lock *); ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t , unsigned int); ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t , unsigned int); int (*setlease)(struct file *, long, struct file_lock **, void **); long int (*fallocate)(struct file *, int, loff_t , loff_t ); void (*show_fdinfo)(struct seq_file *, struct file *); } ;
1630 struct inode_operations { struct dentry * (*lookup)(struct inode *, struct dentry *, unsigned int); const char * (*follow_link)(struct dentry *, void **); int (*permission)(struct inode *, int); struct posix_acl * (*get_acl)(struct inode *, int); int (*readlink)(struct dentry *, char *, int); void (*put_link)(struct inode *, void *); int (*create)(struct inode *, struct dentry *, umode_t , bool ); int (*link)(struct dentry *, struct inode *, struct dentry *); int (*unlink)(struct inode *, struct dentry *); int (*symlink)(struct inode *, struct dentry *, const char *); int (*mkdir)(struct inode *, struct dentry *, umode_t ); int (*rmdir)(struct inode *, struct dentry *); int (*mknod)(struct inode *, struct dentry *, umode_t , dev_t ); int (*rename)(struct inode *, struct dentry *, struct inode *, struct dentry *); int (*rename2)(struct inode *, struct dentry *, struct inode *, struct dentry *, unsigned int); int (*setattr)(struct dentry *, struct iattr *); int (*getattr)(struct vfsmount *, struct dentry *, struct kstat *); int (*setxattr)(struct dentry *, const char *, const void *, size_t , int); ssize_t (*getxattr)(struct dentry *, const char *, void *, size_t ); ssize_t (*listxattr)(struct dentry *, char *, size_t ); int (*removexattr)(struct dentry *, const char *); int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 , u64 ); int (*update_time)(struct inode *, struct timespec *, int); int (*atomic_open)(struct inode *, struct dentry *, struct file *, unsigned int, umode_t , int *); int (*tmpfile)(struct inode *, struct dentry *, umode_t ); int (*set_acl)(struct inode *, struct posix_acl *, int); } ;
1682 struct super_operations { struct inode * (*alloc_inode)(struct super_block *); void (*destroy_inode)(struct inode *); void (*dirty_inode)(struct inode *, int); int (*write_inode)(struct inode *, struct writeback_control *); int (*drop_inode)(struct inode *); void (*evict_inode)(struct inode *); void (*put_super)(struct super_block *); int (*sync_fs)(struct super_block *, int); int (*freeze_super)(struct super_block *); int (*freeze_fs)(struct super_block *); int (*thaw_super)(struct super_block *); int (*unfreeze_fs)(struct super_block *); int (*statfs)(struct dentry *, struct kstatfs *); int (*remount_fs)(struct super_block *, int *, char *); void (*umount_begin)(struct super_block *); int (*show_options)(struct seq_file *, struct dentry *); int (*show_devname)(struct seq_file *, struct dentry *); int (*show_path)(struct seq_file *, struct dentry *); int (*show_stats)(struct seq_file *, struct dentry *); ssize_t (*quota_read)(struct super_block *, int, char *, size_t , loff_t ); ssize_t (*quota_write)(struct super_block *, int, const char *, size_t , loff_t ); struct dquot ** (*get_dquots)(struct inode *); int (*bdev_try_to_free_page)(struct super_block *, struct page *, gfp_t ); long int (*nr_cached_objects)(struct super_block *, struct shrink_control *); long int (*free_cached_objects)(struct super_block *, struct shrink_control *); } ;
1921 struct file_system_type { const char *name; int fs_flags; struct dentry * (*mount)(struct file_system_type *, int, const char *, void *); void (*kill_sb)(struct super_block *); struct module *owner; struct file_system_type *next; struct hlist_head fs_supers; struct lock_class_key s_lock_key; struct lock_class_key s_umount_key; struct lock_class_key s_vfs_rename_key; struct lock_class_key s_writers_key[3U]; struct lock_class_key i_lock_key; struct lock_class_key i_mutex_key; struct lock_class_key i_mutex_dir_key; } ;
3028 struct proc_dir_entry ;
106 struct acpi_driver ;
107 struct acpi_device ;
108 struct acpi_hotplug_profile { struct kobject kobj; int (*scan_dependent)(struct acpi_device *); void (*notify_online)(struct acpi_device *); bool enabled; bool demand_offline; } ;
129 struct acpi_scan_handler { const struct acpi_device_id *ids; struct list_head list_node; bool (*match)(const char *, const struct acpi_device_id **); int (*attach)(struct acpi_device *, const struct acpi_device_id *); void (*detach)(struct acpi_device *); void (*bind)(struct device *); void (*unbind)(struct device *); struct acpi_hotplug_profile hotplug; } ;
139 struct acpi_hotplug_context { struct acpi_device *self; int (*notify)(struct acpi_device *, u32 ); void (*uevent)(struct acpi_device *, u32 ); void (*fixup)(struct acpi_device *); } ;
160 struct acpi_device_ops { int (*add)(struct acpi_device *); int (*remove)(struct acpi_device *); void (*notify)(struct acpi_device *, u32 ); } ;
166 struct acpi_driver { char name[80U]; char class[80U]; const struct acpi_device_id *ids; unsigned int flags; struct acpi_device_ops ops; struct device_driver drv; struct module *owner; } ;
178 struct acpi_device_status { unsigned char present; unsigned char enabled; unsigned char show_in_ui; unsigned char functional; unsigned char battery_present; unsigned int reserved; } ;
194 struct acpi_device_flags { unsigned char dynamic_status; unsigned char removable; unsigned char ejectable; unsigned char power_manageable; unsigned char match_driver; unsigned char initialized; unsigned char visited; unsigned char hotplug_notify; unsigned char is_dock_station; unsigned char of_compatible_ok; unsigned char coherent_dma; unsigned char cca_seen; unsigned int reserved; } ;
212 struct acpi_device_dir { struct proc_dir_entry *entry; } ;
223 typedef char acpi_bus_id[8U];
224 typedef unsigned long acpi_bus_address;
225 typedef char acpi_device_name[40U];
226 typedef char acpi_device_class[20U];
232 struct acpi_pnp_type { unsigned char hardware_id; unsigned char bus_address; unsigned char platform_id; unsigned int reserved; } ;
239 struct acpi_device_pnp { acpi_bus_id bus_id; struct acpi_pnp_type type; acpi_bus_address bus_address; char *unique_id; struct list_head ids; acpi_device_name device_name; acpi_device_class device_class; union acpi_object *str_obj; } ;
254 struct acpi_device_power_flags { unsigned char explicit_get; unsigned char power_resources; unsigned char inrush_current; unsigned char power_removed; unsigned char ignore_parent; unsigned char dsw_present; unsigned int reserved; } ;
269 struct __anonstruct_flags_308 { unsigned char valid; unsigned char explicit_set; unsigned char reserved; } ;
269 struct acpi_device_power_state { struct __anonstruct_flags_308 flags; int power; int latency; struct list_head resources; } ;
280 struct acpi_device_power { int state; struct acpi_device_power_flags flags; struct acpi_device_power_state states[5U]; } ;
286 struct acpi_device_perf_flags { u8 reserved; } ;
292 struct __anonstruct_flags_309 { unsigned char valid; unsigned char reserved; } ;
292 struct acpi_device_perf_state { struct __anonstruct_flags_309 flags; u8 power; u8 performance; int latency; } ;
302 struct acpi_device_perf { int state; struct acpi_device_perf_flags flags; int state_count; struct acpi_device_perf_state *states; } ;
309 struct acpi_device_wakeup_flags { unsigned char valid; unsigned char run_wake; unsigned char notifier_present; unsigned char enabled; } ;
317 struct acpi_device_wakeup_context { struct work_struct work; struct device *dev; } ;
322 struct acpi_device_wakeup { acpi_handle gpe_device; u64 gpe_number; u64 sleep_state; struct list_head resources; struct acpi_device_wakeup_flags flags; struct acpi_device_wakeup_context context; struct wakeup_source *ws; int prepare_count; } ;
340 struct acpi_device_data { const union acpi_object *pointer; const union acpi_object *properties; const union acpi_object *of_compatible; struct list_head subnodes; } ;
348 struct acpi_gpio_mapping ;
349 struct acpi_device { int device_type; acpi_handle handle; struct fwnode_handle fwnode; struct acpi_device *parent; struct list_head children; struct list_head node; struct list_head wakeup_list; struct list_head del_list; struct acpi_device_status status; struct acpi_device_flags flags; struct acpi_device_pnp pnp; struct acpi_device_power power; struct acpi_device_wakeup wakeup; struct acpi_device_perf performance; struct acpi_device_dir dir; struct acpi_device_data data; struct acpi_scan_handler *handler; struct acpi_hotplug_context *hp; struct acpi_driver *driver; const struct acpi_gpio_mapping *driver_gpios; void *driver_data; struct device dev; unsigned int physical_node_count; unsigned int dep_unmet; struct list_head physical_node_list; struct mutex physical_node_lock; void (*remove)(struct acpi_device *); } ;
697 struct acpi_gpio_params { unsigned int crs_entry_index; unsigned int line_index; bool active_low; } ;
750 struct acpi_gpio_mapping { const char *name; const struct acpi_gpio_params *data; unsigned int size; } ;
7 struct iovec { void *iov_base; __kernel_size_t iov_len; } ;
21 struct kvec { void *iov_base; size_t iov_len; } ;
27 union __anonunion____missing_field_name_311 { const struct iovec *iov; const struct kvec *kvec; const struct bio_vec *bvec; } ;
27 struct iov_iter { int type; size_t iov_offset; size_t count; union __anonunion____missing_field_name_311 __annonCompField74; unsigned long nr_segs; } ;
62 struct exception_table_entry { int insn; int fixup; } ;
214 struct vm_fault { unsigned int flags; unsigned long pgoff; void *virtual_address; struct page *cow_page; struct page *page; unsigned long max_pgoff; pte_t *pte; } ;
246 struct vm_operations_struct { void (*open)(struct vm_area_struct *); void (*close)(struct vm_area_struct *); int (*mremap)(struct vm_area_struct *); int (*fault)(struct vm_area_struct *, struct vm_fault *); int (*pmd_fault)(struct vm_area_struct *, unsigned long, pmd_t *, unsigned int); void (*map_pages)(struct vm_area_struct *, struct vm_fault *); int (*page_mkwrite)(struct vm_area_struct *, struct vm_fault *); int (*pfn_mkwrite)(struct vm_area_struct *, struct vm_fault *); int (*access)(struct vm_area_struct *, unsigned long, void *, int, int); const char * (*name)(struct vm_area_struct *); int (*set_policy)(struct vm_area_struct *, struct mempolicy *); struct mempolicy * (*get_policy)(struct vm_area_struct *, unsigned long); struct page * (*find_special_page)(struct vm_area_struct *, unsigned long); } ;
181 enum nfit_uuids { NFIT_SPA_VOLATILE = 0, NFIT_SPA_PM = 1, NFIT_SPA_DCR = 2, NFIT_SPA_BDW = 3, NFIT_SPA_VDISK = 4, NFIT_SPA_VCD = 5, NFIT_SPA_PDISK = 6, NFIT_SPA_PCD = 7, NFIT_DEV_BUS = 8, NFIT_DEV_DIMM = 9, NFIT_UUID_MAX = 10 } ;
200 struct nfit_spa { struct acpi_nfit_system_address *spa; struct list_head list; int is_registered; } ;
53 struct nfit_dcr { struct acpi_nfit_control_region *dcr; struct list_head list; } ;
58 struct nfit_bdw { struct acpi_nfit_data_region *bdw; struct list_head list; } ;
63 struct nfit_idt { struct acpi_nfit_interleave *idt; struct list_head list; } ;
68 struct nfit_flush { struct acpi_nfit_flush_address *flush; struct list_head list; } ;
73 struct nfit_memdev { struct acpi_nfit_memory_map *memdev; struct list_head list; } ;
78 struct nfit_mem { struct nvdimm *nvdimm; struct acpi_nfit_memory_map *memdev_dcr; struct acpi_nfit_memory_map *memdev_pmem; struct acpi_nfit_memory_map *memdev_bdw; struct acpi_nfit_control_region *dcr; struct acpi_nfit_data_region *bdw; struct acpi_nfit_system_address *spa_dcr; struct acpi_nfit_system_address *spa_bdw; struct acpi_nfit_interleave *idt_dcr; struct acpi_nfit_interleave *idt_bdw; struct nfit_flush *nfit_flush; struct list_head list; struct acpi_device *adev; unsigned long dsm_mask; } ;
96 struct acpi_nfit_desc { struct nvdimm_bus_descriptor nd_desc; struct acpi_table_nfit *nfit; struct mutex spa_map_mutex; struct mutex init_mutex; struct list_head spa_maps; struct list_head memdevs; struct list_head flushes; struct list_head dimms; struct list_head spas; struct list_head dcrs; struct list_head bdws; struct list_head idts; struct nvdimm_bus *nvdimm_bus; struct device *dev; unsigned long dimm_dsm_force_en; unsigned long bus_dsm_force_en; int (*blk_do_io)(struct nd_blk_region *, resource_size_t , void *, u64 , int); } ;
121 union __anonunion____missing_field_name_320 { void *base; void *aperture; } ;
121 struct nd_blk_addr { union __anonunion____missing_field_name_320 __annonCompField75; } ;
129 struct nfit_blk_mmio { struct nd_blk_addr addr; u64 size; u64 base_offset; u32 line_size; u32 num_lines; u32 table_size; struct acpi_nfit_interleave *idt; struct acpi_nfit_system_address *spa; } ;
141 struct nfit_blk { struct nfit_blk_mmio mmio[2U]; struct nd_region *nd_region; u64 bdw_offset; u64 stat_offset; u64 cmd_offset; void *nvdimm_flush; u32 dimm_flags; } ;
148 enum spa_map_type { SPA_MAP_CONTROL = 0, SPA_MAP_APERTURE = 1 } ;
153 struct nfit_spa_mapping { struct acpi_nfit_desc *acpi_desc; struct acpi_nfit_system_address *spa; struct list_head list; struct kref kref; enum spa_map_type type; struct nd_blk_addr addr; } ;
36 struct nfit_table_prev { struct list_head spas; struct list_head memdevs; struct list_head dcrs; struct list_head bdws; struct list_head idts; struct list_head flushes; } ;
946 struct nfit_set_info_map { u64 region_offset; u32 serial_number; u32 pad; } ;
956 struct nfit_set_info { struct nfit_set_info_map mapping[0U]; } ;
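/*
 * Editorial note: mapping[0] in struct nfit_set_info is the kernel's
 * zero-length trailing-array idiom; the real element count is fixed at
 * allocation time. Assuming sizeof_nfit_set_info() (declared further
 * below) follows the usual convention for this pattern, its body would
 * be a sketch like:
 */
size_t sizeof_nfit_set_info_sketch(int num_mappings)
{
        return sizeof(struct nfit_set_info)
                + num_mappings * sizeof(struct nfit_set_info_map);
}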
135 typedef int ldv_func_ret_type;
170 typedef int ldv_func_ret_type___0;
181 typedef int ldv_func_ret_type___1;
208 typedef int ldv_func_ret_type___2;
1 void * __builtin_memcpy(void *, const void *, unsigned long);
1 long int __builtin_expect(long exp, long c);
216 void __read_once_size(const volatile void *p, void *res, int size);
241 void __write_once_size(volatile void *p, void *res, int size);
33 extern struct module __this_module;
420 bool __static_cpu_has(u16 bit);
72 void set_bit(long nr, volatile unsigned long *addr);
314 int variable_test_bit(long nr, const volatile unsigned long *addr);
53 void __dynamic_dev_dbg(struct _ddebug *, const struct device *, const char *, ...);
431 void print_hex_dump(const char *, const char *, int, int, int, const void *, size_t , bool );
402 int sprintf(char *, const char *, ...);
3 bool ldv_is_err(const void *ptr);
4 bool ldv_is_err_or_null(const void *ptr);
5 void * ldv_err_ptr(long error);
6 long int ldv_ptr_err(const void *ptr);
25 void INIT_LIST_HEAD(struct list_head *list);
48 void __list_add(struct list_head *, struct list_head *, struct list_head *);
61 void list_add(struct list_head *new, struct list_head *head);
75 void list_add_tail(struct list_head *new, struct list_head *head);
112 void __list_del_entry(struct list_head *);
113 void list_del(struct list_head *);
165 void list_move_tail(struct list_head *list, struct list_head *head);
187 int list_empty(const struct list_head *head);
229 int list_is_singular(const struct list_head *head);
234 void __list_cut_position(struct list_head *list, struct list_head *head, struct list_head *entry);
260 void list_cut_position(struct list_head *list, struct list_head *head, struct list_head *entry);
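/*
 * Editorial note: the list_* declarations above are the kernel's
 * intrusive, circular, doubly-linked list API that the nfit_* wrappers
 * later in this trace chain onto (spas, memdevs, dcrs, ...). A minimal
 * self-contained illustration of the core operations (userspace sketch,
 * not the kernel implementation):
 */
struct list_head_sketch { struct list_head_sketch *next, *prev; };

static void sketch_list_init(struct list_head_sketch *list)
{
        list->next = list;      /* an empty list points at itself */
        list->prev = list;
}

static void sketch_list_add(struct list_head_sketch *new,
                struct list_head_sketch *head)
{
        new->next = head->next; /* splice in directly after head */
        new->prev = head;
        head->next->prev = new;
        head->next = new;
}

static void sketch_list_del(struct list_head_sketch *entry)
{
        entry->prev->next = entry->next;
        entry->next->prev = entry->prev;
}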
31 void * __memcpy(void *, const void *, size_t );
56 void * __memset(void *, int, size_t );
62 int memcmp(const void *, const void *, size_t );
66 int strcmp(const char *, const char *);
66 void warn_slowpath_fmt(const char *, const int, const char *, ...);
71 void warn_slowpath_null(const char *, const int);
256 void pcommit_sfence(void);
18 u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder);
23 void * ERR_PTR(long error);
32 long int PTR_ERR(const void *ptr);
41 bool IS_ERR(const void *ptr);
50 bool IS_ERR_OR_NULL(const void *ptr);
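/*
 * Editorial note: ERR_PTR()/PTR_ERR()/IS_ERR() encode a small negative
 * errno in the top page of the address space, and the ldv_err_* twins
 * above are the verifier's models of the same contract. The standard
 * kernel encoding, sketched:
 */
static inline void *sketch_err_ptr(long error)
{
        return (void *)error;           /* e.g. ERR_PTR(-ENOMEM) */
}

static inline long sketch_ptr_err(const void *ptr)
{
        return (long)ptr;
}

static inline int sketch_is_err(const void *ptr)
{
        /* errnos occupy [-4095, -1], i.e. the last 4095 addresses */
        return (unsigned long)ptr >= (unsigned long)-4095L;
}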
15 void __xadd_wrong_size(void);
25 int atomic_read(const atomic_t *v);
37 void atomic_set(atomic_t *v, int i);
79 int atomic_sub_and_test(int i, atomic_t *v);
155 int atomic_add_return(int i, atomic_t *v);
119 void __mutex_init(struct mutex *, const char *, struct lock_class_key *);
128 int mutex_is_locked(struct mutex *lock);
141 int ldv_mutex_is_locked_12(struct mutex *lock);
145 int ldv_mutex_is_locked_13(struct mutex *lock);
149 int ldv_mutex_is_locked_16(struct mutex *lock);
173 int mutex_trylock(struct mutex *);
176 int ldv_mutex_trylock_8(struct mutex *ldv_func_arg1);
178 void mutex_unlock(struct mutex *);
181 void ldv_mutex_unlock_6(struct mutex *ldv_func_arg1);
185 void ldv_mutex_unlock_9(struct mutex *ldv_func_arg1);
189 void ldv_mutex_unlock_10(struct mutex *ldv_func_arg1);
193 void ldv_mutex_unlock_15(struct mutex *ldv_func_arg1);
197 void ldv_mutex_unlock_18(struct mutex *ldv_func_arg1);
201 void ldv_mutex_unlock_20(struct mutex *ldv_func_arg1);
18 void mutex_lock(struct mutex *);
21 void ldv_mutex_lock_5(struct mutex *ldv_func_arg1);
25 void ldv_mutex_lock_7(struct mutex *ldv_func_arg1);
29 void ldv_mutex_lock_11(struct mutex *ldv_func_arg1);
33 void ldv_mutex_lock_14(struct mutex *ldv_func_arg1);
37 void ldv_mutex_lock_17(struct mutex *ldv_func_arg1);
41 void ldv_mutex_lock_19(struct mutex *ldv_func_arg1);
46 void ldv_mutex_lock_i_mutex_of_inode(struct mutex *lock);
50 void ldv_mutex_unlock_i_mutex_of_inode(struct mutex *lock);
59 void ldv_mutex_lock_init_mutex_of_acpi_nfit_desc(struct mutex *lock);
63 void ldv_mutex_unlock_init_mutex_of_acpi_nfit_desc(struct mutex *lock);
72 void ldv_mutex_lock_lock(struct mutex *lock);
76 void ldv_mutex_unlock_lock(struct mutex *lock);
85 void ldv_mutex_lock_mutex_of_device(struct mutex *lock);
86 int ldv_mutex_trylock_mutex_of_device(struct mutex *lock);
89 void ldv_mutex_unlock_mutex_of_device(struct mutex *lock);
98 void ldv_mutex_lock_spa_map_mutex_of_acpi_nfit_desc(struct mutex *lock);
101 int ldv_mutex_is_locked_spa_map_mutex_of_acpi_nfit_desc(struct mutex *lock);
102 void ldv_mutex_unlock_spa_map_mutex_of_acpi_nfit_desc(struct mutex *lock);
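/*
 * Editorial note: the numbered ldv_mutex_* wrappers above are the LDV
 * lock model this error trace exercises; each mutex_lock()/
 * mutex_unlock() call site in the driver is redirected to the wrapper
 * for the specific lock (i_mutex, init_mutex, spa_map_mutex, ...). A
 * plausible shape for one pair, assuming the usual LDV convention of a
 * per-lock state variable checked by an ldv_assert()-style reachability
 * assertion (sketch only, not the generated model body):
 */
extern void ldv_assert_sketch(int expression);

static int ldv_spa_map_mutex_state = 1;                  /* 1 == unlocked */

void ldv_mutex_lock_spa_map_mutex_sketch(struct mutex *lock)
{
        ldv_assert_sketch(ldv_spa_map_mutex_state == 1);  /* no double lock */
        ldv_spa_map_mutex_state = 2;                      /* 2 == locked */
}

void ldv_mutex_unlock_spa_map_mutex_sketch(struct mutex *lock)
{
        ldv_assert_sketch(ldv_spa_map_mutex_state == 2);  /* must be held */
        ldv_spa_map_mutex_state = 1;
}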
8 void list_sort(void *, struct list_head *, int (*)(void *, struct list_head *, struct list_head *));
41 extern struct attribute_group nvdimm_bus_attribute_group;
42 extern struct attribute_group nvdimm_attribute_group;
43 extern struct attribute_group nd_device_attribute_group;
44 extern struct attribute_group nd_numa_attribute_group;
45 extern struct attribute_group nd_region_attribute_group;
46 extern struct attribute_group nd_mapping_attribute_group;
112 struct nd_blk_region_desc * to_blk_region_desc(struct nd_region_desc *ndr_desc);
119 struct nvdimm_bus * __nvdimm_bus_register(struct device *, struct nvdimm_bus_descriptor *, struct module *);
123 void nvdimm_bus_unregister(struct nvdimm_bus *);
124 struct nvdimm_bus * to_nvdimm_bus(struct device *);
125 struct nvdimm * to_nvdimm(struct device *);
126 struct nd_region * to_nd_region(struct device *);
127 struct nd_blk_region * to_nd_blk_region(struct device *);
128 struct nvdimm_bus_descriptor * to_nd_desc(struct nvdimm_bus *);
129 const char * nvdimm_name(struct nvdimm *);
130 void * nvdimm_provider_data(struct nvdimm *);
131 struct nvdimm * nvdimm_create(struct nvdimm_bus *, void *, const struct attribute_group **, unsigned long, unsigned long *);
134 const struct nd_cmd_desc * nd_cmd_dimm_desc(int);
135 const struct nd_cmd_desc * nd_cmd_bus_desc(int);
136 u32 nd_cmd_in_size(struct nvdimm *, int, const struct nd_cmd_desc *, int, void *);
138 u32 nd_cmd_out_size(struct nvdimm *, int, const struct nd_cmd_desc *, int, const u32 *, const u32 *);
141 int nvdimm_bus_check_dimm_count(struct nvdimm_bus *, int);
142 struct nd_region * nvdimm_pmem_region_create(struct nvdimm_bus *, struct nd_region_desc *);
144 struct nd_region * nvdimm_blk_region_create(struct nvdimm_bus *, struct nd_region_desc *);
146 struct nd_region * nvdimm_volatile_region_create(struct nvdimm_bus *, struct nd_region_desc *);
148 void * nd_region_provider_data(struct nd_region *);
149 void * nd_blk_region_provider_data(struct nd_blk_region *);
150 void nd_blk_region_set_provider_data(struct nd_blk_region *, void *);
151 struct nvdimm * nd_blk_region_to_dimm(struct nd_blk_region *);
152 unsigned int nd_region_acquire_lane(struct nd_region *);
153 void nd_region_release_lane(struct nd_region *, unsigned int);
154 u64 nd_fletcher64(void *, size_t , bool );
140 extern struct resource iomem_resource;
193 struct resource * __request_region(struct resource *, resource_size_t , resource_size_t , const char *, int);
202 void __release_region(struct resource *, resource_size_t , resource_size_t );
58 unsigned int readl(const volatile void *addr);
90 unsigned long int readq(const volatile void *addr);
91 void writeq(unsigned long val, volatile void *addr);
181 void * ioremap_nocache(resource_size_t , unsigned long);
197 void iounmap(volatile void *);
32 void kref_init(struct kref *kref);
41 void kref_get(struct kref *kref);
68 int kref_sub(struct kref *kref, unsigned int count, void (*release)(struct kref *));
97 int kref_put(struct kref *kref, void (*release)(struct kref *));
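/*
 * Editorial note: struct nfit_spa_mapping (above) embeds a struct kref,
 * and nfit_spa_mapping_release() (declared below) is its destructor, so
 * these kref_* declarations carry the SPA mapping's lifetime. The kref
 * contract in miniature (the real kref uses atomic_t, not a plain int):
 */
struct kref_sketch { int refcount; };

static void sketch_kref_init(struct kref_sketch *k) { k->refcount = 1; }
static void sketch_kref_get(struct kref_sketch *k)  { k->refcount++; }

/* returns 1 when the count hit zero and release ran */
static int sketch_kref_put(struct kref_sketch *k,
                void (*release)(struct kref_sketch *))
{
        if (--k->refcount == 0) {
                release(k);
                return 1;
        }
        return 0;
}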
87 const char * kobject_name(const struct kobject *kobj);
119 const char * nvdimm_bus_cmd_name(unsigned int cmd);
132 const char * nvdimm_cmd_name(unsigned int cmd);
143 void kfree(const void *);
289 void * __kmalloc(size_t , gfp_t );
428 void * kmalloc(size_t size, gfp_t flags);
591 void * kzalloc(size_t size, gfp_t flags);
647 void * devm_kmalloc(struct device *, size_t , gfp_t );
653 void * devm_kzalloc(struct device *dev, size_t size, gfp_t gfp);
669 void devm_kfree(struct device *, void *);
849 const char * dev_name(const struct device *dev);
896 void * dev_get_drvdata(const struct device *dev);
901 void dev_set_drvdata(struct device *dev, void *data);
955 void device_lock(struct device *dev);
965 void device_unlock(struct device *dev);
1117 void dev_err(const struct device *, const char *, ...);
1119 void dev_warn(const struct device *, const char *, ...);
1123 void _dev_info(const struct device *, const char *, ...);
511 acpi_status acpi_evaluate_object(acpi_handle , acpi_string , struct acpi_object_list *, struct acpi_buffer *);
919 acpi_status acpi_get_table_with_size(acpi_string , u32 , struct acpi_table_header **, acpi_size *);
94 void acpi_os_free(void *memory);
64 bool acpi_check_dsm(acpi_handle , const u8 *, int, u64 );
65 union acpi_object * acpi_evaluate_dsm(acpi_handle , const u8 *, int, int, union acpi_object *);
509 int acpi_bus_register_driver(struct acpi_driver *);
510 void acpi_bus_unregister_driver(struct acpi_driver *);
568 struct acpi_device * acpi_find_child_device(struct acpi_device *, u64 , bool );
77 void * devm_ioremap_nocache(struct device *, resource_size_t , resource_size_t );
154 void * memremap(resource_size_t , size_t , unsigned long);
155 void memunmap(void *);
296 int acpi_map_pxm_to_online_node(int);
361 acpi_status acpi_str_to_uuid(char *, u8 *);
6 void sort(void *, size_t , size_t , int (*)(const void *, const void *), void (*)(void *, void *, int));
216 long int __copy_user_nocache(void *, const void *, unsigned int, int);
227 int __copy_from_user_inatomic_nocache(void *dst, const void *src, unsigned int size);
90 void clflush_cache_range(void *, unsigned int);
32 void arch_memcpy_to_pmem(void *dst, const void *src, size_t n);
57 void arch_wmb_pmem(void);
144 bool __arch_has_wmb_pmem(void);
63 void memcpy_from_pmem(void *dst, const void *src, size_t size);
68 bool arch_has_pmem_api(void);
82 bool arch_has_wmb_pmem(void);
126 void memcpy_to_pmem(void *dst, const void *src, size_t n);
141 void wmb_pmem(void);
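/*
 * Editorial note: memcpy_to_pmem()/wmb_pmem() above are this driver's
 * durability pair for persistent memory: the copy is staged toward
 * pmem, and the barrier (pcommit_sfence() on this kernel's x86 path)
 * makes it durable. The call shape, sketched with illustrative names:
 */
static void sketch_pmem_write(void *pmem_dst, const void *src, size_t n)
{
        memcpy_to_pmem(pmem_dst, src, n);  /* stage data toward pmem */
        wmb_pmem();                        /* flush; write is now durable */
}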
163 struct nfit_spa_mapping * to_spa_map(struct kref *kref);
168 struct acpi_nfit_memory_map * __to_nfit_memdev(struct nfit_mem *nfit_mem);
176 struct acpi_nfit_desc * to_acpi_desc(struct nvdimm_bus_descriptor *nd_desc);
182 const u8 * to_nfit_uuid(enum nfit_uuids id);
183 int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, acpi_size sz);
184 const struct attribute_group *acpi_nfit_attribute_groups[3U];
33 _Bool force_enable_dimms = 0;
46 u8 nfit_uuid[10U][16U] = { };
52 const char __kstrtab_to_nfit_uuid[13U] = { 't', 'o', '_', 'n', 'f', 'i', 't', '_', 'u', 'u', 'i', 'd', '\x0' };
52 const struct kernel_symbol __ksymtab_to_nfit_uuid;
52 const struct kernel_symbol __ksymtab_to_nfit_uuid = { (unsigned long)(&to_nfit_uuid), (const char *)(&__kstrtab_to_nfit_uuid) };
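/*
 * Editorial note: each __kstrtab_*/__ksymtab_* pair in this trace is
 * the preprocessed expansion of an EXPORT_SYMBOL() in the source, e.g.
 *
 *     EXPORT_SYMBOL(to_nfit_uuid);
 *
 * which emits the name string plus a struct kernel_symbol pointing the
 * module's symbol table at &to_nfit_uuid, exactly as spelled out above.
 */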
54 struct acpi_nfit_desc * to_acpi_nfit_desc(struct nvdimm_bus_descriptor *nd_desc);
60 struct acpi_device * to_acpi_dev(struct acpi_nfit_desc *acpi_desc);
75 int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, unsigned int cmd, void *buf, unsigned int buf_len);
203 const char * spa_type_name(u16 type);
223 int nfit_spa_type(struct acpi_nfit_system_address *spa);
233 bool add_spa(struct acpi_nfit_desc *acpi_desc, struct nfit_table_prev *prev, struct acpi_nfit_system_address *spa);
259 bool add_memdev(struct acpi_nfit_desc *acpi_desc, struct nfit_table_prev *prev, struct acpi_nfit_memory_map *memdev);
284 bool add_dcr(struct acpi_nfit_desc *acpi_desc, struct nfit_table_prev *prev, struct acpi_nfit_control_region *dcr);
308 bool add_bdw(struct acpi_nfit_desc *acpi_desc, struct nfit_table_prev *prev, struct acpi_nfit_data_region *bdw);
332 bool add_idt(struct acpi_nfit_desc *acpi_desc, struct nfit_table_prev *prev, struct acpi_nfit_interleave *idt);
356 bool add_flush(struct acpi_nfit_desc *acpi_desc, struct nfit_table_prev *prev, struct acpi_nfit_flush_address *flush);
380 void * add_table(struct acpi_nfit_desc *acpi_desc, struct nfit_table_prev *prev, void *table, const void *end);
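/*
 * Editorial note: add_spa() through add_flush() each wrap one NFIT
 * sub-table in its nfit_* list node (reusing a node from *prev when the
 * previous scan already saw an identical table), and add_table()
 * dispatches on acpi_nfit_header.type while walking the variable-length
 * table. The walk, sketched (control flow assumed; the real dispatch
 * switches into the add_*() helpers above):
 */
static void *sketch_walk_nfit(void *table, const void *end)
{
        while (table < end) {
                struct acpi_nfit_header *hdr = table;

                if (hdr->length == 0)
                        return NULL;            /* malformed sub-table */
                /* switch (hdr->type) { ... add_spa()/add_memdev()/... } */
                table = (u8 *)table + hdr->length;
        }
        return table;
}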
433 void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc, struct nfit_mem *nfit_mem);
466 int nfit_mem_add(struct acpi_nfit_desc *acpi_desc, struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa);
539 int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc, struct acpi_nfit_system_address *spa);
612 int nfit_mem_cmp(void *priv, struct list_head *_a, struct list_head *_b);
627 int nfit_mem_init(struct acpi_nfit_desc *acpi_desc);
652 ssize_t revision_show(struct device *dev, struct device_attribute *attr, char *buf);
661 struct device_attribute dev_attr_revision = { { "revision", 292U, (_Bool)0, 0, { { { 0 }, { 0 }, { 0 }, { 0 }, { 0 }, { 0 }, { 0 }, { 0 } } } }, &revision_show, 0 };
663 struct attribute *acpi_nfit_attributes[2U] = { &(dev_attr_revision.attr), (struct attribute *)0 };
668 struct attribute_group acpi_nfit_attribute_group = { "nfit", 0, 0, (struct attribute **)(&acpi_nfit_attributes), 0 };
673 const struct attribute_group *acpi_nfit_attribute_groups[3U] = { (const struct attribute_group *)(&nvdimm_bus_attribute_group), (const struct attribute_group *)(&acpi_nfit_attribute_group), (const struct attribute_group *)0 };
678 const char __kstrtab_acpi_nfit_attribute_groups[27U] = { 'a', 'c', 'p', 'i', '_', 'n', 'f', 'i', 't', '_', 'a', 't', 't', 'r', 'i', 'b', 'u', 't', 'e', '_', 'g', 'r', 'o', 'u', 'p', 's', '\x0' };
678 const struct kernel_symbol __ksymtab_acpi_nfit_attribute_groups;
678 const struct kernel_symbol __ksymtab_acpi_nfit_attribute_groups = { (unsigned long)(&acpi_nfit_attribute_groups), (const char *)(&__kstrtab_acpi_nfit_attribute_groups) };
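/*
 * Editorial note: dev_attr_revision above is a flattened
 * DEVICE_ATTR_RO-style initializer (mode 292U == 0444, i.e. read-only
 * in sysfs), exposed under the "nfit" attribute group. A plausible body
 * for revision_show(), assuming it reports the NFIT header revision via
 * the helpers declared in this trace (sketch, not confirmed source):
 */
static ssize_t sketch_revision_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
        struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
        struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

        return sprintf(buf, "%d\n", acpi_desc->nfit->header.revision);
}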
680 struct acpi_nfit_memory_map * to_nfit_memdev(struct device *dev);
688 struct acpi_nfit_control_region * to_nfit_dcr(struct device *dev);
696 ssize_t handle_show(struct device *dev, struct device_attribute *attr, char *buf);
703 struct device_attribute dev_attr_handle = { { "handle", 292U, (_Bool)0, 0, { { { 0 }, { 0 }, { 0 }, { 0 }, { 0 }, { 0 }, { 0 }, { 0 } } } }, &handle_show, 0 };
705 ssize_t phys_id_show(struct device *dev, struct device_attribute *attr, char *buf);
712 struct device_attribute dev_attr_phys_id = { { "phys_id", 292U, (_Bool)0, 0, { { { 0 }, { 0 }, { 0 }, { 0 }, { 0 }, { 0 }, { 0 }, { 0 } } } }, &phys_id_show, 0 };
714 ssize_t vendor_show(struct device *dev, struct device_attribute *attr, char *buf);
721 struct device_attribute dev_attr_vendor = { { "vendor", 292U, (_Bool)0, 0, { { { 0 }, { 0 }, { 0 }, { 0 }, { 0 }, { 0 }, { 0 }, { 0 } } } }, &vendor_show, 0 };
723 ssize_t rev_id_show(struct device *dev, struct device_attribute *attr, char *buf);
730 struct device_attribute dev_attr_rev_id = { { "rev_id", 292U, (_Bool)0, 0, { { { 0 }, { 0 }, { 0 }, { 0 }, { 0 }, { 0 }, { 0 }, { 0 } } } }, &rev_id_show, 0 };
732 ssize_t device_show(struct device *dev, struct device_attribute *attr, char *buf);
739 struct device_attribute dev_attr_device = { { "device", 292U, (_Bool)0, 0, { { { 0 }, { 0 }, { 0 }, { 0 }, { 0 }, { 0 }, { 0 }, { 0 } } } }, &device_show, 0 };
741 ssize_t format_show(struct device *dev, struct device_attribute *attr, char *buf);
748 struct device_attribute dev_attr_format = { { "format", 292U, (_Bool)0, 0, { { { 0 }, { 0 }, { 0 }, { 0 }, { 0 }, { 0 }, { 0 }, { 0 } } } }, &format_show, 0 };
750 ssize_t serial_show(struct device *dev, struct device_attribute *attr, char *buf);
757 struct device_attribute dev_attr_serial = { { "serial", 292U, (_Bool)0, 0, { { { 0 }, { 0 }, { 0 }, { 0 }, { 0 }, { 0 }, { 0 }, { 0 } } } }, &serial_show, 0 };
759 ssize_t flags_show(struct device *dev, struct device_attribute *attr, char *buf);
771 struct device_attribute dev_attr_flags = { { "flags", 292U, (_Bool)0, 0, { { { 0 }, { 0 }, { 0 }, { 0 }, { 0 }, { 0 }, { 0 }, { 0 } } } }, &flags_show, 0 };
773 struct attribute *acpi_nfit_dimm_attributes[9U] = { &(dev_attr_handle.attr), &(dev_attr_phys_id.attr), &(dev_attr_vendor.attr), &(dev_attr_device.attr), &(dev_attr_format.attr), &(dev_attr_serial.attr), &(dev_attr_rev_id.attr), &(dev_attr_flags.attr), (struct attribute *)0 };
785 umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj, struct attribute *a, int n);
796 struct attribute_group acpi_nfit_dimm_attribute_group = { "nfit", &acpi_nfit_dimm_attr_visible, 0, (struct attribute **)(&acpi_nfit_dimm_attributes), 0 };
802 const struct attribute_group *acpi_nfit_dimm_attribute_groups[4U] = { (const struct attribute_group *)(&nvdimm_attribute_group), (const struct attribute_group *)(&nd_device_attribute_group), (const struct attribute_group *)(&acpi_nfit_dimm_attribute_group), (const struct attribute_group *)0 };
809 struct nvdimm * acpi_nfit_dimm_by_handle(struct acpi_nfit_desc *acpi_desc, u32 device_handle);
821 int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc, struct nfit_mem *nfit_mem, u32 device_handle);
849 int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc);
903 void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc);
920 ssize_t range_index_show(struct device *dev, struct device_attribute *attr, char *buf);
928 struct device_attribute dev_attr_range_index = { { "range_index", 292U, (_Bool)0, 0, { { { 0 }, { 0 }, { 0 }, { 0 }, { 0 }, { 0 }, { 0 }, { 0 } } } }, &range_index_show, 0 };
930 struct attribute *acpi_nfit_region_attributes[2U] = { &(dev_attr_range_index.attr), (struct attribute *)0 };
935 struct attribute_group acpi_nfit_region_attribute_group = { "nfit", 0, 0, (struct attribute **)(&acpi_nfit_region_attributes), 0 };
940 const struct attribute_group *acpi_nfit_region_attribute_groups[6U] = { (const struct attribute_group *)(&nd_region_attribute_group), (const struct attribute_group *)(&nd_mapping_attribute_group), (const struct attribute_group *)(&nd_device_attribute_group), (const struct attribute_group *)(&nd_numa_attribute_group), (const struct attribute_group *)(&acpi_nfit_region_attribute_group), (const struct attribute_group *)0 };
958 size_t sizeof_nfit_set_info(int num_mappings);
964 int cmp_map(const void *m0, const void *m1);
974 struct acpi_nfit_memory_map * memdev_from_spa(struct acpi_nfit_desc *acpi_desc, u16 range_index, int n);
986 int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc, struct nd_region_desc *ndr_desc, struct acpi_nfit_system_address *spa);
1034 u64 to_interleave_offset(u64 offset, struct nfit_blk_mmio *mmio);
1049 void wmb_blk(struct nfit_blk *nfit_blk);
1067 u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw);
1078 void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw, resource_size_t dpa, unsigned int len, unsigned int write);
1107 int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk, resource_size_t dpa, void *iobuf, size_t len, int rw, unsigned int lane);
1158 int acpi_nfit_blk_region_do_io(struct nd_blk_region *ndbr, resource_size_t dpa, void *iobuf, u64 len, int rw);
1184 void nfit_spa_mapping_release(struct kref *kref);
1201 struct nfit_spa_mapping * find_spa_mapping(struct acpi_nfit_desc *acpi_desc, struct acpi_nfit_system_address *spa);
1215 void nfit_spa_unmap(struct acpi_nfit_desc *acpi_desc, struct acpi_nfit_system_address *spa);
1228 void * __nfit_spa_map(struct acpi_nfit_desc *acpi_desc, struct acpi_nfit_system_address *spa, enum spa_map_type type);
1291 void * nfit_spa_map(struct acpi_nfit_desc *acpi_desc, struct acpi_nfit_system_address *spa, enum spa_map_type type);
1303 int nfit_blk_init_interleave(struct nfit_blk_mmio *mmio, struct acpi_nfit_interleave *idt, u16 interleave_ways);
1318 int acpi_nfit_blk_get_flags(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, struct nfit_blk *nfit_blk);
1340 int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus, struct device *dev);
1448 void acpi_nfit_blk_region_disable(struct nvdimm_bus *nvdimm_bus, struct device *dev);
1471 int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc, struct nd_mapping *nd_mapping, struct nd_region_desc *ndr_desc, struct acpi_nfit_memory_map *memdev, struct acpi_nfit_system_address *spa);
1521 int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc, struct nfit_spa *nfit_spa);
1594 int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc);
1607 int acpi_nfit_check_deletions(struct acpi_nfit_desc *acpi_desc, struct nfit_table_prev *prev);
1688 const char __kstrtab_acpi_nfit_init[15U] = { 'a', 'c', 'p', 'i', '_', 'n', 'f', 'i', 't', '_', 'i', 'n', 'i', 't', '\x0' };
1688 const struct kernel_symbol __ksymtab_acpi_nfit_init;
1688 const struct kernel_symbol __ksymtab_acpi_nfit_init = { (unsigned long)(&acpi_nfit_init), (const char *)(&__kstrtab_acpi_nfit_init) };
1690 struct acpi_nfit_desc * acpi_nfit_desc_init(struct acpi_device *adev);
1728 int acpi_nfit_add(struct acpi_device *adev);
1769 int acpi_nfit_remove(struct acpi_device *adev);
1777 void acpi_nfit_notify(struct acpi_device *adev, u32 event);
1825 const struct acpi_device_id acpi_nfit_ids[2U] = { { { 'A', 'C', 'P', 'I', '0', '0', '1', '2', '\x0' }, 0UL, 0U, 0U }, { { '\x0' }, 0UL, 0U, 0U } };
1829 const struct acpi_device_id __mod_acpi__acpi_nfit_ids_device_table[2U] = { };
1831 struct acpi_driver acpi_nfit_driver = { { 'n', 'f', 'i', 't', '\x0' }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, (const struct acpi_device_id *)(&acpi_nfit_ids), 0U, { &acpi_nfit_add, &acpi_nfit_remove, &acpi_nfit_notify }, { 0, 0, 0, 0, (_Bool)0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, 0 };
1841 int nfit_init();
1865 void nfit_exit();
1891 void ldv_check_final_state();
1900 void ldv_initialize();
1903 void ldv_handler_precall();
1906 int nondet_int();
1909 int LDV_IN_INTERRUPT = 0;
1912 void ldv_main0_sequence_infinite_withcheck_stateful();
10 void ldv_error();
20 void ldv_stop();
25 int ldv_undef_int();
30 int ldv_undef_int_negative();
8 int ldv_mutex_i_mutex_of_inode = 1;
11 int ldv_mutex_lock_interruptible_i_mutex_of_inode(struct mutex *lock);
37 int ldv_mutex_lock_killable_i_mutex_of_inode(struct mutex *lock);
72 int ldv_mutex_trylock_i_mutex_of_inode(struct mutex *lock);
98 int ldv_atomic_dec_and_mutex_lock_i_mutex_of_inode(atomic_t *cnt, struct mutex *lock);
123 int ldv_mutex_is_locked_i_mutex_of_inode(struct mutex *lock);
163 void ldv_usb_lock_device_i_mutex_of_inode();
170 int ldv_usb_trylock_device_i_mutex_of_inode();
176 int ldv_usb_lock_device_for_reset_i_mutex_of_inode();
189 void ldv_usb_unlock_device_i_mutex_of_inode();
194 int ldv_mutex_init_mutex_of_acpi_nfit_desc = 1;
197 int ldv_mutex_lock_interruptible_init_mutex_of_acpi_nfit_desc(struct mutex *lock);
223 int ldv_mutex_lock_killable_init_mutex_of_acpi_nfit_desc(struct mutex *lock);
258 int ldv_mutex_trylock_init_mutex_of_acpi_nfit_desc(struct mutex *lock);
284 int ldv_atomic_dec_and_mutex_lock_init_mutex_of_acpi_nfit_desc(atomic_t *cnt, struct mutex *lock);
309 int ldv_mutex_is_locked_init_mutex_of_acpi_nfit_desc(struct mutex *lock);
349 void ldv_usb_lock_device_init_mutex_of_acpi_nfit_desc();
356 int ldv_usb_trylock_device_init_mutex_of_acpi_nfit_desc();
362 int ldv_usb_lock_device_for_reset_init_mutex_of_acpi_nfit_desc();
375 void ldv_usb_unlock_device_init_mutex_of_acpi_nfit_desc();
380 int ldv_mutex_lock = 1;
383 int ldv_mutex_lock_interruptible_lock(struct mutex *lock);
409 int ldv_mutex_lock_killable_lock(struct mutex *lock);
444 int ldv_mutex_trylock_lock(struct mutex *lock);
470 int ldv_atomic_dec_and_mutex_lock_lock(atomic_t *cnt, struct mutex *lock);
495 int ldv_mutex_is_locked_lock(struct mutex *lock);
535 void ldv_usb_lock_device_lock();
542 int ldv_usb_trylock_device_lock();
548 int ldv_usb_lock_device_for_reset_lock();
561 void ldv_usb_unlock_device_lock();
566 int ldv_mutex_mutex_of_device = 1;
569 int ldv_mutex_lock_interruptible_mutex_of_device(struct mutex *lock);
595 int ldv_mutex_lock_killable_mutex_of_device(struct mutex *lock);
656 int ldv_atomic_dec_and_mutex_lock_mutex_of_device(atomic_t *cnt, struct mutex *lock);
681 int ldv_mutex_is_locked_mutex_of_device(struct mutex *lock);
721 void ldv_usb_lock_device_mutex_of_device();
728 int ldv_usb_trylock_device_mutex_of_device();
734 int ldv_usb_lock_device_for_reset_mutex_of_device();
747 void ldv_usb_unlock_device_mutex_of_device();
752 int ldv_mutex_spa_map_mutex_of_acpi_nfit_desc = 1;
755 int ldv_mutex_lock_interruptible_spa_map_mutex_of_acpi_nfit_desc(struct mutex *lock);
781 int ldv_mutex_lock_killable_spa_map_mutex_of_acpi_nfit_desc(struct mutex *lock);
816 int ldv_mutex_trylock_spa_map_mutex_of_acpi_nfit_desc(struct mutex *lock);
842 int ldv_atomic_dec_and_mutex_lock_spa_map_mutex_of_acpi_nfit_desc(atomic_t *cnt, struct mutex *lock);
907 void ldv_usb_lock_device_spa_map_mutex_of_acpi_nfit_desc();
914 int ldv_usb_trylock_device_spa_map_mutex_of_acpi_nfit_desc();
920 int ldv_usb_lock_device_for_reset_spa_map_mutex_of_acpi_nfit_desc();
933 void ldv_usb_unlock_device_spa_map_mutex_of_acpi_nfit_desc();
return ;
}
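The block above is the verifier's initialization pass: alongside the driver's declarations it sets up the LDV locking model. Every mutex tracked by the rule instrumentation (i_mutex_of_inode, init_mutex_of_acpi_nfit_desc, lock, mutex_of_device, spa_map_mutex_of_acpi_nfit_desc) gets an integer state variable initialized to 1, meaning unlocked; 2 means locked. A minimal sketch of that model follows; the lock half is taken from the model body replayed later in this trace, while the unlock half does not appear on this error path and is an assumption about the usual LDV encoding.

/* Sketch of the LDV mutex model for dev->mutex, as seen in this trace.
 * State: 1 = unlocked, 2 = locked. The real mutex_lock() is invoked by
 * the ldv_mutex_lock_7() wrapper, not by the model function itself. */
int ldv_mutex_mutex_of_device = 1;

void ldv_mutex_lock_mutex_of_device(struct mutex *lock)
{
    if (ldv_mutex_mutex_of_device != 1)
        ldv_error();            /* lock while already locked */
    ldv_mutex_mutex_of_device = 2;
}

/* Assumed counterpart; not exercised on this error path. */
void ldv_mutex_unlock_mutex_of_device(struct mutex *lock)
{
    if (ldv_mutex_mutex_of_device != 2)
        ldv_error();            /* unlock without a matching lock */
    ldv_mutex_mutex_of_device = 1;
}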
-entry_point
{
1914 struct kobject *var_group1;
1915 struct attribute *var_group2;
1916 int var_acpi_nfit_dimm_attr_visible_29_p2;
1917 struct acpi_device *var_group3;
1918 unsigned int var_acpi_nfit_notify_62_p1;
1919 int ldv_s_acpi_nfit_driver_acpi_driver;
1920 int tmp;
1921 int tmp___0;
1922 int tmp___1;
1966 ldv_s_acpi_nfit_driver_acpi_driver = 0;
1946 LDV_IN_INTERRUPT = 1;
1955 ldv_initialize() { /* Function call is skipped because the function is undefined */}
1961 ldv_handler_precall() { /* Function call is skipped because the function is undefined */}
1962 -nfit_init()
{
1843 int tmp;
1851 acpi_str_to_uuid((char *)"7305944f-fdda-44e3-b16c-3f22d252e5d0", (u8 *)(&nfit_uuid)) { /* Function call is skipped because the function is undefined */}
1852 acpi_str_to_uuid((char *)"66f0d379-b4f3-4074-ac43-0d3318b78cdb", ((u8 *)(&nfit_uuid)) + 1U) { /* Function call is skipped because the function is undefined */}
1853 acpi_str_to_uuid((char *)"92f701f6-13b4-405d-910b-299367e8234c", ((u8 *)(&nfit_uuid)) + 2U) { /* Function call is skipped because the function is undefined */}
1854 acpi_str_to_uuid((char *)"91af0530-5d86-470e-a6b0-0a2db9408249", ((u8 *)(&nfit_uuid)) + 3U) { /* Function call is skipped because the function is undefined */}
1855 acpi_str_to_uuid((char *)"77ab535a-45fc-624b-5560-f7b281d1f96e", ((u8 *)(&nfit_uuid)) + 4U) { /* Function call is skipped because the function is undefined */}
1856 acpi_str_to_uuid((char *)"3d5abd30-4175-87ce-6d64-d2ade523c4bb", ((u8 *)(&nfit_uuid)) + 5U) { /* Function call is skipped because the function is undefined */}
1857 acpi_str_to_uuid((char *)"5cea02c9-4d07-69d3-269f-4496fbe096f9", ((u8 *)(&nfit_uuid)) + 6U) { /* Function call is skipped because the function is undefined */}
1858 acpi_str_to_uuid((char *)"08018188-42cd-bb48-100f-5387d53ded3d", ((u8 *)(&nfit_uuid)) + 7U) { /* Function call is skipped because the function is undefined */}
1859 acpi_str_to_uuid((char *)"2f10e7a4-9e91-11e4-89d3-123b93f75cba", ((u8 *)(&nfit_uuid)) + 8U) { /* Function call is skipped because the function is undefined */}
1860 acpi_str_to_uuid((char *)"4309ac30-0d11-11e4-9191-0800200c9a66", ((u8 *)(&nfit_uuid)) + 9U) { /* Function call is skipped because the function is undefined */}
1862 tmp = acpi_bus_register_driver(&acpi_nfit_driver) { /* Function call is skipped because the function is undefined */}
1862 return tmp;
}
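nfit_init() registers the ten NFIT UUIDs and then registers acpi_nfit_driver; acpi_bus_register_driver() is undefined in the model, so its return value tmp is unconstrained and the assume below picks the successful branch (tmp == 0). Control then enters the generated main loop, which nondeterministically dispatches the driver's callbacks. A sketch of that loop, reconstructed from this trace, is shown below; the mapping of cases 0-2 to acpi_nfit_add(), acpi_nfit_remove(), and acpi_nfit_dimm_attr_visible() is an assumption based on the declared variables, since only case 3 is exercised on this path.

/* Sketch of ldv_main0_sequence_infinite_withcheck_stateful() as replayed
 * in this trace; nondet_int() models an arbitrary environment choice. */
ldv_initialize();
ldv_handler_precall();
if (nfit_init() != 0)
    return;
while (nondet_int()) {
    switch (nondet_int()) {
    case 0: /* assumed: acpi_nfit_add(var_group3) */
        break;
    case 1: /* assumed: acpi_nfit_remove(var_group3) */
        break;
    case 2: /* assumed: acpi_nfit_dimm_attr_visible(var_group1, var_group2, ...) */
        break;
    case 3:
        ldv_handler_precall();
        acpi_nfit_notify(var_group3, var_acpi_nfit_notify_62_p1);
        break;
    }
}
ldv_check_final_state();   /* declared in the harness; assumed to run after the loop */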
1962 assume(!(tmp != 0));
1970 goto ldv_34702;
1970 tmp___1 = nondet_int() { /* Function call is skipped because the function is undefined */}
1970 assume(tmp___1 != 0);
1973 goto ldv_34701;
1971 ldv_34701:;
1974 tmp___0 = nondet_int() { /* Function call is skipped because the function is undefined */}
1974 switch (tmp___0)
1975 assume(!(tmp___0 == 0));
1993 assume(!(tmp___0 == 1));
2010 assume(!(tmp___0 == 2));
2027 assume(tmp___0 == 3);
2035 ldv_handler_precall() { /* Function call is skipped because the function is undefined */}
2036 -acpi_nfit_notify(var_group3, var_acpi_nfit_notify_62_p1)
{
1779 struct acpi_nfit_desc *acpi_desc;
1780 void *tmp;
1781 struct acpi_buffer buf;
1782 struct acpi_table_nfit *nfit_saved;
1783 struct device *dev;
1784 unsigned int status;
1785 int ret;
1786 struct _ddebug descriptor;
1787 long tmp___0;
1788 struct _ddebug descriptor___0;
1789 long tmp___1;
1790 long tmp___2;
1791 _Bool tmp___3;
1779 -dev_get_drvdata((const struct device *)(&(adev->dev)))
{
898 void *__CPAchecker_TMP_0 = (void *)(dev->driver_data);
898 return __CPAchecker_TMP_0;
}
1779 acpi_desc = (struct acpi_nfit_desc *)tmp;
1780 buf.length = 18446744073709551615ULL;
1780 buf.pointer = (void *)0;
1782 dev = &(adev->dev);
1786 descriptor.modname = "nfit";
1786 descriptor.function = "acpi_nfit_notify";
1786 descriptor.filename = "/home/ldvuser/ref_launch/work/current--X--drivers--X--defaultlinux-4.4-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.4-rc1.tar.xz/csd_deg_dscv/698/dscv_tempdir/dscv/ri/32_7a/drivers/acpi/nfit.c";
1786 descriptor.format = "%s: event: %d\n";
1786 descriptor.lineno = 1786U;
1786 descriptor.flags = 0U;
1786 -__builtin_expect(((long)(descriptor.flags)) & 1L, 0L)
{
52 return exp;
}
1786 assume(!(tmp___0 != 0L));
1788 -device_lock(dev)
{
957 -ldv_mutex_lock_7(&(dev->mutex))
{
128 -ldv_mutex_lock_mutex_of_device(ldv_func_arg1)
{
624 assume(!(ldv_mutex_mutex_of_device != 1));
626 ldv_mutex_mutex_of_device = 2;
627 return ;
}
130 mutex_lock(ldv_func_arg1) { /* Function call is skipped because the function is undefined */}
131 return ;
}
958 return ;
}
1789 unsigned long __CPAchecker_TMP_0 = (unsigned long)(dev->driver);
1789 assume(__CPAchecker_TMP_0 == ((unsigned long)((struct device_driver *)0)));
1791 descriptor___0.modname = "nfit";
1791 descriptor___0.function = "acpi_nfit_notify";
1791 descriptor___0.filename = "/home/ldvuser/ref_launch/work/current--X--drivers--X--defaultlinux-4.4-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.4-rc1.tar.xz/csd_deg_dscv/698/dscv_tempdir/dscv/ri/32_7a/drivers/acpi/nfit.c";
1791 descriptor___0.format = "%s: no driver found for dev\n";
1791 descriptor___0.lineno = 1791U;
1791 descriptor___0.flags = 0U;
1791 -__builtin_expect(((long)(descriptor___0.flags)) & 1L, 0L)
{
52 return exp;
}
1791 assume(!(tmp___1 != 0L));
1792 return ;
}
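On this first invocation dev->driver is NULL (the assume at source line 1789), so acpi_nfit_notify() takes the no-driver early exit at line 1792 and returns while dev->mutex is still held: ldv_mutex_mutex_of_device remains 2. Reconstructed from the trace (nfit.c, around lines 1788-1792), the path just taken is:

device_lock(dev);
if (!dev->driver) {
    dev_dbg(dev, "%s: no driver found for dev\n", __func__);
    return;             /* BUG: returns without device_unlock(dev) */
}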
2043 goto ldv_34696;
2046 ldv_34696:;
2047 ldv_34702:;
1970 tmp___1 = nondet_int() { /* Function call is skipped because the function is undefined */}
1970 assume(tmp___1 != 0);
1973 goto ldv_34701;
1971 ldv_34701:;
1974 tmp___0 = nondet_int() { /* Function call is skipped because the function is undefined */}
1974 switch (tmp___0)
1975 assume(!(tmp___0 == 0));
1993 assume(!(tmp___0 == 1));
2010 assume(!(tmp___0 == 2));
2027 assume(tmp___0 == 3);
2035 ldv_handler_precall() { /* Function call is skipped because the function is undefined */}
2036 -acpi_nfit_notify(var_group3, var_acpi_nfit_notify_62_p1)
{
1779 struct acpi_nfit_desc *acpi_desc;
1780 void *tmp;
1781 struct acpi_buffer buf;
1782 struct acpi_table_nfit *nfit_saved;
1783 struct device *dev;
1784 unsigned int status;
1785 int ret;
1786 struct _ddebug descriptor;
1787 long tmp___0;
1788 struct _ddebug descriptor___0;
1789 long tmp___1;
1790 long tmp___2;
1791 _Bool tmp___3;
1779 -dev_get_drvdata((const struct device *)(&(adev->dev)))
{
898 void *__CPAchecker_TMP_0 = (void *)(dev->driver_data);
898 return __CPAchecker_TMP_0;
}
1779 acpi_desc = (struct acpi_nfit_desc *)tmp;
1780 buf.length = 18446744073709551615ULL;
1780 buf.pointer = (void *)0;
1782 dev = &(adev->dev);
1786 descriptor.modname = "nfit";
1786 descriptor.function = "acpi_nfit_notify";
1786 descriptor.filename = "/home/ldvuser/ref_launch/work/current--X--drivers--X--defaultlinux-4.4-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.4-rc1.tar.xz/csd_deg_dscv/698/dscv_tempdir/dscv/ri/32_7a/drivers/acpi/nfit.c";
1786 descriptor.format = "%s: event: %d\n";
1786 descriptor.lineno = 1786U;
1786 descriptor.flags = 0U;
1786 -__builtin_expect(((long)(descriptor.flags)) & 1L, 0L)
{
52 return exp;
}
1786 assume(!(tmp___0 != 0L));
1788 -device_lock(dev)
{
957 -ldv_mutex_lock_7(&(dev->mutex))
{
128 -ldv_mutex_lock_mutex_of_device(ldv_func_arg1)
{
624 assume(ldv_mutex_mutex_of_device != 1);
624 -ldv_error()
{
15 LDV_ERROR:;
}
}
}
}
}
}
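The second loop iteration picks case 3 again. acpi_nfit_notify() calls device_lock() on the same device, the model sees ldv_mutex_mutex_of_device != 1, and ldv_error() is reached: a double acquisition of dev->mutex, caused by the missing unlock on the first invocation's early-return path. A minimal fix sketch, given as an illustration and not necessarily the upstream patch, releases the lock before returning:

device_lock(dev);
if (!dev->driver) {
    dev_dbg(dev, "%s: no driver found for dev\n", __func__);
    device_unlock(dev);     /* release before the early return */
    return;
}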
Source code
(The report's source pane reproduced drivers/acpi/nfit.c from linux-4.4-rc1 here as a single flattened run of text with line numbers fused into the code, truncated at source line 1700, before acpi_nfit_notify(), the function on the error path. The unreadable listing is omitted; every statement relevant to the bug appears, with its source line number, in the trace above.)
1701 acpi_desc->dev = dev; 1702 acpi_desc->blk_do_io = acpi_nfit_blk_region_do_io; 1703 nd_desc = &acpi_desc->nd_desc; 1704 nd_desc->provider_name = "ACPI.NFIT"; 1705 nd_desc->ndctl = acpi_nfit_ctl; 1706 nd_desc->attr_groups = acpi_nfit_attribute_groups; 1707 1708 acpi_desc->nvdimm_bus = nvdimm_bus_register(dev, nd_desc); 1709 if (!acpi_desc->nvdimm_bus) { 1710 devm_kfree(dev, acpi_desc); 1711 return ERR_PTR(-ENXIO); 1712 } 1713 1714 INIT_LIST_HEAD(&acpi_desc->spa_maps); 1715 INIT_LIST_HEAD(&acpi_desc->spas); 1716 INIT_LIST_HEAD(&acpi_desc->dcrs); 1717 INIT_LIST_HEAD(&acpi_desc->bdws); 1718 INIT_LIST_HEAD(&acpi_desc->idts); 1719 INIT_LIST_HEAD(&acpi_desc->flushes); 1720 INIT_LIST_HEAD(&acpi_desc->memdevs); 1721 INIT_LIST_HEAD(&acpi_desc->dimms); 1722 mutex_init(&acpi_desc->spa_map_mutex); 1723 mutex_init(&acpi_desc->init_mutex); 1724 1725 return acpi_desc; 1726 } 1727 1728 static int acpi_nfit_add(struct acpi_device *adev) 1729 { 1730 struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; 1731 struct acpi_nfit_desc *acpi_desc; 1732 struct device *dev = &adev->dev; 1733 struct acpi_table_header *tbl; 1734 acpi_status status = AE_OK; 1735 acpi_size sz; 1736 int rc; 1737 1738 status = acpi_get_table_with_size("NFIT", 0, &tbl, &sz); 1739 if (ACPI_FAILURE(status)) { 1740 /* This is ok, we could have an nvdimm hotplugged later */ 1741 dev_dbg(dev, "failed to find NFIT at startup\n"); 1742 return 0; 1743 } 1744 1745 acpi_desc = acpi_nfit_desc_init(adev); 1746 if (IS_ERR(acpi_desc)) { 1747 dev_err(dev, "%s: error initializing acpi_desc: %ld\n", 1748 __func__, PTR_ERR(acpi_desc)); 1749 return PTR_ERR(acpi_desc); 1750 } 1751 1752 acpi_desc->nfit = (struct acpi_table_nfit *) tbl; 1753 1754 /* Evaluate _FIT and override with that if present */ 1755 status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf); 1756 if (ACPI_SUCCESS(status) && buf.length > 0) { 1757 acpi_desc->nfit = (struct acpi_table_nfit *)buf.pointer; 1758 sz = buf.length; 1759 } 1760 1761 rc = acpi_nfit_init(acpi_desc, sz); 1762 if (rc) { 1763 nvdimm_bus_unregister(acpi_desc->nvdimm_bus); 1764 return rc; 1765 } 1766 return 0; 1767 } 1768 1769 static int acpi_nfit_remove(struct acpi_device *adev) 1770 { 1771 struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev); 1772 1773 nvdimm_bus_unregister(acpi_desc->nvdimm_bus); 1774 return 0; 1775 } 1776 1777 static void acpi_nfit_notify(struct acpi_device *adev, u32 event) 1778 { 1779 struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev); 1780 struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; 1781 struct acpi_table_nfit *nfit_saved; 1782 struct device *dev = &adev->dev; 1783 acpi_status status; 1784 int ret; 1785 1786 dev_dbg(dev, "%s: event: %d\n", __func__, event); 1787 1788 device_lock(dev); 1789 if (!dev->driver) { 1790 /* dev->driver may be null if we're being removed */ 1791 dev_dbg(dev, "%s: no driver found for dev\n", __func__); 1792 return; 1793 } 1794 1795 if (!acpi_desc) { 1796 acpi_desc = acpi_nfit_desc_init(adev); 1797 if (IS_ERR(acpi_desc)) { 1798 dev_err(dev, "%s: error initializing acpi_desc: %ld\n", 1799 __func__, PTR_ERR(acpi_desc)); 1800 goto out_unlock; 1801 } 1802 } 1803 1804 /* Evaluate _FIT */ 1805 status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf); 1806 if (ACPI_FAILURE(status)) { 1807 dev_err(dev, "failed to evaluate _FIT\n"); 1808 goto out_unlock; 1809 } 1810 1811 nfit_saved = acpi_desc->nfit; 1812 acpi_desc->nfit = (struct acpi_table_nfit *)buf.pointer; 1813 ret = acpi_nfit_init(acpi_desc, buf.length); 1814 if (ret) { 1815
/* Merge failed, restore old nfit, and exit */ 1816 acpi_desc->nfit = nfit_saved; 1817 dev_err(dev, "failed to merge updated NFIT\n"); 1818 } 1819 kfree(buf.pointer); 1820 1821 out_unlock: 1822 device_unlock(dev); 1823 } 1824 1825 static const struct acpi_device_id acpi_nfit_ids[] = { 1826 { "ACPI0012", 0 }, 1827 { "", 0 }, 1828 }; 1829 MODULE_DEVICE_TABLE(acpi, acpi_nfit_ids); 1830 1831 static struct acpi_driver acpi_nfit_driver = { 1832 .name = KBUILD_MODNAME, 1833 .ids = acpi_nfit_ids, 1834 .ops = { 1835 .add = acpi_nfit_add, 1836 .remove = acpi_nfit_remove, 1837 .notify = acpi_nfit_notify, 1838 }, 1839 }; 1840 1841 static __init int nfit_init(void) 1842 { 1843 BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40); 1844 BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 56); 1845 BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48); 1846 BUILD_BUG_ON(sizeof(struct acpi_nfit_interleave) != 20); 1847 BUILD_BUG_ON(sizeof(struct acpi_nfit_smbios) != 9); 1848 BUILD_BUG_ON(sizeof(struct acpi_nfit_control_region) != 80); 1849 BUILD_BUG_ON(sizeof(struct acpi_nfit_data_region) != 40); 1850 1851 acpi_str_to_uuid(UUID_VOLATILE_MEMORY, nfit_uuid[NFIT_SPA_VOLATILE]); 1852 acpi_str_to_uuid(UUID_PERSISTENT_MEMORY, nfit_uuid[NFIT_SPA_PM]); 1853 acpi_str_to_uuid(UUID_CONTROL_REGION, nfit_uuid[NFIT_SPA_DCR]); 1854 acpi_str_to_uuid(UUID_DATA_REGION, nfit_uuid[NFIT_SPA_BDW]); 1855 acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_VDISK]); 1856 acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_CD, nfit_uuid[NFIT_SPA_VCD]); 1857 acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_PDISK]); 1858 acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_CD, nfit_uuid[NFIT_SPA_PCD]); 1859 acpi_str_to_uuid(UUID_NFIT_BUS, nfit_uuid[NFIT_DEV_BUS]); 1860 acpi_str_to_uuid(UUID_NFIT_DIMM, nfit_uuid[NFIT_DEV_DIMM]); 1861 1862 return acpi_bus_register_driver(&acpi_nfit_driver); 1863 } 1864 1865 static __exit void nfit_exit(void) 1866 { 1867 acpi_bus_unregister_driver(&acpi_nfit_driver); 1868 } 1869 1870 module_init(nfit_init); 1871 module_exit(nfit_exit); 1872 MODULE_LICENSE("GPL v2"); 1873 MODULE_AUTHOR("Intel Corporation"); 1874 1875 1876 1877 1878 1879 /* LDV_COMMENT_BEGIN_MAIN */ 1880 #ifdef LDV_MAIN0_sequence_infinite_withcheck_stateful 1881 1882 /*###########################################################################*/ 1883 1884 /*############## Driver Environment Generator 0.2 output ####################*/ 1885 1886 /*###########################################################################*/ 1887 1888 1889 1890 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Tests whether all kernel resources are correctly released by the driver before it is unloaded. */ 1891 void ldv_check_final_state(void); 1892 1893 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Tests that a return value is correct. */ 1894 void ldv_check_return_value(int res); 1895 1896 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Tests that the return value of the probe() function is correct. */ 1897 void ldv_check_return_value_probe(int res); 1898 1899 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Initializes the model. */ 1900 void ldv_initialize(void); 1901 1902 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Reinitializes the model between distinct model function calls. */ 1903 void ldv_handler_precall(void); 1904 1905 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Returns an arbitrary integer value.
*/ 1906 int nondet_int(void); 1907 1908 /* LDV_COMMENT_VAR_DECLARE_LDV Special variable for LDV verifier. */ 1909 int LDV_IN_INTERRUPT; 1910 1911 /* LDV_COMMENT_FUNCTION_MAIN Main function for LDV verifier. */ 1912 void ldv_main0_sequence_infinite_withcheck_stateful(void) { 1913 1914 1915 1916 /* LDV_COMMENT_BEGIN_VARIABLE_DECLARATION_PART */ 1917 /*============================= VARIABLE DECLARATION PART =============================*/ 1918 /** STRUCT: struct type: attribute_group, struct name: acpi_nfit_dimm_attribute_group **/ 1919 /* content: static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj, struct attribute *a, int n)*/ 1920 /* LDV_COMMENT_END_PREP */ 1921 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "acpi_nfit_dimm_attr_visible" */ 1922 struct kobject * var_group1; 1923 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "acpi_nfit_dimm_attr_visible" */ 1924 struct attribute * var_group2; 1925 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "acpi_nfit_dimm_attr_visible" */ 1926 int var_acpi_nfit_dimm_attr_visible_29_p2; 1927 1928 /** STRUCT: struct type: acpi_driver, struct name: acpi_nfit_driver **/ 1929 /* content: static int acpi_nfit_add(struct acpi_device *adev)*/ 1930 /* LDV_COMMENT_END_PREP */ 1931 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "acpi_nfit_add" */ 1932 struct acpi_device * var_group3; 1933 /* content: static int acpi_nfit_remove(struct acpi_device *adev)*/ 1934 /* LDV_COMMENT_END_PREP */ 1935 /* content: static void acpi_nfit_notify(struct acpi_device *adev, u32 event)*/ 1936 /* LDV_COMMENT_END_PREP */ 1937 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "acpi_nfit_notify" */ 1938 u32 var_acpi_nfit_notify_62_p1; 1939 1940 1941 1942 1943 /* LDV_COMMENT_END_VARIABLE_DECLARATION_PART */ 1944 /* LDV_COMMENT_BEGIN_VARIABLE_INITIALIZING_PART */ 1945 /*============================= VARIABLE INITIALIZING PART =============================*/ 1946 LDV_IN_INTERRUPT = 1; 1947 1948 1949 1950 1951 /* LDV_COMMENT_END_VARIABLE_INITIALIZING_PART */ 1952 /* LDV_COMMENT_BEGIN_FUNCTION_CALL_SECTION */ 1953 /*============================= FUNCTION CALL SECTION =============================*/ 1954 /* LDV_COMMENT_FUNCTION_CALL Initialize LDV model. */ 1955 ldv_initialize(); 1956 1957 /** INIT: init_type: ST_MODULE_INIT **/ 1958 /* content: static __init int nfit_init(void)*/ 1959 /* LDV_COMMENT_END_PREP */ 1960 /* LDV_COMMENT_FUNCTION_CALL The kernel calls the driver init function after the driver is loaded into the kernel. This function is declared as "MODULE_INIT(function name)".
*/ 1961 ldv_handler_precall(); 1962 if (nfit_init()) 1963 goto ldv_final; 1964 1965 1966 int ldv_s_acpi_nfit_driver_acpi_driver = 0; 1967 1968 1969 1970 while (nondet_int() 1971 || !(ldv_s_acpi_nfit_driver_acpi_driver == 0) 1972 ) { 1973 1974 switch (nondet_int()) { 1975 1976 case 0: { 1977 1978 /** STRUCT: struct type: attribute_group, struct name: acpi_nfit_dimm_attribute_group **/ 1979 1980 1981 /* content: static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj, struct attribute *a, int n)*/ 1982 /* LDV_COMMENT_END_PREP */ 1983 /* LDV_COMMENT_FUNCTION_CALL Function from field "is_visible" from driver structure with callbacks "acpi_nfit_dimm_attribute_group" */ 1984 ldv_handler_precall(); 1985 acpi_nfit_dimm_attr_visible(var_group1, var_group2, var_acpi_nfit_dimm_attr_visible_29_p2); 1986 1987 1988 1989 1990 } 1991 1992 break; 1993 case 1: { 1994 1995 /** STRUCT: struct type: acpi_driver, struct name: acpi_nfit_driver **/ 1996 if (ldv_s_acpi_nfit_driver_acpi_driver == 0) { 1997 1998 /* content: static int acpi_nfit_remove(struct acpi_device *adev)*/ 1999 /* LDV_COMMENT_END_PREP */ 2000 /* LDV_COMMENT_FUNCTION_CALL Function from field "remove" from driver structure with callbacks "acpi_nfit_driver" */ 2001 ldv_handler_precall(); 2002 acpi_nfit_remove(var_group3); 2003 ldv_s_acpi_nfit_driver_acpi_driver = 0; 2004 2005 } 2006 2007 } 2008 2009 break; 2010 case 2: { 2011 2012 /** STRUCT: struct type: acpi_driver, struct name: acpi_nfit_driver **/ 2013 2014 2015 /* content: static int acpi_nfit_add(struct acpi_device *adev)*/ 2016 /* LDV_COMMENT_END_PREP */ 2017 /* LDV_COMMENT_FUNCTION_CALL Function from field "add" from driver structure with callbacks "acpi_nfit_driver" */ 2018 ldv_handler_precall(); 2019 acpi_nfit_add(var_group3); 2020 2021 2022 2023 2024 } 2025 2026 break; 2027 case 3: { 2028 2029 /** STRUCT: struct type: acpi_driver, struct name: acpi_nfit_driver **/ 2030 2031 2032 /* content: static void acpi_nfit_notify(struct acpi_device *adev, u32 event)*/ 2033 /* LDV_COMMENT_END_PREP */ 2034 /* LDV_COMMENT_FUNCTION_CALL Function from field "notify" from driver structure with callbacks "acpi_nfit_driver" */ 2035 ldv_handler_precall(); 2036 acpi_nfit_notify(var_group3, var_acpi_nfit_notify_62_p1); 2037 2038 2039 2040 2041 } 2042 2043 break; 2044 default: break; 2045 2046 } 2047 2048 } 2049 2050 ldv_module_exit: 2051 2052 /** INIT: init_type: ST_MODULE_EXIT **/ 2053 /* content: static __exit void nfit_exit(void)*/ 2054 /* LDV_COMMENT_END_PREP */ 2055 /* LDV_COMMENT_FUNCTION_CALL The kernel calls the driver release function before the driver is unloaded from the kernel. This function is declared as "MODULE_EXIT(function name)". */ 2056 ldv_handler_precall(); 2057 nfit_exit(); 2058 2059 /* LDV_COMMENT_FUNCTION_CALL Checks that all resources and locks are correctly released before the driver is unloaded. */ 2060 ldv_final: ldv_check_final_state(); 2061 2062 /* LDV_COMMENT_END_FUNCTION_CALL_SECTION */ 2063 return; 2064 2065 } 2066 #endif 2067 2068 /* LDV_COMMENT_END_MAIN */
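/*
 * A minimal sketch of the shape of the LDV mutex model whose interface is
 * declared in the header that follows; the state encoding and the
 * ldv_assert() primitive used here are illustrative assumptions, not the
 * generated model itself. One such state is kept per lock class (for
 * example mutex_of_device, which models device_lock()/device_unlock()),
 * and ldv_check_final_state() inspects it after the environment loop above
 * finishes. This is how a callback such as acpi_nfit_notify() returning
 * while still holding the device mutex becomes a reachable assertion
 * failure in the trace.
 */
#include <linux/mutex.h>

extern void ldv_assert(int expression);	/* assumed verifier primitive */

static int ldv_mutex_mutex_of_device = 1;	/* 1 == unlocked, 2 == locked */

void ldv_mutex_lock_mutex_of_device(struct mutex *lock)
{
	ldv_assert(ldv_mutex_mutex_of_device == 1);	/* no double lock */
	ldv_mutex_mutex_of_device = 2;
}

void ldv_mutex_unlock_mutex_of_device(struct mutex *lock)
{
	ldv_assert(ldv_mutex_mutex_of_device == 2);	/* must be held */
	ldv_mutex_mutex_of_device = 1;
}

void ldv_check_final_state(void)
{
	/* every modelled lock must be released when the driver can unload */
	ldv_assert(ldv_mutex_mutex_of_device == 1);
}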
1 2 #include <linux/kernel.h> 3 bool ldv_is_err(const void *ptr); 4 bool ldv_is_err_or_null(const void *ptr); 5 void* ldv_err_ptr(long error); 6 long ldv_ptr_err(const void *ptr); 7 8 #include <linux/mutex.h> 9 #include <verifier/rcv.h> 10 11 struct usb_device; 12 extern void __ldv_usb_lock_device(struct usb_device *udev); 13 extern void __ldv_usb_unlock_device(struct usb_device *udev); 14 extern int __ldv_usb_trylock_device(struct usb_device *udev); 15 16 extern int mutex_lock_interruptible(struct mutex *lock); 17 extern int mutex_lock_killable(struct mutex *lock); 18 extern void mutex_lock(struct mutex *lock); 19 extern int ldv_mutex_lock_interruptible_i_mutex_of_inode(struct mutex *lock); 20 extern int ldv_mutex_lock_killable_i_mutex_of_inode(struct mutex *lock); 21 extern void ldv_mutex_lock_nested_i_mutex_of_inode(struct mutex *lock, unsigned int subclass); 22 extern void ldv_mutex_lock_i_mutex_of_inode(struct mutex *lock); 23 extern int ldv_mutex_trylock_i_mutex_of_inode(struct mutex *lock); 24 extern int ldv_atomic_dec_and_mutex_lock_i_mutex_of_inode(atomic_t *cnt, struct mutex *lock); 25 extern int ldv_mutex_is_locked_i_mutex_of_inode(struct mutex *lock); 26 extern void ldv_mutex_unlock_i_mutex_of_inode(struct mutex *lock); 27 28 extern void ldv_usb_lock_device_i_mutex_of_inode(void); 29 extern void ldv_usb_unlock_device_i_mutex_of_inode(void); 30 extern int ldv_usb_trylock_device_i_mutex_of_inode(void); 31 extern int ldv_usb_lock_device_for_reset_i_mutex_of_inode(void); 32 extern int ldv_mutex_lock_interruptible_init_mutex_of_acpi_nfit_desc(struct mutex *lock); 33 extern int ldv_mutex_lock_killable_init_mutex_of_acpi_nfit_desc(struct mutex *lock); 34 extern void ldv_mutex_lock_nested_init_mutex_of_acpi_nfit_desc(struct mutex *lock, unsigned int subclass); 35 extern void ldv_mutex_lock_init_mutex_of_acpi_nfit_desc(struct mutex *lock); 36 extern int ldv_mutex_trylock_init_mutex_of_acpi_nfit_desc(struct mutex *lock); 37 extern int ldv_atomic_dec_and_mutex_lock_init_mutex_of_acpi_nfit_desc(atomic_t *cnt, struct mutex *lock); 38 extern int ldv_mutex_is_locked_init_mutex_of_acpi_nfit_desc(struct mutex *lock); 39 extern void ldv_mutex_unlock_init_mutex_of_acpi_nfit_desc(struct mutex *lock); 40 41 extern void ldv_usb_lock_device_init_mutex_of_acpi_nfit_desc(void); 42 extern void ldv_usb_unlock_device_init_mutex_of_acpi_nfit_desc(void); 43 extern int ldv_usb_trylock_device_init_mutex_of_acpi_nfit_desc(void); 44 extern int ldv_usb_lock_device_for_reset_init_mutex_of_acpi_nfit_desc(void); 45 extern int ldv_mutex_lock_interruptible_lock(struct mutex *lock); 46 extern int ldv_mutex_lock_killable_lock(struct mutex *lock); 47 extern void ldv_mutex_lock_nested_lock(struct mutex *lock, unsigned int subclass); 48 extern void ldv_mutex_lock_lock(struct mutex *lock); 49 extern int ldv_mutex_trylock_lock(struct mutex *lock); 50 extern int ldv_atomic_dec_and_mutex_lock_lock(atomic_t *cnt, struct mutex *lock); 51 extern int ldv_mutex_is_locked_lock(struct mutex *lock); 52 extern void ldv_mutex_unlock_lock(struct mutex *lock); 53 54 extern void ldv_usb_lock_device_lock(void); 55 extern void ldv_usb_unlock_device_lock(void); 56 extern int ldv_usb_trylock_device_lock(void); 57 extern int ldv_usb_lock_device_for_reset_lock(void); 58 extern int ldv_mutex_lock_interruptible_mutex_of_device(struct mutex *lock); 59 extern int ldv_mutex_lock_killable_mutex_of_device(struct mutex *lock); 60 extern void ldv_mutex_lock_nested_mutex_of_device(struct mutex *lock, unsigned int subclass); 61 extern void 
ldv_mutex_lock_mutex_of_device(struct mutex *lock); 62 extern int ldv_mutex_trylock_mutex_of_device(struct mutex *lock); 63 extern int ldv_atomic_dec_and_mutex_lock_mutex_of_device(atomic_t *cnt, struct mutex *lock); 64 extern int ldv_mutex_is_locked_mutex_of_device(struct mutex *lock); 65 extern void ldv_mutex_unlock_mutex_of_device(struct mutex *lock); 66 67 extern void ldv_usb_lock_device_mutex_of_device(void); 68 extern void ldv_usb_unlock_device_mutex_of_device(void); 69 extern int ldv_usb_trylock_device_mutex_of_device(void); 70 extern int ldv_usb_lock_device_for_reset_mutex_of_device(void); 71 extern int ldv_mutex_lock_interruptible_spa_map_mutex_of_acpi_nfit_desc(struct mutex *lock); 72 extern int ldv_mutex_lock_killable_spa_map_mutex_of_acpi_nfit_desc(struct mutex *lock); 73 extern void ldv_mutex_lock_nested_spa_map_mutex_of_acpi_nfit_desc(struct mutex *lock, unsigned int subclass); 74 extern void ldv_mutex_lock_spa_map_mutex_of_acpi_nfit_desc(struct mutex *lock); 75 extern int ldv_mutex_trylock_spa_map_mutex_of_acpi_nfit_desc(struct mutex *lock); 76 extern int ldv_atomic_dec_and_mutex_lock_spa_map_mutex_of_acpi_nfit_desc(atomic_t *cnt, struct mutex *lock); 77 extern int ldv_mutex_is_locked_spa_map_mutex_of_acpi_nfit_desc(struct mutex *lock); 78 extern void ldv_mutex_unlock_spa_map_mutex_of_acpi_nfit_desc(struct mutex *lock); 79 80 extern void ldv_usb_lock_device_spa_map_mutex_of_acpi_nfit_desc(void); 81 extern void ldv_usb_unlock_device_spa_map_mutex_of_acpi_nfit_desc(void); 82 extern int ldv_usb_trylock_device_spa_map_mutex_of_acpi_nfit_desc(void); 83 extern int ldv_usb_lock_device_for_reset_spa_map_mutex_of_acpi_nfit_desc(void); 84 #line 1 "/home/ldvuser/ref_launch/work/current--X--drivers--X--defaultlinux-4.4-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.4-rc1.tar.xz/csd_deg_dscv/698/dscv_tempdir/dscv/ri/32_7a/drivers/acpi/nfit.c" 85 86 /* 87 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved. 88 * 89 * This program is free software; you can redistribute it and/or modify 90 * it under the terms of version 2 of the GNU General Public License as 91 * published by the Free Software Foundation. 92 * 93 * This program is distributed in the hope that it will be useful, but 94 * WITHOUT ANY WARRANTY; without even the implied warranty of 95 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 96 * General Public License for more details. 97 */ 98 #include <linux/list_sort.h> 99 #include <linux/libnvdimm.h> 100 #include <linux/module.h> 101 #include <linux/mutex.h> 102 #include <linux/ndctl.h> 103 #include <linux/list.h> 104 #include <linux/acpi.h> 105 #include <linux/sort.h> 106 #include <linux/pmem.h> 107 #include <linux/io.h> 108 #include <asm/cacheflush.h> 109 #include "nfit.h" 110 111 /* 112 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is 113 * irrelevant. 
114 */ 115 #include <linux/io-64-nonatomic-hi-lo.h> 116 117 static bool force_enable_dimms; 118 module_param(force_enable_dimms, bool, S_IRUGO|S_IWUSR); 119 MODULE_PARM_DESC(force_enable_dimms, "Ignore _STA (ACPI DIMM device) status"); 120 121 struct nfit_table_prev { 122 struct list_head spas; 123 struct list_head memdevs; 124 struct list_head dcrs; 125 struct list_head bdws; 126 struct list_head idts; 127 struct list_head flushes; 128 }; 129 130 static u8 nfit_uuid[NFIT_UUID_MAX][16]; 131 132 const u8 *to_nfit_uuid(enum nfit_uuids id) 133 { 134 return nfit_uuid[id]; 135 } 136 EXPORT_SYMBOL(to_nfit_uuid); 137 138 static struct acpi_nfit_desc *to_acpi_nfit_desc( 139 struct nvdimm_bus_descriptor *nd_desc) 140 { 141 return container_of(nd_desc, struct acpi_nfit_desc, nd_desc); 142 } 143 144 static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc) 145 { 146 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; 147 148 /* 149 * If provider == 'ACPI.NFIT' we can assume 'dev' is a struct 150 * acpi_device. 151 */ 152 if (!nd_desc->provider_name 153 || strcmp(nd_desc->provider_name, "ACPI.NFIT") != 0) 154 return NULL; 155 156 return to_acpi_device(acpi_desc->dev); 157 } 158 159 static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, 160 struct nvdimm *nvdimm, unsigned int cmd, void *buf, 161 unsigned int buf_len) 162 { 163 struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc); 164 const struct nd_cmd_desc *desc = NULL; 165 union acpi_object in_obj, in_buf, *out_obj; 166 struct device *dev = acpi_desc->dev; 167 const char *cmd_name, *dimm_name; 168 unsigned long dsm_mask; 169 acpi_handle handle; 170 const u8 *uuid; 171 u32 offset; 172 int rc, i; 173 174 if (nvdimm) { 175 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 176 struct acpi_device *adev = nfit_mem->adev; 177 178 if (!adev) 179 return -ENOTTY; 180 dimm_name = nvdimm_name(nvdimm); 181 cmd_name = nvdimm_cmd_name(cmd); 182 dsm_mask = nfit_mem->dsm_mask; 183 desc = nd_cmd_dimm_desc(cmd); 184 uuid = to_nfit_uuid(NFIT_DEV_DIMM); 185 handle = adev->handle; 186 } else { 187 struct acpi_device *adev = to_acpi_dev(acpi_desc); 188 189 cmd_name = nvdimm_bus_cmd_name(cmd); 190 dsm_mask = nd_desc->dsm_mask; 191 desc = nd_cmd_bus_desc(cmd); 192 uuid = to_nfit_uuid(NFIT_DEV_BUS); 193 handle = adev->handle; 194 dimm_name = "bus"; 195 } 196 197 if (!desc || (cmd && (desc->out_num + desc->in_num == 0))) 198 return -ENOTTY; 199 200 if (!test_bit(cmd, &dsm_mask)) 201 return -ENOTTY; 202 203 in_obj.type = ACPI_TYPE_PACKAGE; 204 in_obj.package.count = 1; 205 in_obj.package.elements = &in_buf; 206 in_buf.type = ACPI_TYPE_BUFFER; 207 in_buf.buffer.pointer = buf; 208 in_buf.buffer.length = 0; 209 210 /* libnvdimm has already validated the input envelope */ 211 for (i = 0; i < desc->in_num; i++) 212 in_buf.buffer.length += nd_cmd_in_size(nvdimm, cmd, desc, 213 i, buf); 214 215 if (IS_ENABLED(CONFIG_ACPI_NFIT_DEBUG)) { 216 dev_dbg(dev, "%s:%s cmd: %s input length: %d\n", __func__, 217 dimm_name, cmd_name, in_buf.buffer.length); 218 print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, 219 4, in_buf.buffer.pointer, min_t(u32, 128, 220 in_buf.buffer.length), true); 221 } 222 223 out_obj = acpi_evaluate_dsm(handle, uuid, 1, cmd, &in_obj); 224 if (!out_obj) { 225 dev_dbg(dev, "%s:%s _DSM failed cmd: %s\n", __func__, dimm_name, 226 cmd_name); 227 return -EINVAL; 228 } 229 230 if (out_obj->package.type != ACPI_TYPE_BUFFER) { 231 dev_dbg(dev, "%s:%s unexpected output object type cmd: %s type: %d\n", 232 __func__, dimm_name, 
cmd_name, out_obj->type); 233 rc = -EINVAL; 234 goto out; 235 } 236 237 if (IS_ENABLED(CONFIG_ACPI_NFIT_DEBUG)) { 238 dev_dbg(dev, "%s:%s cmd: %s output length: %d\n", __func__, 239 dimm_name, cmd_name, out_obj->buffer.length); 240 print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, 241 4, out_obj->buffer.pointer, min_t(u32, 128, 242 out_obj->buffer.length), true); 243 } 244 245 for (i = 0, offset = 0; i < desc->out_num; i++) { 246 u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf, 247 (u32 *) out_obj->buffer.pointer); 248 249 if (offset + out_size > out_obj->buffer.length) { 250 dev_dbg(dev, "%s:%s output object underflow cmd: %s field: %d\n", 251 __func__, dimm_name, cmd_name, i); 252 break; 253 } 254 255 if (in_buf.buffer.length + offset + out_size > buf_len) { 256 dev_dbg(dev, "%s:%s output overrun cmd: %s field: %d\n", 257 __func__, dimm_name, cmd_name, i); 258 rc = -ENXIO; 259 goto out; 260 } 261 memcpy(buf + in_buf.buffer.length + offset, 262 out_obj->buffer.pointer + offset, out_size); 263 offset += out_size; 264 } 265 if (offset + in_buf.buffer.length < buf_len) { 266 if (i >= 1) { 267 /* 268 * status valid, return the number of bytes left 269 * unfilled in the output buffer 270 */ 271 rc = buf_len - offset - in_buf.buffer.length; 272 } else { 273 dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n", 274 __func__, dimm_name, cmd_name, buf_len, 275 offset); 276 rc = -ENXIO; 277 } 278 } else 279 rc = 0; 280 281 out: 282 ACPI_FREE(out_obj); 283 284 return rc; 285 } 286 287 static const char *spa_type_name(u16 type) 288 { 289 static const char *to_name[] = { 290 [NFIT_SPA_VOLATILE] = "volatile", 291 [NFIT_SPA_PM] = "pmem", 292 [NFIT_SPA_DCR] = "dimm-control-region", 293 [NFIT_SPA_BDW] = "block-data-window", 294 [NFIT_SPA_VDISK] = "volatile-disk", 295 [NFIT_SPA_VCD] = "volatile-cd", 296 [NFIT_SPA_PDISK] = "persistent-disk", 297 [NFIT_SPA_PCD] = "persistent-cd", 298 299 }; 300 301 if (type > NFIT_SPA_PCD) 302 return "unknown"; 303 304 return to_name[type]; 305 } 306 307 static int nfit_spa_type(struct acpi_nfit_system_address *spa) 308 { 309 int i; 310 311 for (i = 0; i < NFIT_UUID_MAX; i++) 312 if (memcmp(to_nfit_uuid(i), spa->range_guid, 16) == 0) 313 return i; 314 return -1; 315 } 316 317 static bool add_spa(struct acpi_nfit_desc *acpi_desc, 318 struct nfit_table_prev *prev, 319 struct acpi_nfit_system_address *spa) 320 { 321 struct device *dev = acpi_desc->dev; 322 struct nfit_spa *nfit_spa; 323 324 list_for_each_entry(nfit_spa, &prev->spas, list) { 325 if (memcmp(nfit_spa->spa, spa, sizeof(*spa)) == 0) { 326 list_move_tail(&nfit_spa->list, &acpi_desc->spas); 327 return true; 328 } 329 } 330 331 nfit_spa = devm_kzalloc(dev, sizeof(*nfit_spa), GFP_KERNEL); 332 if (!nfit_spa) 333 return false; 334 INIT_LIST_HEAD(&nfit_spa->list); 335 nfit_spa->spa = spa; 336 list_add_tail(&nfit_spa->list, &acpi_desc->spas); 337 dev_dbg(dev, "%s: spa index: %d type: %s\n", __func__, 338 spa->range_index, 339 spa_type_name(nfit_spa_type(spa))); 340 return true; 341 } 342 343 static bool add_memdev(struct acpi_nfit_desc *acpi_desc, 344 struct nfit_table_prev *prev, 345 struct acpi_nfit_memory_map *memdev) 346 { 347 struct device *dev = acpi_desc->dev; 348 struct nfit_memdev *nfit_memdev; 349 350 list_for_each_entry(nfit_memdev, &prev->memdevs, list) 351 if (memcmp(nfit_memdev->memdev, memdev, sizeof(*memdev)) == 0) { 352 list_move_tail(&nfit_memdev->list, &acpi_desc->memdevs); 353 return true; 354 } 355 356 nfit_memdev = devm_kzalloc(dev, sizeof(*nfit_memdev), GFP_KERNEL); 357 if 
(!nfit_memdev) 358 return false; 359 INIT_LIST_HEAD(&nfit_memdev->list); 360 nfit_memdev->memdev = memdev; 361 list_add_tail(&nfit_memdev->list, &acpi_desc->memdevs); 362 dev_dbg(dev, "%s: memdev handle: %#x spa: %d dcr: %d\n", 363 __func__, memdev->device_handle, memdev->range_index, 364 memdev->region_index); 365 return true; 366 } 367 368 static bool add_dcr(struct acpi_nfit_desc *acpi_desc, 369 struct nfit_table_prev *prev, 370 struct acpi_nfit_control_region *dcr) 371 { 372 struct device *dev = acpi_desc->dev; 373 struct nfit_dcr *nfit_dcr; 374 375 list_for_each_entry(nfit_dcr, &prev->dcrs, list) 376 if (memcmp(nfit_dcr->dcr, dcr, sizeof(*dcr)) == 0) { 377 list_move_tail(&nfit_dcr->list, &acpi_desc->dcrs); 378 return true; 379 } 380 381 nfit_dcr = devm_kzalloc(dev, sizeof(*nfit_dcr), GFP_KERNEL); 382 if (!nfit_dcr) 383 return false; 384 INIT_LIST_HEAD(&nfit_dcr->list); 385 nfit_dcr->dcr = dcr; 386 list_add_tail(&nfit_dcr->list, &acpi_desc->dcrs); 387 dev_dbg(dev, "%s: dcr index: %d windows: %d\n", __func__, 388 dcr->region_index, dcr->windows); 389 return true; 390 } 391 392 static bool add_bdw(struct acpi_nfit_desc *acpi_desc, 393 struct nfit_table_prev *prev, 394 struct acpi_nfit_data_region *bdw) 395 { 396 struct device *dev = acpi_desc->dev; 397 struct nfit_bdw *nfit_bdw; 398 399 list_for_each_entry(nfit_bdw, &prev->bdws, list) 400 if (memcmp(nfit_bdw->bdw, bdw, sizeof(*bdw)) == 0) { 401 list_move_tail(&nfit_bdw->list, &acpi_desc->bdws); 402 return true; 403 } 404 405 nfit_bdw = devm_kzalloc(dev, sizeof(*nfit_bdw), GFP_KERNEL); 406 if (!nfit_bdw) 407 return false; 408 INIT_LIST_HEAD(&nfit_bdw->list); 409 nfit_bdw->bdw = bdw; 410 list_add_tail(&nfit_bdw->list, &acpi_desc->bdws); 411 dev_dbg(dev, "%s: bdw dcr: %d windows: %d\n", __func__, 412 bdw->region_index, bdw->windows); 413 return true; 414 } 415 416 static bool add_idt(struct acpi_nfit_desc *acpi_desc, 417 struct nfit_table_prev *prev, 418 struct acpi_nfit_interleave *idt) 419 { 420 struct device *dev = acpi_desc->dev; 421 struct nfit_idt *nfit_idt; 422 423 list_for_each_entry(nfit_idt, &prev->idts, list) 424 if (memcmp(nfit_idt->idt, idt, sizeof(*idt)) == 0) { 425 list_move_tail(&nfit_idt->list, &acpi_desc->idts); 426 return true; 427 } 428 429 nfit_idt = devm_kzalloc(dev, sizeof(*nfit_idt), GFP_KERNEL); 430 if (!nfit_idt) 431 return false; 432 INIT_LIST_HEAD(&nfit_idt->list); 433 nfit_idt->idt = idt; 434 list_add_tail(&nfit_idt->list, &acpi_desc->idts); 435 dev_dbg(dev, "%s: idt index: %d num_lines: %d\n", __func__, 436 idt->interleave_index, idt->line_count); 437 return true; 438 } 439 440 static bool add_flush(struct acpi_nfit_desc *acpi_desc, 441 struct nfit_table_prev *prev, 442 struct acpi_nfit_flush_address *flush) 443 { 444 struct device *dev = acpi_desc->dev; 445 struct nfit_flush *nfit_flush; 446 447 list_for_each_entry(nfit_flush, &prev->flushes, list) 448 if (memcmp(nfit_flush->flush, flush, sizeof(*flush)) == 0) { 449 list_move_tail(&nfit_flush->list, &acpi_desc->flushes); 450 return true; 451 } 452 453 nfit_flush = devm_kzalloc(dev, sizeof(*nfit_flush), GFP_KERNEL); 454 if (!nfit_flush) 455 return false; 456 INIT_LIST_HEAD(&nfit_flush->list); 457 nfit_flush->flush = flush; 458 list_add_tail(&nfit_flush->list, &acpi_desc->flushes); 459 dev_dbg(dev, "%s: nfit_flush handle: %d hint_count: %d\n", __func__, 460 flush->device_handle, flush->hint_count); 461 return true; 462 } 463 464 static void *add_table(struct acpi_nfit_desc *acpi_desc, 465 struct nfit_table_prev *prev, void *table, const void *end) 466 { 467 
struct device *dev = acpi_desc->dev; 468 struct acpi_nfit_header *hdr; 469 void *err = ERR_PTR(-ENOMEM); 470 471 if (table >= end) 472 return NULL; 473 474 hdr = table; 475 if (!hdr->length) { 476 dev_warn(dev, "found a zero length table '%d' parsing nfit\n", 477 hdr->type); 478 return NULL; 479 } 480 481 switch (hdr->type) { 482 case ACPI_NFIT_TYPE_SYSTEM_ADDRESS: 483 if (!add_spa(acpi_desc, prev, table)) 484 return err; 485 break; 486 case ACPI_NFIT_TYPE_MEMORY_MAP: 487 if (!add_memdev(acpi_desc, prev, table)) 488 return err; 489 break; 490 case ACPI_NFIT_TYPE_CONTROL_REGION: 491 if (!add_dcr(acpi_desc, prev, table)) 492 return err; 493 break; 494 case ACPI_NFIT_TYPE_DATA_REGION: 495 if (!add_bdw(acpi_desc, prev, table)) 496 return err; 497 break; 498 case ACPI_NFIT_TYPE_INTERLEAVE: 499 if (!add_idt(acpi_desc, prev, table)) 500 return err; 501 break; 502 case ACPI_NFIT_TYPE_FLUSH_ADDRESS: 503 if (!add_flush(acpi_desc, prev, table)) 504 return err; 505 break; 506 case ACPI_NFIT_TYPE_SMBIOS: 507 dev_dbg(dev, "%s: smbios\n", __func__); 508 break; 509 default: 510 dev_err(dev, "unknown table '%d' parsing nfit\n", hdr->type); 511 break; 512 } 513 514 return table + hdr->length; 515 } 516 517 static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc, 518 struct nfit_mem *nfit_mem) 519 { 520 u32 device_handle = __to_nfit_memdev(nfit_mem)->device_handle; 521 u16 dcr = nfit_mem->dcr->region_index; 522 struct nfit_spa *nfit_spa; 523 524 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { 525 u16 range_index = nfit_spa->spa->range_index; 526 int type = nfit_spa_type(nfit_spa->spa); 527 struct nfit_memdev *nfit_memdev; 528 529 if (type != NFIT_SPA_BDW) 530 continue; 531 532 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { 533 if (nfit_memdev->memdev->range_index != range_index) 534 continue; 535 if (nfit_memdev->memdev->device_handle != device_handle) 536 continue; 537 if (nfit_memdev->memdev->region_index != dcr) 538 continue; 539 540 nfit_mem->spa_bdw = nfit_spa->spa; 541 return; 542 } 543 } 544 545 dev_dbg(acpi_desc->dev, "SPA-BDW not found for SPA-DCR %d\n", 546 nfit_mem->spa_dcr->range_index); 547 nfit_mem->bdw = NULL; 548 } 549 550 static int nfit_mem_add(struct acpi_nfit_desc *acpi_desc, 551 struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa) 552 { 553 u16 dcr = __to_nfit_memdev(nfit_mem)->region_index; 554 struct nfit_memdev *nfit_memdev; 555 struct nfit_flush *nfit_flush; 556 struct nfit_dcr *nfit_dcr; 557 struct nfit_bdw *nfit_bdw; 558 struct nfit_idt *nfit_idt; 559 u16 idt_idx, range_index; 560 561 list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) { 562 if (nfit_dcr->dcr->region_index != dcr) 563 continue; 564 nfit_mem->dcr = nfit_dcr->dcr; 565 break; 566 } 567 568 if (!nfit_mem->dcr) { 569 dev_dbg(acpi_desc->dev, "SPA %d missing:%s%s\n", 570 spa->range_index, __to_nfit_memdev(nfit_mem) 571 ? "" : " MEMDEV", nfit_mem->dcr ? 
"" : " DCR"); 572 return -ENODEV; 573 } 574 575 /* 576 * We've found enough to create an nvdimm, optionally 577 * find an associated BDW 578 */ 579 list_add(&nfit_mem->list, &acpi_desc->dimms); 580 581 list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) { 582 if (nfit_bdw->bdw->region_index != dcr) 583 continue; 584 nfit_mem->bdw = nfit_bdw->bdw; 585 break; 586 } 587 588 if (!nfit_mem->bdw) 589 return 0; 590 591 nfit_mem_find_spa_bdw(acpi_desc, nfit_mem); 592 593 if (!nfit_mem->spa_bdw) 594 return 0; 595 596 range_index = nfit_mem->spa_bdw->range_index; 597 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { 598 if (nfit_memdev->memdev->range_index != range_index || 599 nfit_memdev->memdev->region_index != dcr) 600 continue; 601 nfit_mem->memdev_bdw = nfit_memdev->memdev; 602 idt_idx = nfit_memdev->memdev->interleave_index; 603 list_for_each_entry(nfit_idt, &acpi_desc->idts, list) { 604 if (nfit_idt->idt->interleave_index != idt_idx) 605 continue; 606 nfit_mem->idt_bdw = nfit_idt->idt; 607 break; 608 } 609 610 list_for_each_entry(nfit_flush, &acpi_desc->flushes, list) { 611 if (nfit_flush->flush->device_handle != 612 nfit_memdev->memdev->device_handle) 613 continue; 614 nfit_mem->nfit_flush = nfit_flush; 615 break; 616 } 617 break; 618 } 619 620 return 0; 621 } 622 623 static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc, 624 struct acpi_nfit_system_address *spa) 625 { 626 struct nfit_mem *nfit_mem, *found; 627 struct nfit_memdev *nfit_memdev; 628 int type = nfit_spa_type(spa); 629 u16 dcr; 630 631 switch (type) { 632 case NFIT_SPA_DCR: 633 case NFIT_SPA_PM: 634 break; 635 default: 636 return 0; 637 } 638 639 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { 640 int rc; 641 642 if (nfit_memdev->memdev->range_index != spa->range_index) 643 continue; 644 found = NULL; 645 dcr = nfit_memdev->memdev->region_index; 646 list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) 647 if (__to_nfit_memdev(nfit_mem)->region_index == dcr) { 648 found = nfit_mem; 649 break; 650 } 651 652 if (found) 653 nfit_mem = found; 654 else { 655 nfit_mem = devm_kzalloc(acpi_desc->dev, 656 sizeof(*nfit_mem), GFP_KERNEL); 657 if (!nfit_mem) 658 return -ENOMEM; 659 INIT_LIST_HEAD(&nfit_mem->list); 660 } 661 662 if (type == NFIT_SPA_DCR) { 663 struct nfit_idt *nfit_idt; 664 u16 idt_idx; 665 666 /* multiple dimms may share a SPA when interleaved */ 667 nfit_mem->spa_dcr = spa; 668 nfit_mem->memdev_dcr = nfit_memdev->memdev; 669 idt_idx = nfit_memdev->memdev->interleave_index; 670 list_for_each_entry(nfit_idt, &acpi_desc->idts, list) { 671 if (nfit_idt->idt->interleave_index != idt_idx) 672 continue; 673 nfit_mem->idt_dcr = nfit_idt->idt; 674 break; 675 } 676 } else { 677 /* 678 * A single dimm may belong to multiple SPA-PM 679 * ranges, record at least one in addition to 680 * any SPA-DCR range. 
681 */ 682 nfit_mem->memdev_pmem = nfit_memdev->memdev; 683 } 684 685 if (found) 686 continue; 687 688 rc = nfit_mem_add(acpi_desc, nfit_mem, spa); 689 if (rc) 690 return rc; 691 } 692 693 return 0; 694 } 695 696 static int nfit_mem_cmp(void *priv, struct list_head *_a, struct list_head *_b) 697 { 698 struct nfit_mem *a = container_of(_a, typeof(*a), list); 699 struct nfit_mem *b = container_of(_b, typeof(*b), list); 700 u32 handleA, handleB; 701 702 handleA = __to_nfit_memdev(a)->device_handle; 703 handleB = __to_nfit_memdev(b)->device_handle; 704 if (handleA < handleB) 705 return -1; 706 else if (handleA > handleB) 707 return 1; 708 return 0; 709 } 710 711 static int nfit_mem_init(struct acpi_nfit_desc *acpi_desc) 712 { 713 struct nfit_spa *nfit_spa; 714 715 /* 716 * For each SPA-DCR or SPA-PMEM address range find its 717 * corresponding MEMDEV(s). From each MEMDEV find the 718 * corresponding DCR. Then, if we're operating on a SPA-DCR, 719 * try to find a SPA-BDW and a corresponding BDW that references 720 * the DCR. Throw it all into an nfit_mem object. Note, that 721 * BDWs are optional. 722 */ 723 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { 724 int rc; 725 726 rc = nfit_mem_dcr_init(acpi_desc, nfit_spa->spa); 727 if (rc) 728 return rc; 729 } 730 731 list_sort(NULL, &acpi_desc->dimms, nfit_mem_cmp); 732 733 return 0; 734 } 735 736 static ssize_t revision_show(struct device *dev, 737 struct device_attribute *attr, char *buf) 738 { 739 struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev); 740 struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); 741 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); 742 743 return sprintf(buf, "%d\n", acpi_desc->nfit->header.revision); 744 } 745 static DEVICE_ATTR_RO(revision); 746 747 static struct attribute *acpi_nfit_attributes[] = { 748 &dev_attr_revision.attr, 749 NULL, 750 }; 751 752 static struct attribute_group acpi_nfit_attribute_group = { 753 .name = "nfit", 754 .attrs = acpi_nfit_attributes, 755 }; 756 757 const struct attribute_group *acpi_nfit_attribute_groups[] = { 758 &nvdimm_bus_attribute_group, 759 &acpi_nfit_attribute_group, 760 NULL, 761 }; 762 EXPORT_SYMBOL_GPL(acpi_nfit_attribute_groups); 763 764 static struct acpi_nfit_memory_map *to_nfit_memdev(struct device *dev) 765 { 766 struct nvdimm *nvdimm = to_nvdimm(dev); 767 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 768 769 return __to_nfit_memdev(nfit_mem); 770 } 771 772 static struct acpi_nfit_control_region *to_nfit_dcr(struct device *dev) 773 { 774 struct nvdimm *nvdimm = to_nvdimm(dev); 775 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 776 777 return nfit_mem->dcr; 778 } 779 780 static ssize_t handle_show(struct device *dev, 781 struct device_attribute *attr, char *buf) 782 { 783 struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev); 784 785 return sprintf(buf, "%#x\n", memdev->device_handle); 786 } 787 static DEVICE_ATTR_RO(handle); 788 789 static ssize_t phys_id_show(struct device *dev, 790 struct device_attribute *attr, char *buf) 791 { 792 struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev); 793 794 return sprintf(buf, "%#x\n", memdev->physical_id); 795 } 796 static DEVICE_ATTR_RO(phys_id); 797 798 static ssize_t vendor_show(struct device *dev, 799 struct device_attribute *attr, char *buf) 800 { 801 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); 802 803 return sprintf(buf, "%#x\n", dcr->vendor_id); 804 } 805 static DEVICE_ATTR_RO(vendor); 806 807 static ssize_t rev_id_show(struct device *dev, 808 struct 
device_attribute *attr, char *buf) 809 { 810 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); 811 812 return sprintf(buf, "%#x\n", dcr->revision_id); 813 } 814 static DEVICE_ATTR_RO(rev_id); 815 816 static ssize_t device_show(struct device *dev, 817 struct device_attribute *attr, char *buf) 818 { 819 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); 820 821 return sprintf(buf, "%#x\n", dcr->device_id); 822 } 823 static DEVICE_ATTR_RO(device); 824 825 static ssize_t format_show(struct device *dev, 826 struct device_attribute *attr, char *buf) 827 { 828 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); 829 830 return sprintf(buf, "%#x\n", dcr->code); 831 } 832 static DEVICE_ATTR_RO(format); 833 834 static ssize_t serial_show(struct device *dev, 835 struct device_attribute *attr, char *buf) 836 { 837 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); 838 839 return sprintf(buf, "%#x\n", dcr->serial_number); 840 } 841 static DEVICE_ATTR_RO(serial); 842 843 static ssize_t flags_show(struct device *dev, 844 struct device_attribute *attr, char *buf) 845 { 846 u16 flags = to_nfit_memdev(dev)->flags; 847 848 return sprintf(buf, "%s%s%s%s%s\n", 849 flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save_fail " : "", 850 flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore_fail " : "", 851 flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush_fail " : "", 852 flags & ACPI_NFIT_MEM_NOT_ARMED ? "not_armed " : "", 853 flags & ACPI_NFIT_MEM_HEALTH_OBSERVED ? "smart_event " : ""); 854 } 855 static DEVICE_ATTR_RO(flags); 856 857 static struct attribute *acpi_nfit_dimm_attributes[] = { 858 &dev_attr_handle.attr, 859 &dev_attr_phys_id.attr, 860 &dev_attr_vendor.attr, 861 &dev_attr_device.attr, 862 &dev_attr_format.attr, 863 &dev_attr_serial.attr, 864 &dev_attr_rev_id.attr, 865 &dev_attr_flags.attr, 866 NULL, 867 }; 868 869 static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj, 870 struct attribute *a, int n) 871 { 872 struct device *dev = container_of(kobj, struct device, kobj); 873 874 if (to_nfit_dcr(dev)) 875 return a->mode; 876 else 877 return 0; 878 } 879 880 static struct attribute_group acpi_nfit_dimm_attribute_group = { 881 .name = "nfit", 882 .attrs = acpi_nfit_dimm_attributes, 883 .is_visible = acpi_nfit_dimm_attr_visible, 884 }; 885 886 static const struct attribute_group *acpi_nfit_dimm_attribute_groups[] = { 887 &nvdimm_attribute_group, 888 &nd_device_attribute_group, 889 &acpi_nfit_dimm_attribute_group, 890 NULL, 891 }; 892 893 static struct nvdimm *acpi_nfit_dimm_by_handle(struct acpi_nfit_desc *acpi_desc, 894 u32 device_handle) 895 { 896 struct nfit_mem *nfit_mem; 897 898 list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) 899 if (__to_nfit_memdev(nfit_mem)->device_handle == device_handle) 900 return nfit_mem->nvdimm; 901 902 return NULL; 903 } 904 905 static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc, 906 struct nfit_mem *nfit_mem, u32 device_handle) 907 { 908 struct acpi_device *adev, *adev_dimm; 909 struct device *dev = acpi_desc->dev; 910 const u8 *uuid = to_nfit_uuid(NFIT_DEV_DIMM); 911 int i; 912 913 nfit_mem->dsm_mask = acpi_desc->dimm_dsm_force_en; 914 adev = to_acpi_dev(acpi_desc); 915 if (!adev) 916 return 0; 917 918 adev_dimm = acpi_find_child_device(adev, device_handle, false); 919 nfit_mem->adev = adev_dimm; 920 if (!adev_dimm) { 921 dev_err(dev, "no ACPI.NFIT device with _ADR %#x, disabling...\n", 922 device_handle); 923 return force_enable_dimms ? 
0 : -ENODEV; 924 } 925 926 for (i = ND_CMD_SMART; i <= ND_CMD_VENDOR; i++) 927 if (acpi_check_dsm(adev_dimm->handle, uuid, 1, 1ULL << i)) 928 set_bit(i, &nfit_mem->dsm_mask); 929 930 return 0; 931 } 932 933 static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc) 934 { 935 struct nfit_mem *nfit_mem; 936 int dimm_count = 0; 937 938 list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) { 939 struct nvdimm *nvdimm; 940 unsigned long flags = 0; 941 u32 device_handle; 942 u16 mem_flags; 943 int rc; 944 945 device_handle = __to_nfit_memdev(nfit_mem)->device_handle; 946 nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, device_handle); 947 if (nvdimm) { 948 dimm_count++; 949 continue; 950 } 951 952 if (nfit_mem->bdw && nfit_mem->memdev_pmem) 953 flags |= NDD_ALIASING; 954 955 mem_flags = __to_nfit_memdev(nfit_mem)->flags; 956 if (mem_flags & ACPI_NFIT_MEM_NOT_ARMED) 957 flags |= NDD_UNARMED; 958 959 rc = acpi_nfit_add_dimm(acpi_desc, nfit_mem, device_handle); 960 if (rc) 961 continue; 962 963 nvdimm = nvdimm_create(acpi_desc->nvdimm_bus, nfit_mem, 964 acpi_nfit_dimm_attribute_groups, 965 flags, &nfit_mem->dsm_mask); 966 if (!nvdimm) 967 return -ENOMEM; 968 969 nfit_mem->nvdimm = nvdimm; 970 dimm_count++; 971 972 if ((mem_flags & ACPI_NFIT_MEM_FAILED_MASK) == 0) 973 continue; 974 975 dev_info(acpi_desc->dev, "%s flags:%s%s%s%s\n", 976 nvdimm_name(nvdimm), 977 mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? " save_fail" : "", 978 mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? " restore_fail":"", 979 mem_flags & ACPI_NFIT_MEM_FLUSH_FAILED ? " flush_fail" : "", 980 mem_flags & ACPI_NFIT_MEM_NOT_ARMED ? " not_armed" : ""); 981 982 } 983 984 return nvdimm_bus_check_dimm_count(acpi_desc->nvdimm_bus, dimm_count); 985 } 986 987 static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc) 988 { 989 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; 990 const u8 *uuid = to_nfit_uuid(NFIT_DEV_BUS); 991 struct acpi_device *adev; 992 int i; 993 994 nd_desc->dsm_mask = acpi_desc->bus_dsm_force_en; 995 adev = to_acpi_dev(acpi_desc); 996 if (!adev) 997 return; 998 999 for (i = ND_CMD_ARS_CAP; i <= ND_CMD_ARS_STATUS; i++) 1000 if (acpi_check_dsm(adev->handle, uuid, 1, 1ULL << i)) 1001 set_bit(i, &nd_desc->dsm_mask); 1002 } 1003 1004 static ssize_t range_index_show(struct device *dev, 1005 struct device_attribute *attr, char *buf) 1006 { 1007 struct nd_region *nd_region = to_nd_region(dev); 1008 struct nfit_spa *nfit_spa = nd_region_provider_data(nd_region); 1009 1010 return sprintf(buf, "%d\n", nfit_spa->spa->range_index); 1011 } 1012 static DEVICE_ATTR_RO(range_index); 1013 1014 static struct attribute *acpi_nfit_region_attributes[] = { 1015 &dev_attr_range_index.attr, 1016 NULL, 1017 }; 1018 1019 static struct attribute_group acpi_nfit_region_attribute_group = { 1020 .name = "nfit", 1021 .attrs = acpi_nfit_region_attributes, 1022 }; 1023 1024 static const struct attribute_group *acpi_nfit_region_attribute_groups[] = { 1025 &nd_region_attribute_group, 1026 &nd_mapping_attribute_group, 1027 &nd_device_attribute_group, 1028 &nd_numa_attribute_group, 1029 &acpi_nfit_region_attribute_group, 1030 NULL, 1031 }; 1032 1033 /* enough info to uniquely specify an interleave set */ 1034 struct nfit_set_info { 1035 struct nfit_set_info_map { 1036 u64 region_offset; 1037 u32 serial_number; 1038 u32 pad; 1039 } mapping[0]; 1040 }; 1041 1042 static size_t sizeof_nfit_set_info(int num_mappings) 1043 { 1044 return sizeof(struct nfit_set_info) 1045 + num_mappings * sizeof(struct nfit_set_info_map); 1046 } 1047 1048 
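/*
 * Worked example for sizeof_nfit_set_info() above, assuming a typical
 * x86-64 ABI: struct nfit_set_info contains only the zero-length mapping[]
 * array, so sizeof(struct nfit_set_info) == 0, and each struct
 * nfit_set_info_map packs a u64 region_offset (8 bytes), a u32
 * serial_number (4 bytes) and a u32 pad (4 bytes) into 16 bytes. A two-dimm
 * interleave set therefore needs sizeof_nfit_set_info(2) == 0 + 2 * 16 ==
 * 32 bytes. The helper below is a hypothetical usage sketch, not part of
 * the driver; the real allocation happens in acpi_nfit_init_interleave_set()
 * further down.
 */
static struct nfit_set_info *example_set_info_alloc(struct device *dev,
		int num_mappings)
{
	/* zeroed so the pad fields do not perturb the fletcher64 cookie */
	return devm_kzalloc(dev, sizeof_nfit_set_info(num_mappings),
			GFP_KERNEL);
}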
static int cmp_map(const void *m0, const void *m1) 1049 { 1050 const struct nfit_set_info_map *map0 = m0; 1051 const struct nfit_set_info_map *map1 = m1; 1052 1053 return memcmp(&map0->region_offset, &map1->region_offset, 1054 sizeof(u64)); 1055 } 1056 1057 /* Retrieve the nth entry referencing this spa */ 1058 static struct acpi_nfit_memory_map *memdev_from_spa( 1059 struct acpi_nfit_desc *acpi_desc, u16 range_index, int n) 1060 { 1061 struct nfit_memdev *nfit_memdev; 1062 1063 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) 1064 if (nfit_memdev->memdev->range_index == range_index) 1065 if (n-- == 0) 1066 return nfit_memdev->memdev; 1067 return NULL; 1068 } 1069 1070 static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc, 1071 struct nd_region_desc *ndr_desc, 1072 struct acpi_nfit_system_address *spa) 1073 { 1074 int i, spa_type = nfit_spa_type(spa); 1075 struct device *dev = acpi_desc->dev; 1076 struct nd_interleave_set *nd_set; 1077 u16 nr = ndr_desc->num_mappings; 1078 struct nfit_set_info *info; 1079 1080 if (spa_type == NFIT_SPA_PM || spa_type == NFIT_SPA_VOLATILE) 1081 /* pass */; 1082 else 1083 return 0; 1084 1085 nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL); 1086 if (!nd_set) 1087 return -ENOMEM; 1088 1089 info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL); 1090 if (!info) 1091 return -ENOMEM; 1092 for (i = 0; i < nr; i++) { 1093 struct nd_mapping *nd_mapping = &ndr_desc->nd_mapping[i]; 1094 struct nfit_set_info_map *map = &info->mapping[i]; 1095 struct nvdimm *nvdimm = nd_mapping->nvdimm; 1096 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 1097 struct acpi_nfit_memory_map *memdev = memdev_from_spa(acpi_desc, 1098 spa->range_index, i); 1099 1100 if (!memdev || !nfit_mem->dcr) { 1101 dev_err(dev, "%s: failed to find DCR\n", __func__); 1102 return -ENODEV; 1103 } 1104 1105 map->region_offset = memdev->region_offset; 1106 map->serial_number = nfit_mem->dcr->serial_number; 1107 } 1108 1109 sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map), 1110 cmp_map, NULL); 1111 nd_set->cookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0); 1112 ndr_desc->nd_set = nd_set; 1113 devm_kfree(dev, info); 1114 1115 return 0; 1116 } 1117 1118 static u64 to_interleave_offset(u64 offset, struct nfit_blk_mmio *mmio) 1119 { 1120 struct acpi_nfit_interleave *idt = mmio->idt; 1121 u32 sub_line_offset, line_index, line_offset; 1122 u64 line_no, table_skip_count, table_offset; 1123 1124 line_no = div_u64_rem(offset, mmio->line_size, &sub_line_offset); 1125 table_skip_count = div_u64_rem(line_no, mmio->num_lines, &line_index); 1126 line_offset = idt->line_offset[line_index] 1127 * mmio->line_size; 1128 table_offset = table_skip_count * mmio->table_size; 1129 1130 return mmio->base_offset + line_offset + table_offset + sub_line_offset; 1131 } 1132 1133 static void wmb_blk(struct nfit_blk *nfit_blk) 1134 { 1135 1136 if (nfit_blk->nvdimm_flush) { 1137 /* 1138 * The first wmb() is needed to 'sfence' all previous writes 1139 * such that they are architecturally visible for the platform 1140 * buffer flush. Note that we've already arranged for pmem 1141 * writes to avoid the cache via arch_memcpy_to_pmem(). The 1142 * final wmb() ensures ordering for the NVDIMM flush write. 
1143 */ 1144 wmb(); 1145 writeq(1, nfit_blk->nvdimm_flush); 1146 wmb(); 1147 } else 1148 wmb_pmem(); 1149 } 1150 1151 static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw) 1152 { 1153 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR]; 1154 u64 offset = nfit_blk->stat_offset + mmio->size * bw; 1155 1156 if (mmio->num_lines) 1157 offset = to_interleave_offset(offset, mmio); 1158 1159 return readl(mmio->addr.base + offset); 1160 } 1161 1162 static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw, 1163 resource_size_t dpa, unsigned int len, unsigned int write) 1164 { 1165 u64 cmd, offset; 1166 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR]; 1167 1168 enum { 1169 BCW_OFFSET_MASK = (1ULL << 48)-1, 1170 BCW_LEN_SHIFT = 48, 1171 BCW_LEN_MASK = (1ULL << 8) - 1, 1172 BCW_CMD_SHIFT = 56, 1173 }; 1174 1175 cmd = (dpa >> L1_CACHE_SHIFT) & BCW_OFFSET_MASK; 1176 len = len >> L1_CACHE_SHIFT; 1177 cmd |= ((u64) len & BCW_LEN_MASK) << BCW_LEN_SHIFT; 1178 cmd |= ((u64) write) << BCW_CMD_SHIFT; 1179 1180 offset = nfit_blk->cmd_offset + mmio->size * bw; 1181 if (mmio->num_lines) 1182 offset = to_interleave_offset(offset, mmio); 1183 1184 writeq(cmd, mmio->addr.base + offset); 1185 wmb_blk(nfit_blk); 1186 1187 if (nfit_blk->dimm_flags & ND_BLK_DCR_LATCH) 1188 readq(mmio->addr.base + offset); 1189 } 1190 1191 static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk, 1192 resource_size_t dpa, void *iobuf, size_t len, int rw, 1193 unsigned int lane) 1194 { 1195 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW]; 1196 unsigned int copied = 0; 1197 u64 base_offset; 1198 int rc; 1199 1200 base_offset = nfit_blk->bdw_offset + dpa % L1_CACHE_BYTES 1201 + lane * mmio->size; 1202 write_blk_ctl(nfit_blk, lane, dpa, len, rw); 1203 while (len) { 1204 unsigned int c; 1205 u64 offset; 1206 1207 if (mmio->num_lines) { 1208 u32 line_offset; 1209 1210 offset = to_interleave_offset(base_offset + copied, 1211 mmio); 1212 div_u64_rem(offset, mmio->line_size, &line_offset); 1213 c = min_t(size_t, len, mmio->line_size - line_offset); 1214 } else { 1215 offset = base_offset + nfit_blk->bdw_offset; 1216 c = len; 1217 } 1218 1219 if (rw) 1220 memcpy_to_pmem(mmio->addr.aperture + offset, 1221 iobuf + copied, c); 1222 else { 1223 if (nfit_blk->dimm_flags & ND_BLK_READ_FLUSH) 1224 mmio_flush_range((void __force *) 1225 mmio->addr.aperture + offset, c); 1226 1227 memcpy_from_pmem(iobuf + copied, 1228 mmio->addr.aperture + offset, c); 1229 } 1230 1231 copied += c; 1232 len -= c; 1233 } 1234 1235 if (rw) 1236 wmb_blk(nfit_blk); 1237 1238 rc = read_blk_stat(nfit_blk, lane) ? 
-EIO : 0; 1239 return rc; 1240 } 1241 1242 static int acpi_nfit_blk_region_do_io(struct nd_blk_region *ndbr, 1243 resource_size_t dpa, void *iobuf, u64 len, int rw) 1244 { 1245 struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr); 1246 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW]; 1247 struct nd_region *nd_region = nfit_blk->nd_region; 1248 unsigned int lane, copied = 0; 1249 int rc = 0; 1250 1251 lane = nd_region_acquire_lane(nd_region); 1252 while (len) { 1253 u64 c = min(len, mmio->size); 1254 1255 rc = acpi_nfit_blk_single_io(nfit_blk, dpa + copied, 1256 iobuf + copied, c, rw, lane); 1257 if (rc) 1258 break; 1259 1260 copied += c; 1261 len -= c; 1262 } 1263 nd_region_release_lane(nd_region, lane); 1264 1265 return rc; 1266 } 1267 1268 static void nfit_spa_mapping_release(struct kref *kref) 1269 { 1270 struct nfit_spa_mapping *spa_map = to_spa_map(kref); 1271 struct acpi_nfit_system_address *spa = spa_map->spa; 1272 struct acpi_nfit_desc *acpi_desc = spa_map->acpi_desc; 1273 1274 WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex)); 1275 dev_dbg(acpi_desc->dev, "%s: SPA%d\n", __func__, spa->range_index); 1276 if (spa_map->type == SPA_MAP_APERTURE) 1277 memunmap((void __force *)spa_map->addr.aperture); 1278 else 1279 iounmap(spa_map->addr.base); 1280 release_mem_region(spa->address, spa->length); 1281 list_del(&spa_map->list); 1282 kfree(spa_map); 1283 } 1284 1285 static struct nfit_spa_mapping *find_spa_mapping( 1286 struct acpi_nfit_desc *acpi_desc, 1287 struct acpi_nfit_system_address *spa) 1288 { 1289 struct nfit_spa_mapping *spa_map; 1290 1291 WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex)); 1292 list_for_each_entry(spa_map, &acpi_desc->spa_maps, list) 1293 if (spa_map->spa == spa) 1294 return spa_map; 1295 1296 return NULL; 1297 } 1298 1299 static void nfit_spa_unmap(struct acpi_nfit_desc *acpi_desc, 1300 struct acpi_nfit_system_address *spa) 1301 { 1302 struct nfit_spa_mapping *spa_map; 1303 1304 mutex_lock(&acpi_desc->spa_map_mutex); 1305 spa_map = find_spa_mapping(acpi_desc, spa); 1306 1307 if (spa_map) 1308 kref_put(&spa_map->kref, nfit_spa_mapping_release); 1309 mutex_unlock(&acpi_desc->spa_map_mutex); 1310 } 1311 1312 static void __iomem *__nfit_spa_map(struct acpi_nfit_desc *acpi_desc, 1313 struct acpi_nfit_system_address *spa, enum spa_map_type type) 1314 { 1315 resource_size_t start = spa->address; 1316 resource_size_t n = spa->length; 1317 struct nfit_spa_mapping *spa_map; 1318 struct resource *res; 1319 1320 WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex)); 1321 1322 spa_map = find_spa_mapping(acpi_desc, spa); 1323 if (spa_map) { 1324 kref_get(&spa_map->kref); 1325 return spa_map->addr.base; 1326 } 1327 1328 spa_map = kzalloc(sizeof(*spa_map), GFP_KERNEL); 1329 if (!spa_map) 1330 return NULL; 1331 1332 INIT_LIST_HEAD(&spa_map->list); 1333 spa_map->spa = spa; 1334 kref_init(&spa_map->kref); 1335 spa_map->acpi_desc = acpi_desc; 1336 1337 res = request_mem_region(start, n, dev_name(acpi_desc->dev)); 1338 if (!res) 1339 goto err_mem; 1340 1341 spa_map->type = type; 1342 if (type == SPA_MAP_APERTURE) 1343 spa_map->addr.aperture = (void __pmem *)memremap(start, n, 1344 ARCH_MEMREMAP_PMEM); 1345 else 1346 spa_map->addr.base = ioremap_nocache(start, n); 1347 1348 1349 if (!spa_map->addr.base) 1350 goto err_map; 1351 1352 list_add_tail(&spa_map->list, &acpi_desc->spa_maps); 1353 return spa_map->addr.base; 1354 1355 err_map: 1356 release_mem_region(start, n); 1357 err_mem: 1358 kfree(spa_map); 1359 return NULL; 1360 } 1361 1362 /** 1363 * nfit_spa_map - 
interleave-aware managed-mappings of acpi_nfit_system_address ranges 1364 * @nvdimm_bus: NFIT-bus that provided the spa table entry 1365 * @nfit_spa: spa table to map 1366 * @type: aperture or control region 1367 * 1368 * In the case where block-data-window apertures and 1369 * dimm-control-regions are interleaved they will end up sharing a 1370 * single request_mem_region() + ioremap() for the address range. In 1371 * the style of devm nfit_spa_map() mappings are automatically dropped 1372 * when all region devices referencing the same mapping are disabled / 1373 * unbound. 1374 */ 1375 static void __iomem *nfit_spa_map(struct acpi_nfit_desc *acpi_desc, 1376 struct acpi_nfit_system_address *spa, enum spa_map_type type) 1377 { 1378 void __iomem *iomem; 1379 1380 mutex_lock(&acpi_desc->spa_map_mutex); 1381 iomem = __nfit_spa_map(acpi_desc, spa, type); 1382 mutex_unlock(&acpi_desc->spa_map_mutex); 1383 1384 return iomem; 1385 } 1386 1387 static int nfit_blk_init_interleave(struct nfit_blk_mmio *mmio, 1388 struct acpi_nfit_interleave *idt, u16 interleave_ways) 1389 { 1390 if (idt) { 1391 mmio->num_lines = idt->line_count; 1392 mmio->line_size = idt->line_size; 1393 if (interleave_ways == 0) 1394 return -ENXIO; 1395 mmio->table_size = mmio->num_lines * interleave_ways 1396 * mmio->line_size; 1397 } 1398 1399 return 0; 1400 } 1401 1402 static int acpi_nfit_blk_get_flags(struct nvdimm_bus_descriptor *nd_desc, 1403 struct nvdimm *nvdimm, struct nfit_blk *nfit_blk) 1404 { 1405 struct nd_cmd_dimm_flags flags; 1406 int rc; 1407 1408 memset(&flags, 0, sizeof(flags)); 1409 rc = nd_desc->ndctl(nd_desc, nvdimm, ND_CMD_DIMM_FLAGS, &flags, 1410 sizeof(flags)); 1411 1412 if (rc >= 0 && flags.status == 0) 1413 nfit_blk->dimm_flags = flags.flags; 1414 else if (rc == -ENOTTY) { 1415 /* fall back to a conservative default */ 1416 nfit_blk->dimm_flags = ND_BLK_DCR_LATCH | ND_BLK_READ_FLUSH; 1417 rc = 0; 1418 } else 1419 rc = -ENXIO; 1420 1421 return rc; 1422 } 1423 1424 static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus, 1425 struct device *dev) 1426 { 1427 struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); 1428 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); 1429 struct nd_blk_region *ndbr = to_nd_blk_region(dev); 1430 struct nfit_flush *nfit_flush; 1431 struct nfit_blk_mmio *mmio; 1432 struct nfit_blk *nfit_blk; 1433 struct nfit_mem *nfit_mem; 1434 struct nvdimm *nvdimm; 1435 int rc; 1436 1437 nvdimm = nd_blk_region_to_dimm(ndbr); 1438 nfit_mem = nvdimm_provider_data(nvdimm); 1439 if (!nfit_mem || !nfit_mem->dcr || !nfit_mem->bdw) { 1440 dev_dbg(dev, "%s: missing%s%s%s\n", __func__, 1441 nfit_mem ? "" : " nfit_mem", 1442 (nfit_mem && nfit_mem->dcr) ? "" : " dcr", 1443 (nfit_mem && nfit_mem->bdw) ? 
"" : " bdw"); 1444 return -ENXIO; 1445 } 1446 1447 nfit_blk = devm_kzalloc(dev, sizeof(*nfit_blk), GFP_KERNEL); 1448 if (!nfit_blk) 1449 return -ENOMEM; 1450 nd_blk_region_set_provider_data(ndbr, nfit_blk); 1451 nfit_blk->nd_region = to_nd_region(dev); 1452 1453 /* map block aperture memory */ 1454 nfit_blk->bdw_offset = nfit_mem->bdw->offset; 1455 mmio = &nfit_blk->mmio[BDW]; 1456 mmio->addr.base = nfit_spa_map(acpi_desc, nfit_mem->spa_bdw, 1457 SPA_MAP_APERTURE); 1458 if (!mmio->addr.base) { 1459 dev_dbg(dev, "%s: %s failed to map bdw\n", __func__, 1460 nvdimm_name(nvdimm)); 1461 return -ENOMEM; 1462 } 1463 mmio->size = nfit_mem->bdw->size; 1464 mmio->base_offset = nfit_mem->memdev_bdw->region_offset; 1465 mmio->idt = nfit_mem->idt_bdw; 1466 mmio->spa = nfit_mem->spa_bdw; 1467 rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_bdw, 1468 nfit_mem->memdev_bdw->interleave_ways); 1469 if (rc) { 1470 dev_dbg(dev, "%s: %s failed to init bdw interleave\n", 1471 __func__, nvdimm_name(nvdimm)); 1472 return rc; 1473 } 1474 1475 /* map block control memory */ 1476 nfit_blk->cmd_offset = nfit_mem->dcr->command_offset; 1477 nfit_blk->stat_offset = nfit_mem->dcr->status_offset; 1478 mmio = &nfit_blk->mmio[DCR]; 1479 mmio->addr.base = nfit_spa_map(acpi_desc, nfit_mem->spa_dcr, 1480 SPA_MAP_CONTROL); 1481 if (!mmio->addr.base) { 1482 dev_dbg(dev, "%s: %s failed to map dcr\n", __func__, 1483 nvdimm_name(nvdimm)); 1484 return -ENOMEM; 1485 } 1486 mmio->size = nfit_mem->dcr->window_size; 1487 mmio->base_offset = nfit_mem->memdev_dcr->region_offset; 1488 mmio->idt = nfit_mem->idt_dcr; 1489 mmio->spa = nfit_mem->spa_dcr; 1490 rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_dcr, 1491 nfit_mem->memdev_dcr->interleave_ways); 1492 if (rc) { 1493 dev_dbg(dev, "%s: %s failed to init dcr interleave\n", 1494 __func__, nvdimm_name(nvdimm)); 1495 return rc; 1496 } 1497 1498 rc = acpi_nfit_blk_get_flags(nd_desc, nvdimm, nfit_blk); 1499 if (rc < 0) { 1500 dev_dbg(dev, "%s: %s failed get DIMM flags\n", 1501 __func__, nvdimm_name(nvdimm)); 1502 return rc; 1503 } 1504 1505 nfit_flush = nfit_mem->nfit_flush; 1506 if (nfit_flush && nfit_flush->flush->hint_count != 0) { 1507 nfit_blk->nvdimm_flush = devm_ioremap_nocache(dev, 1508 nfit_flush->flush->hint_address[0], 8); 1509 if (!nfit_blk->nvdimm_flush) 1510 return -ENOMEM; 1511 } 1512 1513 if (!arch_has_wmb_pmem() && !nfit_blk->nvdimm_flush) 1514 dev_warn(dev, "unable to guarantee persistence of writes\n"); 1515 1516 if (mmio->line_size == 0) 1517 return 0; 1518 1519 if ((u32) nfit_blk->cmd_offset % mmio->line_size 1520 + 8 > mmio->line_size) { 1521 dev_dbg(dev, "cmd_offset crosses interleave boundary\n"); 1522 return -ENXIO; 1523 } else if ((u32) nfit_blk->stat_offset % mmio->line_size 1524 + 8 > mmio->line_size) { 1525 dev_dbg(dev, "stat_offset crosses interleave boundary\n"); 1526 return -ENXIO; 1527 } 1528 1529 return 0; 1530 } 1531 1532 static void acpi_nfit_blk_region_disable(struct nvdimm_bus *nvdimm_bus, 1533 struct device *dev) 1534 { 1535 struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); 1536 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); 1537 struct nd_blk_region *ndbr = to_nd_blk_region(dev); 1538 struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr); 1539 int i; 1540 1541 if (!nfit_blk) 1542 return; /* never enabled */ 1543 1544 /* auto-free BLK spa mappings */ 1545 for (i = 0; i < 2; i++) { 1546 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[i]; 1547 1548 if (mmio->addr.base) 1549 nfit_spa_unmap(acpi_desc, mmio->spa); 1550 } 1551 
nd_blk_region_set_provider_data(ndbr, NULL); 1552 /* devm will free nfit_blk */ 1553 } 1554 1555 static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc, 1556 struct nd_mapping *nd_mapping, struct nd_region_desc *ndr_desc, 1557 struct acpi_nfit_memory_map *memdev, 1558 struct acpi_nfit_system_address *spa) 1559 { 1560 struct nvdimm *nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, 1561 memdev->device_handle); 1562 struct nd_blk_region_desc *ndbr_desc; 1563 struct nfit_mem *nfit_mem; 1564 int blk_valid = 0; 1565 1566 if (!nvdimm) { 1567 dev_err(acpi_desc->dev, "spa%d dimm: %#x not found\n", 1568 spa->range_index, memdev->device_handle); 1569 return -ENODEV; 1570 } 1571 1572 nd_mapping->nvdimm = nvdimm; 1573 switch (nfit_spa_type(spa)) { 1574 case NFIT_SPA_PM: 1575 case NFIT_SPA_VOLATILE: 1576 nd_mapping->start = memdev->address; 1577 nd_mapping->size = memdev->region_size; 1578 break; 1579 case NFIT_SPA_DCR: 1580 nfit_mem = nvdimm_provider_data(nvdimm); 1581 if (!nfit_mem || !nfit_mem->bdw) { 1582 dev_dbg(acpi_desc->dev, "spa%d %s missing bdw\n", 1583 spa->range_index, nvdimm_name(nvdimm)); 1584 } else { 1585 nd_mapping->size = nfit_mem->bdw->capacity; 1586 nd_mapping->start = nfit_mem->bdw->start_address; 1587 ndr_desc->num_lanes = nfit_mem->bdw->windows; 1588 blk_valid = 1; 1589 } 1590 1591 ndr_desc->nd_mapping = nd_mapping; 1592 ndr_desc->num_mappings = blk_valid; 1593 ndbr_desc = to_blk_region_desc(ndr_desc); 1594 ndbr_desc->enable = acpi_nfit_blk_region_enable; 1595 ndbr_desc->disable = acpi_nfit_blk_region_disable; 1596 ndbr_desc->do_io = acpi_desc->blk_do_io; 1597 if (!nvdimm_blk_region_create(acpi_desc->nvdimm_bus, ndr_desc)) 1598 return -ENOMEM; 1599 break; 1600 } 1601 1602 return 0; 1603 } 1604 1605 static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc, 1606 struct nfit_spa *nfit_spa) 1607 { 1608 static struct nd_mapping nd_mappings[ND_MAX_MAPPINGS]; 1609 struct acpi_nfit_system_address *spa = nfit_spa->spa; 1610 struct nd_blk_region_desc ndbr_desc; 1611 struct nd_region_desc *ndr_desc; 1612 struct nfit_memdev *nfit_memdev; 1613 struct nvdimm_bus *nvdimm_bus; 1614 struct resource res; 1615 int count = 0, rc; 1616 1617 if (nfit_spa->is_registered) 1618 return 0; 1619 1620 if (spa->range_index == 0) { 1621 dev_dbg(acpi_desc->dev, "%s: detected invalid spa index\n", 1622 __func__); 1623 return 0; 1624 } 1625 1626 memset(&res, 0, sizeof(res)); 1627 memset(&nd_mappings, 0, sizeof(nd_mappings)); 1628 memset(&ndbr_desc, 0, sizeof(ndbr_desc)); 1629 res.start = spa->address; 1630 res.end = res.start + spa->length - 1; 1631 ndr_desc = &ndbr_desc.ndr_desc; 1632 ndr_desc->res = &res; 1633 ndr_desc->provider_data = nfit_spa; 1634 ndr_desc->attr_groups = acpi_nfit_region_attribute_groups; 1635 if (spa->flags & ACPI_NFIT_PROXIMITY_VALID) 1636 ndr_desc->numa_node = acpi_map_pxm_to_online_node( 1637 spa->proximity_domain); 1638 else 1639 ndr_desc->numa_node = NUMA_NO_NODE; 1640 1641 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { 1642 struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev; 1643 struct nd_mapping *nd_mapping; 1644 1645 if (memdev->range_index != spa->range_index) 1646 continue; 1647 if (count >= ND_MAX_MAPPINGS) { 1648 dev_err(acpi_desc->dev, "spa%d exceeds max mappings %d\n", 1649 spa->range_index, ND_MAX_MAPPINGS); 1650 return -ENXIO; 1651 } 1652 nd_mapping = &nd_mappings[count++]; 1653 rc = acpi_nfit_init_mapping(acpi_desc, nd_mapping, ndr_desc, 1654 memdev, spa); 1655 if (rc) 1656 return rc; 1657 } 1658 1659 ndr_desc->nd_mapping = 
nd_mappings; 1660 ndr_desc->num_mappings = count; 1661 rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa); 1662 if (rc) 1663 return rc; 1664 1665 nvdimm_bus = acpi_desc->nvdimm_bus; 1666 if (nfit_spa_type(spa) == NFIT_SPA_PM) { 1667 if (!nvdimm_pmem_region_create(nvdimm_bus, ndr_desc)) 1668 return -ENOMEM; 1669 } else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE) { 1670 if (!nvdimm_volatile_region_create(nvdimm_bus, ndr_desc)) 1671 return -ENOMEM; 1672 } 1673 1674 nfit_spa->is_registered = 1; 1675 return 0; 1676 } 1677 1678 static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc) 1679 { 1680 struct nfit_spa *nfit_spa; 1681 1682 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { 1683 int rc = acpi_nfit_register_region(acpi_desc, nfit_spa); 1684 1685 if (rc) 1686 return rc; 1687 } 1688 return 0; 1689 } 1690 1691 static int acpi_nfit_check_deletions(struct acpi_nfit_desc *acpi_desc, 1692 struct nfit_table_prev *prev) 1693 { 1694 struct device *dev = acpi_desc->dev; 1695 1696 if (!list_empty(&prev->spas) || 1697 !list_empty(&prev->memdevs) || 1698 !list_empty(&prev->dcrs) || 1699 !list_empty(&prev->bdws) || 1700 !list_empty(&prev->idts) || 1701 !list_empty(&prev->flushes)) { 1702 dev_err(dev, "new nfit deletes entries (unsupported)\n"); 1703 return -ENXIO; 1704 } 1705 return 0; 1706 } 1707 1708 int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, acpi_size sz) 1709 { 1710 struct device *dev = acpi_desc->dev; 1711 struct nfit_table_prev prev; 1712 const void *end; 1713 u8 *data; 1714 int rc; 1715 1716 mutex_lock(&acpi_desc->init_mutex); 1717 1718 INIT_LIST_HEAD(&prev.spas); 1719 INIT_LIST_HEAD(&prev.memdevs); 1720 INIT_LIST_HEAD(&prev.dcrs); 1721 INIT_LIST_HEAD(&prev.bdws); 1722 INIT_LIST_HEAD(&prev.idts); 1723 INIT_LIST_HEAD(&prev.flushes); 1724 1725 list_cut_position(&prev.spas, &acpi_desc->spas, 1726 acpi_desc->spas.prev); 1727 list_cut_position(&prev.memdevs, &acpi_desc->memdevs, 1728 acpi_desc->memdevs.prev); 1729 list_cut_position(&prev.dcrs, &acpi_desc->dcrs, 1730 acpi_desc->dcrs.prev); 1731 list_cut_position(&prev.bdws, &acpi_desc->bdws, 1732 acpi_desc->bdws.prev); 1733 list_cut_position(&prev.idts, &acpi_desc->idts, 1734 acpi_desc->idts.prev); 1735 list_cut_position(&prev.flushes, &acpi_desc->flushes, 1736 acpi_desc->flushes.prev); 1737 1738 data = (u8 *) acpi_desc->nfit; 1739 end = data + sz; 1740 data += sizeof(struct acpi_table_nfit); 1741 while (!IS_ERR_OR_NULL(data)) 1742 data = add_table(acpi_desc, &prev, data, end); 1743 1744 if (IS_ERR(data)) { 1745 dev_dbg(dev, "%s: nfit table parsing error: %ld\n", __func__, 1746 PTR_ERR(data)); 1747 rc = PTR_ERR(data); 1748 goto out_unlock; 1749 } 1750 1751 rc = acpi_nfit_check_deletions(acpi_desc, &prev); 1752 if (rc) 1753 goto out_unlock; 1754 1755 if (nfit_mem_init(acpi_desc) != 0) { 1756 rc = -ENOMEM; 1757 goto out_unlock; 1758 } 1759 1760 acpi_nfit_init_dsms(acpi_desc); 1761 1762 rc = acpi_nfit_register_dimms(acpi_desc); 1763 if (rc) 1764 goto out_unlock; 1765 1766 rc = acpi_nfit_register_regions(acpi_desc); 1767 1768 out_unlock: 1769 mutex_unlock(&acpi_desc->init_mutex); 1770 return rc; 1771 } 1772 EXPORT_SYMBOL_GPL(acpi_nfit_init); 1773 1774 static struct acpi_nfit_desc *acpi_nfit_desc_init(struct acpi_device *adev) 1775 { 1776 struct nvdimm_bus_descriptor *nd_desc; 1777 struct acpi_nfit_desc *acpi_desc; 1778 struct device *dev = &adev->dev; 1779 1780 acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL); 1781 if (!acpi_desc) 1782 return ERR_PTR(-ENOMEM); 1783 1784 dev_set_drvdata(dev, acpi_desc); 
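/* the descriptor is stashed in drvdata so that acpi_nfit_remove() and acpi_nfit_notify() below can recover it via dev_get_drvdata() */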
1785 acpi_desc->dev = dev; 1786 acpi_desc->blk_do_io = acpi_nfit_blk_region_do_io; 1787 nd_desc = &acpi_desc->nd_desc; 1788 nd_desc->provider_name = "ACPI.NFIT"; 1789 nd_desc->ndctl = acpi_nfit_ctl; 1790 nd_desc->attr_groups = acpi_nfit_attribute_groups; 1791 1792 acpi_desc->nvdimm_bus = nvdimm_bus_register(dev, nd_desc); 1793 if (!acpi_desc->nvdimm_bus) { 1794 devm_kfree(dev, acpi_desc); 1795 return ERR_PTR(-ENXIO); 1796 } 1797 1798 INIT_LIST_HEAD(&acpi_desc->spa_maps); 1799 INIT_LIST_HEAD(&acpi_desc->spas); 1800 INIT_LIST_HEAD(&acpi_desc->dcrs); 1801 INIT_LIST_HEAD(&acpi_desc->bdws); 1802 INIT_LIST_HEAD(&acpi_desc->idts); 1803 INIT_LIST_HEAD(&acpi_desc->flushes); 1804 INIT_LIST_HEAD(&acpi_desc->memdevs); 1805 INIT_LIST_HEAD(&acpi_desc->dimms); 1806 mutex_init(&acpi_desc->spa_map_mutex); 1807 mutex_init(&acpi_desc->init_mutex); 1808 1809 return acpi_desc; 1810 } 1811 1812 static int acpi_nfit_add(struct acpi_device *adev) 1813 { 1814 struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; 1815 struct acpi_nfit_desc *acpi_desc; 1816 struct device *dev = &adev->dev; 1817 struct acpi_table_header *tbl; 1818 acpi_status status = AE_OK; 1819 acpi_size sz; 1820 int rc; 1821 1822 status = acpi_get_table_with_size("NFIT", 0, &tbl, &sz); 1823 if (ACPI_FAILURE(status)) { 1824 /* This is ok, we could have an nvdimm hotplugged later */ 1825 dev_dbg(dev, "failed to find NFIT at startup\n"); 1826 return 0; 1827 } 1828 1829 acpi_desc = acpi_nfit_desc_init(adev); 1830 if (IS_ERR(acpi_desc)) { 1831 dev_err(dev, "%s: error initializing acpi_desc: %ld\n", 1832 __func__, PTR_ERR(acpi_desc)); 1833 return PTR_ERR(acpi_desc); 1834 } 1835 1836 acpi_desc->nfit = (struct acpi_table_nfit *) tbl; 1837 1838 /* Evaluate _FIT and override with that if present */ 1839 status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf); 1840 if (ACPI_SUCCESS(status) && buf.length > 0) { 1841 acpi_desc->nfit = (struct acpi_table_nfit *)buf.pointer; 1842 sz = buf.length; 1843 } 1844 1845 rc = acpi_nfit_init(acpi_desc, sz); 1846 if (rc) { 1847 nvdimm_bus_unregister(acpi_desc->nvdimm_bus); 1848 return rc; 1849 } 1850 return 0; 1851 } 1852 1853 static int acpi_nfit_remove(struct acpi_device *adev) 1854 { 1855 struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev); 1856 1857 nvdimm_bus_unregister(acpi_desc->nvdimm_bus); 1858 return 0; 1859 } 1860 1861 static void acpi_nfit_notify(struct acpi_device *adev, u32 event) 1862 { 1863 struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev); 1864 struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; 1865 struct acpi_table_nfit *nfit_saved; 1866 struct device *dev = &adev->dev; 1867 acpi_status status; 1868 int ret; 1869 1870 dev_dbg(dev, "%s: event: %d\n", __func__, event); 1871 1872 device_lock(dev); 1873 if (!dev->driver) { 1874 /* dev->driver may be null if we're being removed */ 1875 dev_dbg(dev, "%s: no driver found for dev\n", __func__); 1876 return; 1877 } 1878 1879 if (!acpi_desc) { 1880 acpi_desc = acpi_nfit_desc_init(adev); 1881 if (IS_ERR(acpi_desc)) { 1882 dev_err(dev, "%s: error initializing acpi_desc: %ld\n", 1883 __func__, PTR_ERR(acpi_desc)); 1884 goto out_unlock; 1885 } 1886 } 1887 1888 /* Evaluate _FIT */ 1889 status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf); 1890 if (ACPI_FAILURE(status)) { 1891 dev_err(dev, "failed to evaluate _FIT\n"); 1892 goto out_unlock; 1893 } 1894 1895 nfit_saved = acpi_desc->nfit; 1896 acpi_desc->nfit = (struct acpi_table_nfit *)buf.pointer; 1897 ret = acpi_nfit_init(acpi_desc, buf.length); 1898 if (!ret) { 1899 
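/* note: acpi_nfit_init() returns 0 on success, so ret == 0 means the merge actually succeeded; for the restore-and-complain path below to match its comment, the test would seemingly need to be 'if (ret)' */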
/* Merge failed, restore old nfit, and exit */ 1900 acpi_desc->nfit = nfit_saved; 1901 dev_err(dev, "failed to merge updated NFIT\n"); 1902 } 1903 kfree(buf.pointer); 1904 1905 out_unlock: 1906 device_unlock(dev); 1907 } 1908 1909 static const struct acpi_device_id acpi_nfit_ids[] = { 1910 { "ACPI0012", 0 }, 1911 { "", 0 }, 1912 }; 1913 MODULE_DEVICE_TABLE(acpi, acpi_nfit_ids); 1914 1915 static struct acpi_driver acpi_nfit_driver = { 1916 .name = KBUILD_MODNAME, 1917 .ids = acpi_nfit_ids, 1918 .ops = { 1919 .add = acpi_nfit_add, 1920 .remove = acpi_nfit_remove, 1921 .notify = acpi_nfit_notify, 1922 }, 1923 }; 1924 1925 static __init int nfit_init(void) 1926 { 1927 BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40); 1928 BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 56); 1929 BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48); 1930 BUILD_BUG_ON(sizeof(struct acpi_nfit_interleave) != 20); 1931 BUILD_BUG_ON(sizeof(struct acpi_nfit_smbios) != 9); 1932 BUILD_BUG_ON(sizeof(struct acpi_nfit_control_region) != 80); 1933 BUILD_BUG_ON(sizeof(struct acpi_nfit_data_region) != 40); 1934 1935 acpi_str_to_uuid(UUID_VOLATILE_MEMORY, nfit_uuid[NFIT_SPA_VOLATILE]); 1936 acpi_str_to_uuid(UUID_PERSISTENT_MEMORY, nfit_uuid[NFIT_SPA_PM]); 1937 acpi_str_to_uuid(UUID_CONTROL_REGION, nfit_uuid[NFIT_SPA_DCR]); 1938 acpi_str_to_uuid(UUID_DATA_REGION, nfit_uuid[NFIT_SPA_BDW]); 1939 acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_VDISK]); 1940 acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_CD, nfit_uuid[NFIT_SPA_VCD]); 1941 acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_PDISK]); 1942 acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_CD, nfit_uuid[NFIT_SPA_PCD]); 1943 acpi_str_to_uuid(UUID_NFIT_BUS, nfit_uuid[NFIT_DEV_BUS]); 1944 acpi_str_to_uuid(UUID_NFIT_DIMM, nfit_uuid[NFIT_DEV_DIMM]); 1945 1946 return acpi_bus_register_driver(&acpi_nfit_driver); 1947 } 1948 1949 static __exit void nfit_exit(void) 1950 { 1951 acpi_bus_unregister_driver(&acpi_nfit_driver); 1952 } 1953 1954 module_init(nfit_init); 1955 module_exit(nfit_exit); 1956 MODULE_LICENSE("GPL v2"); 1957 MODULE_AUTHOR("Intel Corporation"); 1958 1959 1960 1961 1962 1963 /* LDV_COMMENT_BEGIN_MAIN */ 1964 #ifdef LDV_MAIN0_sequence_infinite_withcheck_stateful 1965 1966 /*###########################################################################*/ 1967 1968 /*############## Driver Environment Generator 0.2 output ####################*/ 1969 1970 /*###########################################################################*/ 1971 1972 1973 1974 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Tests that all kernel resources are correctly released by the driver before it is unloaded. */ 1975 void ldv_check_final_state(void); 1976 1977 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Tests that a return value is correct. */ 1978 void ldv_check_return_value(int res); 1979 1980 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Tests that the return value of the probe() function is correct. */ 1981 void ldv_check_return_value_probe(int res); 1982 1983 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Initializes the model. */ 1984 void ldv_initialize(void); 1985 1986 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Reinitializes the model between distinct model function calls. */ 1987 void ldv_handler_precall(void); 1988 1989 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Returns an arbitrary integer value.
*/ 1990 int nondet_int(void); 1991 1992 /* LDV_COMMENT_VAR_DECLARE_LDV Special variable for LDV verifier. */ 1993 int LDV_IN_INTERRUPT; 1994 1995 /* LDV_COMMENT_FUNCTION_MAIN Main function for LDV verifier. */ 1996 void ldv_main0_sequence_infinite_withcheck_stateful(void) { 1997 1998 1999 2000 /* LDV_COMMENT_BEGIN_VARIABLE_DECLARATION_PART */ 2001 /*============================= VARIABLE DECLARATION PART =============================*/ 2002 /** STRUCT: struct type: attribute_group, struct name: acpi_nfit_dimm_attribute_group **/ 2003 /* content: static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj, struct attribute *a, int n)*/ 2004 /* LDV_COMMENT_END_PREP */ 2005 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "acpi_nfit_dimm_attr_visible" */ 2006 struct kobject * var_group1; 2007 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "acpi_nfit_dimm_attr_visible" */ 2008 struct attribute * var_group2; 2009 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "acpi_nfit_dimm_attr_visible" */ 2010 int var_acpi_nfit_dimm_attr_visible_29_p2; 2011 2012 /** STRUCT: struct type: acpi_driver, struct name: acpi_nfit_driver **/ 2013 /* content: static int acpi_nfit_add(struct acpi_device *adev)*/ 2014 /* LDV_COMMENT_END_PREP */ 2015 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "acpi_nfit_add" */ 2016 struct acpi_device * var_group3; 2017 /* content: static int acpi_nfit_remove(struct acpi_device *adev)*/ 2018 /* LDV_COMMENT_END_PREP */ 2019 /* content: static void acpi_nfit_notify(struct acpi_device *adev, u32 event)*/ 2020 /* LDV_COMMENT_END_PREP */ 2021 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "acpi_nfit_notify" */ 2022 u32 var_acpi_nfit_notify_62_p1; 2023 2024 2025 2026 2027 /* LDV_COMMENT_END_VARIABLE_DECLARATION_PART */ 2028 /* LDV_COMMENT_BEGIN_VARIABLE_INITIALIZING_PART */ 2029 /*============================= VARIABLE INITIALIZING PART =============================*/ 2030 LDV_IN_INTERRUPT=1; 2031 2032 2033 2034 2035 /* LDV_COMMENT_END_VARIABLE_INITIALIZING_PART */ 2036 /* LDV_COMMENT_BEGIN_FUNCTION_CALL_SECTION */ 2037 /*============================= FUNCTION CALL SECTION =============================*/ 2038 /* LDV_COMMENT_FUNCTION_CALL Initialize LDV model. */ 2039 ldv_initialize(); 2040 2041 /** INIT: init_type: ST_MODULE_INIT **/ 2042 /* content: static __init int nfit_init(void)*/ 2043 /* LDV_COMMENT_END_PREP */ 2044 /* LDV_COMMENT_FUNCTION_CALL Kernel calls driver init function after driver loading to kernel. This function declared as "MODULE_INIT(function name)". 
*/ 2045 ldv_handler_precall(); 2046 if(nfit_init()) 2047 goto ldv_final; 2048 2049 2050 int ldv_s_acpi_nfit_driver_acpi_driver = 0; 2051 2052 2053 2054 while( nondet_int() 2055 || !(ldv_s_acpi_nfit_driver_acpi_driver == 0) 2056 ) { 2057 2058 switch(nondet_int()) { 2059 2060 case 0: { 2061 2062 /** STRUCT: struct type: attribute_group, struct name: acpi_nfit_dimm_attribute_group **/ 2063 2064 2065 /* content: static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj, struct attribute *a, int n)*/ 2066 /* LDV_COMMENT_END_PREP */ 2067 /* LDV_COMMENT_FUNCTION_CALL Function from field "is_visible" from driver structure with callbacks "acpi_nfit_dimm_attribute_group" */ 2068 ldv_handler_precall(); 2069 acpi_nfit_dimm_attr_visible( var_group1, var_group2, var_acpi_nfit_dimm_attr_visible_29_p2); 2070 2071 2072 2073 2074 } 2075 2076 break; 2077 case 1: { 2078 2079 /** STRUCT: struct type: acpi_driver, struct name: acpi_nfit_driver **/ 2080 if(ldv_s_acpi_nfit_driver_acpi_driver==0) { 2081 2082 /* content: static int acpi_nfit_remove(struct acpi_device *adev)*/ 2083 /* LDV_COMMENT_END_PREP */ 2084 /* LDV_COMMENT_FUNCTION_CALL Function from field "remove" from driver structure with callbacks "acpi_nfit_driver" */ 2085 ldv_handler_precall(); 2086 acpi_nfit_remove( var_group3); 2087 ldv_s_acpi_nfit_driver_acpi_driver=0; 2088 2089 } 2090 2091 } 2092 2093 break; 2094 case 2: { 2095 2096 /** STRUCT: struct type: acpi_driver, struct name: acpi_nfit_driver **/ 2097 2098 2099 /* content: static int acpi_nfit_add(struct acpi_device *adev)*/ 2100 /* LDV_COMMENT_END_PREP */ 2101 /* LDV_COMMENT_FUNCTION_CALL Function from field "add" from driver structure with callbacks "acpi_nfit_driver" */ 2102 ldv_handler_precall(); 2103 acpi_nfit_add( var_group3); 2104 2105 2106 2107 2108 } 2109 2110 break; 2111 case 3: { 2112 2113 /** STRUCT: struct type: acpi_driver, struct name: acpi_nfit_driver **/ 2114 2115 2116 /* content: static void acpi_nfit_notify(struct acpi_device *adev, u32 event)*/ 2117 /* LDV_COMMENT_END_PREP */ 2118 /* LDV_COMMENT_FUNCTION_CALL Function from field "notify" from driver structure with callbacks "acpi_nfit_driver" */ 2119 ldv_handler_precall(); 2120 acpi_nfit_notify( var_group3, var_acpi_nfit_notify_62_p1); 2121 2122 2123 2124 2125 } 2126 2127 break; 2128 default: break; 2129 2130 } 2131 2132 } 2133 2134 ldv_module_exit: 2135 2136 /** INIT: init_type: ST_MODULE_EXIT **/ 2137 /* content: static __exit void nfit_exit(void)*/ 2138 /* LDV_COMMENT_END_PREP */ 2139 /* LDV_COMMENT_FUNCTION_CALL Kernel calls driver release function before driver will be unloaded from kernel. This function declared as "MODULE_EXIT(function name)". */ 2140 ldv_handler_precall(); 2141 nfit_exit(); 2142 2143 /* LDV_COMMENT_FUNCTION_CALL Checks that all resources and locks are correctly released before the driver will be unloaded. */ 2144 ldv_final: ldv_check_final_state(); 2145 2146 /* LDV_COMMENT_END_FUNCTION_CALL_SECTION */ 2147 return; 2148 2149 } 2150 #endif 2151 2152 /* LDV_COMMENT_END_MAIN */ 2153 2154 #line 84 "/home/ldvuser/ref_launch/work/current--X--drivers--X--defaultlinux-4.4-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.4-rc1.tar.xz/csd_deg_dscv/698/dscv_tempdir/dscv/ri/32_7a/drivers/acpi/nfit.o.c.prepared"
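What follows is the generated lock model for this rule: each mutex instance the driver can touch (dev->mutex, acpi_desc->init_mutex, acpi_desc->spa_map_mutex, and so on) gets a shadow state variable, 1 for unlocked and 2 for locked, plus per-instance lock/trylock/unlock functions that the instrumented driver calls instead of the real mutex primitives, so that ldv_assert() catches double locks and ldv_check_final_state() catches locks still held at exit. A minimal self-contained sketch of that discipline (the demo_* names are ours, not part of the generated model, and plain assert() stands in for ldv_assert()):

#include <assert.h>

/* shadow state, same encoding as the models below: 1 = unlocked, 2 = locked */
static int demo_mutex_of_device = 1;

static void demo_device_lock(void)
{
	/* locking an already-locked mutex is the verification error */
	assert(demo_mutex_of_device == 1);
	demo_mutex_of_device = 2;
}

static void demo_device_unlock(void)
{
	/* unlocking a mutex that is not held is also an error */
	assert(demo_mutex_of_device == 2);
	demo_mutex_of_device = 1;
}

int main(void)
{
	/* first notification: acpi_nfit_notify() takes the device lock ... */
	demo_device_lock();
	/* ... and hits the '!dev->driver' early return above without device_unlock() */

	/* second notification: device_lock() runs again and the assertion fires,
	   which is the kind of double-lock state a trace like this one reports */
	demo_device_lock();
	return 0;
}

Compiled and run, the sketch aborts on the second demo_device_lock(), mirroring ldv_assert(ldv_mutex_mutex_of_device == 1) failing in ldv_mutex_lock_mutex_of_device() below.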
1 2 3 #include <linux/mutex.h> 4 #include <linux/errno.h> 5 #include <verifier/rcv.h> 6 #include <kernel-model/ERR.inc> 7 8 static int ldv_mutex_i_mutex_of_inode = 1; 9 10 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_lock_interruptible_i_mutex_of_inode') Check that mutex 'i_mutex_of_inode' was unlocked and nondeterministically lock it. Return the corresponding error code on fails */ 11 int ldv_mutex_lock_interruptible_i_mutex_of_inode(struct mutex *lock) 12 { 13 int nondetermined; 14 15 /* LDV_COMMENT_ASSERT Mutex 'i_mutex_of_inode' must be unlocked */ 16 ldv_assert(ldv_mutex_i_mutex_of_inode == 1); 17 18 /* LDV_COMMENT_OTHER Construct nondetermined result*/ 19 nondetermined = ldv_undef_int(); 20 21 /* LDV_COMMENT_ASSERT Nondeterministically lock mutex 'i_mutex_of_inode' */ 22 if (nondetermined) 23 { 24 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'i_mutex_of_inode' */ 25 ldv_mutex_i_mutex_of_inode = 2; 26 /* LDV_COMMENT_RETURN Finish with success */ 27 return 0; 28 } 29 else 30 { 31 /* LDV_COMMENT_RETURN Finish with fail. Mutex 'i_mutex_of_inode' is kept unlocked */ 32 return -EINTR; 33 } 34 } 35 36 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_lock_killable_i_mutex_of_inode') Check that mutex 'i_mutex_of_inode' was unlocked and nondeterministically lock it. Return the corresponding error code on fails*/ 37 int ldv_mutex_lock_killable_i_mutex_of_inode(struct mutex *lock) 38 { 39 int nondetermined; 40 41 /* LDV_COMMENT_ASSERT Mutex 'i_mutex_of_inode' must be unlocked */ 42 ldv_assert(ldv_mutex_i_mutex_of_inode == 1); 43 44 /* LDV_COMMENT_OTHER Construct nondetermined result */ 45 nondetermined = ldv_undef_int(); 46 47 /* LDV_COMMENT_ASSERT Nondeterministically lock mutex 'i_mutex_of_inode' */ 48 if (nondetermined) 49 { 50 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'i_mutex_of_inode' */ 51 ldv_mutex_i_mutex_of_inode = 2; 52 /* LDV_COMMENT_RETURN Finish with success*/ 53 return 0; 54 } 55 else 56 { 57 /* LDV_COMMENT_RETURN Finish with fail. Mutex 'i_mutex_of_inode' is kept unlocked */ 58 return -EINTR; 59 } 60 } 61 62 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_lock_i_mutex_of_inode') Check that mutex 'i_mutex_of_inode' was not locked and lock it */ 63 void ldv_mutex_lock_i_mutex_of_inode(struct mutex *lock) 64 { 65 /* LDV_COMMENT_ASSERT Mutex 'i_mutex_of_inode' must be unlocked */ 66 ldv_assert(ldv_mutex_i_mutex_of_inode == 1); 67 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'i_mutex_of_inode' */ 68 ldv_mutex_i_mutex_of_inode = 2; 69 } 70 71 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_trylock_i_mutex_of_inode') Check that mutex 'i_mutex_of_inode' was not locked and nondeterministically lock it.
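The nondeterministic result stands in for contention, since another thread may already hold the real lock, so callers must handle the failing branch.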
Return 0 on fails */ 72 int ldv_mutex_trylock_i_mutex_of_inode(struct mutex *lock) 73 { 74 int is_mutex_held_by_another_thread; 75 76 /* LDV_COMMENT_ASSERT It may be an error if mutex 'i_mutex_of_inode' is locked at this point */ 77 ldv_assert(ldv_mutex_i_mutex_of_inode == 1); 78 79 /* LDV_COMMENT_OTHER Construct nondetermined result */ 80 is_mutex_held_by_another_thread = ldv_undef_int(); 81 82 /* LDV_COMMENT_ASSERT Nondeterministically lock mutex 'i_mutex_of_inode' */ 83 if (is_mutex_held_by_another_thread) 84 { 85 /* LDV_COMMENT_RETURN Finish with fail */ 86 return 0; 87 } 88 else 89 { 90 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'i_mutex_of_inode' */ 91 ldv_mutex_i_mutex_of_inode = 2; 92 /* LDV_COMMENT_RETURN Finish with success */ 93 return 1; 94 } 95 } 96 97 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_atomic_dec_and_mutex_lock_i_mutex_of_inode') Lock mutex 'i_mutex_of_inode' if atomic decrement result is zero */ 98 int ldv_atomic_dec_and_mutex_lock_i_mutex_of_inode(atomic_t *cnt, struct mutex *lock) 99 { 100 int atomic_value_after_dec; 101 102 /* LDV_COMMENT_ASSERT Mutex 'i_mutex_of_inode' must be unlocked (since we may lock it in this function) */ 103 ldv_assert(ldv_mutex_i_mutex_of_inode == 1); 104 105 /* LDV_COMMENT_OTHER Assign the result of atomic decrement */ 106 atomic_value_after_dec = ldv_undef_int(); 107 108 /* LDV_COMMENT_ASSERT Check if atomic decrement returns zero */ 109 if (atomic_value_after_dec == 0) 110 { 111 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'i_mutex_of_inode', as atomic has decremented to zero */ 112 ldv_mutex_i_mutex_of_inode = 2; 113 /* LDV_COMMENT_RETURN Return 1 with locked mutex 'i_mutex_of_inode' */ 114 return 1; 115 } 116 117 /* LDV_COMMENT_RETURN Atomic decrement is still not zero, return 0 without locking mutex 'i_mutex_of_inode' */ 118 return 0; 119 } 120 121 /* TODO Syncronize with 39_7a ldv_spin_is_locked! 
*/ 122 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_is_locked_i_mutex_of_inode') Check whether mutex 'i_mutex_of_inode' was locked */ 123 int ldv_mutex_is_locked_i_mutex_of_inode(struct mutex *lock) 124 { 125 int nondetermined; 126 127 if(ldv_mutex_i_mutex_of_inode == 1) 128 { 129 /* LDV_COMMENT_OTHER Construct nondetermined result */ 130 nondetermined = ldv_undef_int(); 131 132 /* LDV_COMMENT_ASSERT Nondeterministically understand whether mutex 'i_mutex_of_inode' was locked */ 133 if(nondetermined) 134 { 135 /* LDV_COMMENT_RETURN Mutex 'i_mutex_of_inode' was unlocked */ 136 return 0; 137 } 138 else 139 { 140 /* LDV_COMMENT_RETURN Mutex 'i_mutex_of_inode' was locked */ 141 return 1; 142 } 143 } 144 else 145 { 146 /* LDV_COMMENT_RETURN Mutex 'i_mutex_of_inode' was locked */ 147 return 1; 148 } 149 } 150 151 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_unlock_i_mutex_of_inode') Check that mutex 'i_mutex_of_inode' was locked and unlock it */ 152 void ldv_mutex_unlock_i_mutex_of_inode(struct mutex *lock) 153 { 154 /* LDV_COMMENT_ASSERT Mutex 'i_mutex_of_inode' must be locked */ 155 ldv_assert(ldv_mutex_i_mutex_of_inode == 2); 156 /* LDV_COMMENT_CHANGE_STATE Unlock mutex 'i_mutex_of_inode' */ 157 ldv_mutex_i_mutex_of_inode = 1; 158 } 159 160 161 162 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_usb_lock_device') Acquires the usb lock and checks for double usb lock*/ 163 void ldv_usb_lock_device_i_mutex_of_inode(void) 164 { 165 /* LDV_COMMENT_CHANGE_STATE Lock usb_lock 'i_mutex_of_inode' */ 166 ldv_mutex_lock_i_mutex_of_inode(NULL); 167 } 168 169 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_usb_trylock_device') Tries to acquire the usb lock and returns 1 if successful*/ 170 int ldv_usb_trylock_device_i_mutex_of_inode(void) 171 { 172 return ldv_mutex_trylock_i_mutex_of_inode(NULL); 173 } 174 175 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_usb_lock_device_for_reset') Tries to acquire the usb lock and returns 0 if successful*/ 176 int ldv_usb_lock_device_for_reset_i_mutex_of_inode(void) 177 { 178 if(ldv_undef_int()) { 179 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'i_mutex_of_inode' */ 180 ldv_mutex_lock_i_mutex_of_inode(NULL); 181 /* LDV_COMMENT_RETURN Finish with success */ 182 return 0; 183 } else 184 /* LDV_COMMENT_RETURN Usb lock is not acquired*/ 185 return ldv_undef_int_negative(); 186 } 187 188 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_usb_unlock_device') Releases the usb lock and checks that usb lock was acquired before*/ 189 void ldv_usb_unlock_device_i_mutex_of_inode(void) { 190 /* LDV_COMMENT_CHANGE_STATE Unlock usb_lock 'i_mutex_of_inode' */ 191 ldv_mutex_unlock_i_mutex_of_inode(NULL); 192 } 193 194 static int ldv_mutex_init_mutex_of_acpi_nfit_desc = 1; 195 196 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_lock_interruptible_init_mutex_of_acpi_nfit_desc') Check that mutex 'init_mutex_of_acpi_nfit_desc' was unlocked and nondeterministically lock it. 
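In this driver the lock being modelled is acpi_desc->init_mutex, held across the whole of acpi_nfit_init() above.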
Return the corresponding error code on fails */ 197 int ldv_mutex_lock_interruptible_init_mutex_of_acpi_nfit_desc(struct mutex *lock) 198 { 199 int nondetermined; 200 201 /* LDV_COMMENT_ASSERT Mutex 'init_mutex_of_acpi_nfit_desc' must be unlocked */ 202 ldv_assert(ldv_mutex_init_mutex_of_acpi_nfit_desc == 1); 203 204 /* LDV_COMMENT_OTHER Construct nondetermined result*/ 205 nondetermined = ldv_undef_int(); 206 207 /* LDV_COMMENT_ASSERT Nondeterministically lock mutex 'init_mutex_of_acpi_nfit_desc' */ 208 if (nondetermined) 209 { 210 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'init_mutex_of_acpi_nfit_desc' */ 211 ldv_mutex_init_mutex_of_acpi_nfit_desc = 2; 212 /* LDV_COMMENT_RETURN Finish with success */ 213 return 0; 214 } 215 else 216 { 217 /* LDV_COMMENT_RETURN Finish with fail. Mutex 'init_mutex_of_acpi_nfit_desc' is kept unlocked */ 218 return -EINTR; 219 } 220 } 221 222 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_lock_killable_init_mutex_of_acpi_nfit_desc') Check that mutex 'init_mutex_of_acpi_nfit_desc' was unlocked and nondeterministically lock it. Return the corresponding error code on fails*/ 223 int ldv_mutex_lock_killable_init_mutex_of_acpi_nfit_desc(struct mutex *lock) 224 { 225 int nondetermined; 226 227 /* LDV_COMMENT_ASSERT Mutex 'init_mutex_of_acpi_nfit_desc' must be unlocked */ 228 ldv_assert(ldv_mutex_init_mutex_of_acpi_nfit_desc == 1); 229 230 /* LDV_COMMENT_OTHER Construct nondetermined result */ 231 nondetermined = ldv_undef_int(); 232 233 /* LDV_COMMENT_ASSERT Nondeterministically lock mutex 'init_mutex_of_acpi_nfit_desc' */ 234 if (nondetermined) 235 { 236 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'init_mutex_of_acpi_nfit_desc' */ 237 ldv_mutex_init_mutex_of_acpi_nfit_desc = 2; 238 /* LDV_COMMENT_RETURN Finish with success*/ 239 return 0; 240 } 241 else 242 { 243 /* LDV_COMMENT_RETURN Finish with fail. Mutex 'init_mutex_of_acpi_nfit_desc' is kept unlocked */ 244 return -EINTR; 245 } 246 } 247 248 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_lock_init_mutex_of_acpi_nfit_desc') Check that mutex 'init_mutex_of_acpi_nfit_desc' was not locked and lock it */ 249 void ldv_mutex_lock_init_mutex_of_acpi_nfit_desc(struct mutex *lock) 250 { 251 /* LDV_COMMENT_ASSERT Mutex 'init_mutex_of_acpi_nfit_desc' must be unlocked */ 252 ldv_assert(ldv_mutex_init_mutex_of_acpi_nfit_desc == 1); 253 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'init_mutex_of_acpi_nfit_desc' */ 254 ldv_mutex_init_mutex_of_acpi_nfit_desc = 2; 255 } 256 257 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_trylock_init_mutex_of_acpi_nfit_desc') Check that mutex 'init_mutex_of_acpi_nfit_desc' was not locked and nondeterministically lock it.
Return 0 on fails */ 258 int ldv_mutex_trylock_init_mutex_of_acpi_nfit_desc(struct mutex *lock) 259 { 260 int is_mutex_held_by_another_thread; 261 262 /* LDV_COMMENT_ASSERT It may be an error if mutex 'init_mutex_of_acpi_nfit_desc' is locked at this point */ 263 ldv_assert(ldv_mutex_init_mutex_of_acpi_nfit_desc == 1); 264 265 /* LDV_COMMENT_OTHER Construct nondetermined result */ 266 is_mutex_held_by_another_thread = ldv_undef_int(); 267 268 /* LDV_COMMENT_ASSERT Nondeterministically lock mutex 'init_mutex_of_acpi_nfit_desc' */ 269 if (is_mutex_held_by_another_thread) 270 { 271 /* LDV_COMMENT_RETURN Finish with fail */ 272 return 0; 273 } 274 else 275 { 276 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'init_mutex_of_acpi_nfit_desc' */ 277 ldv_mutex_init_mutex_of_acpi_nfit_desc = 2; 278 /* LDV_COMMENT_RETURN Finish with success */ 279 return 1; 280 } 281 } 282 283 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_atomic_dec_and_mutex_lock_init_mutex_of_acpi_nfit_desc') Lock mutex 'init_mutex_of_acpi_nfit_desc' if atomic decrement result is zero */ 284 int ldv_atomic_dec_and_mutex_lock_init_mutex_of_acpi_nfit_desc(atomic_t *cnt, struct mutex *lock) 285 { 286 int atomic_value_after_dec; 287 288 /* LDV_COMMENT_ASSERT Mutex 'init_mutex_of_acpi_nfit_desc' must be unlocked (since we may lock it in this function) */ 289 ldv_assert(ldv_mutex_init_mutex_of_acpi_nfit_desc == 1); 290 291 /* LDV_COMMENT_OTHER Assign the result of atomic decrement */ 292 atomic_value_after_dec = ldv_undef_int(); 293 294 /* LDV_COMMENT_ASSERT Check if atomic decrement returns zero */ 295 if (atomic_value_after_dec == 0) 296 { 297 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'init_mutex_of_acpi_nfit_desc', as atomic has decremented to zero */ 298 ldv_mutex_init_mutex_of_acpi_nfit_desc = 2; 299 /* LDV_COMMENT_RETURN Return 1 with locked mutex 'init_mutex_of_acpi_nfit_desc' */ 300 return 1; 301 } 302 303 /* LDV_COMMENT_RETURN Atomic decrement is still not zero, return 0 without locking mutex 'init_mutex_of_acpi_nfit_desc' */ 304 return 0; 305 } 306 307 /* TODO Syncronize with 39_7a ldv_spin_is_locked! 
*/ 308 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_is_locked_init_mutex_of_acpi_nfit_desc') Check whether mutex 'init_mutex_of_acpi_nfit_desc' was locked */ 309 int ldv_mutex_is_locked_init_mutex_of_acpi_nfit_desc(struct mutex *lock) 310 { 311 int nondetermined; 312 313 if(ldv_mutex_init_mutex_of_acpi_nfit_desc == 1) 314 { 315 /* LDV_COMMENT_OTHER Construct nondetermined result */ 316 nondetermined = ldv_undef_int(); 317 318 /* LDV_COMMENT_ASSERT Nondeterministically understand whether mutex 'init_mutex_of_acpi_nfit_desc' was locked */ 319 if(nondetermined) 320 { 321 /* LDV_COMMENT_RETURN Mutex 'init_mutex_of_acpi_nfit_desc' was unlocked */ 322 return 0; 323 } 324 else 325 { 326 /* LDV_COMMENT_RETURN Mutex 'init_mutex_of_acpi_nfit_desc' was locked */ 327 return 1; 328 } 329 } 330 else 331 { 332 /* LDV_COMMENT_RETURN Mutex 'init_mutex_of_acpi_nfit_desc' was locked */ 333 return 1; 334 } 335 } 336 337 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_unlock_init_mutex_of_acpi_nfit_desc') Check that mutex 'init_mutex_of_acpi_nfit_desc' was locked and unlock it */ 338 void ldv_mutex_unlock_init_mutex_of_acpi_nfit_desc(struct mutex *lock) 339 { 340 /* LDV_COMMENT_ASSERT Mutex 'init_mutex_of_acpi_nfit_desc' must be locked */ 341 ldv_assert(ldv_mutex_init_mutex_of_acpi_nfit_desc == 2); 342 /* LDV_COMMENT_CHANGE_STATE Unlock mutex 'init_mutex_of_acpi_nfit_desc' */ 343 ldv_mutex_init_mutex_of_acpi_nfit_desc = 1; 344 } 345 346 347 348 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_usb_lock_device') Acquires the usb lock and checks for double usb lock*/ 349 void ldv_usb_lock_device_init_mutex_of_acpi_nfit_desc(void) 350 { 351 /* LDV_COMMENT_CHANGE_STATE Lock usb_lock 'init_mutex_of_acpi_nfit_desc' */ 352 ldv_mutex_lock_init_mutex_of_acpi_nfit_desc(NULL); 353 } 354 355 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_usb_trylock_device') Tries to acquire the usb lock and returns 1 if successful*/ 356 int ldv_usb_trylock_device_init_mutex_of_acpi_nfit_desc(void) 357 { 358 return ldv_mutex_trylock_init_mutex_of_acpi_nfit_desc(NULL); 359 } 360 361 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_usb_lock_device_for_reset') Tries to acquire the usb lock and returns 0 if successful*/ 362 int ldv_usb_lock_device_for_reset_init_mutex_of_acpi_nfit_desc(void) 363 { 364 if(ldv_undef_int()) { 365 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'init_mutex_of_acpi_nfit_desc' */ 366 ldv_mutex_lock_init_mutex_of_acpi_nfit_desc(NULL); 367 /* LDV_COMMENT_RETURN Finish with success */ 368 return 0; 369 } else 370 /* LDV_COMMENT_RETURN Usb lock is not acquired*/ 371 return ldv_undef_int_negative(); 372 } 373 374 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_usb_unlock_device') Releases the usb lock and checks that usb lock was acquired before*/ 375 void ldv_usb_unlock_device_init_mutex_of_acpi_nfit_desc(void) { 376 /* LDV_COMMENT_CHANGE_STATE Unlock usb_lock 'init_mutex_of_acpi_nfit_desc' */ 377 ldv_mutex_unlock_init_mutex_of_acpi_nfit_desc(NULL); 378 } 379 380 static int ldv_mutex_lock = 1; 381 382 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_lock_interruptible_lock') Check that mutex 'lock' was unlocked and nondeterministically lock it. 
Return the corresponding error code on fails */ 383 int ldv_mutex_lock_interruptible_lock(struct mutex *lock) 384 { 385 int nondetermined; 386 387 /* LDV_COMMENT_ASSERT Mutex 'lock' must be unlocked */ 388 ldv_assert(ldv_mutex_lock == 1); 389 390 /* LDV_COMMENT_OTHER Construct nondetermined result*/ 391 nondetermined = ldv_undef_int(); 392 393 /* LDV_COMMENT_ASSERT Nondeterministically lock mutex 'lock' */ 394 if (nondetermined) 395 { 396 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'lock' */ 397 ldv_mutex_lock = 2; 398 /* LDV_COMMENT_RETURN Finish with success */ 399 return 0; 400 } 401 else 402 { 403 /* LDV_COMMENT_RETURN Finish with fail. Mutex 'lock' is kept unlocked */ 404 return -EINTR; 405 } 406 } 407 408 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_lock_killable_lock') Check that mutex 'lock' was unlocked and nondeterministically lock it. Return the corresponding error code on fails*/ 409 int ldv_mutex_lock_killable_lock(struct mutex *lock) 410 { 411 int nondetermined; 412 413 /* LDV_COMMENT_ASSERT Mutex 'lock' must be unlocked */ 414 ldv_assert(ldv_mutex_lock == 1); 415 416 /* LDV_COMMENT_OTHER Construct nondetermined result */ 417 nondetermined = ldv_undef_int(); 418 419 /* LDV_COMMENT_ASSERT Nondeterministically lock mutex 'lock' */ 420 if (nondetermined) 421 { 422 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'lock' */ 423 ldv_mutex_lock = 2; 424 /* LDV_COMMENT_RETURN Finish with success*/ 425 return 0; 426 } 427 else 428 { 429 /* LDV_COMMENT_RETURN Finish with fail. Mutex 'lock' is kept unlocked */ 430 return -EINTR; 431 } 432 } 433 434 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_lock_lock') Check that mutex 'lock' was not locked and lock it */ 435 void ldv_mutex_lock_lock(struct mutex *lock) 436 { 437 /* LDV_COMMENT_ASSERT Mutex 'lock' must be unlocked */ 438 ldv_assert(ldv_mutex_lock == 1); 439 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'lock' */ 440 ldv_mutex_lock = 2; 441 } 442 443 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_trylock_lock') Check that mutex 'lock' was not locked and nondeterministically lock it.
Return 0 on fails */ 444 int ldv_mutex_trylock_lock(struct mutex *lock) 445 { 446 int is_mutex_held_by_another_thread; 447 448 /* LDV_COMMENT_ASSERT It may be an error if mutex 'lock' is locked at this point */ 449 ldv_assert(ldv_mutex_lock == 1); 450 451 /* LDV_COMMENT_OTHER Construct nondetermined result */ 452 is_mutex_held_by_another_thread = ldv_undef_int(); 453 454 /* LDV_COMMENT_ASSERT Nondeterministically lock mutex 'lock' */ 455 if (is_mutex_held_by_another_thread) 456 { 457 /* LDV_COMMENT_RETURN Finish with fail */ 458 return 0; 459 } 460 else 461 { 462 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'lock' */ 463 ldv_mutex_lock = 2; 464 /* LDV_COMMENT_RETURN Finish with success */ 465 return 1; 466 } 467 } 468 469 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_atomic_dec_and_mutex_lock_lock') Lock mutex 'lock' if atomic decrement result is zero */ 470 int ldv_atomic_dec_and_mutex_lock_lock(atomic_t *cnt, struct mutex *lock) 471 { 472 int atomic_value_after_dec; 473 474 /* LDV_COMMENT_ASSERT Mutex 'lock' must be unlocked (since we may lock it in this function) */ 475 ldv_assert(ldv_mutex_lock == 1); 476 477 /* LDV_COMMENT_OTHER Assign the result of atomic decrement */ 478 atomic_value_after_dec = ldv_undef_int(); 479 480 /* LDV_COMMENT_ASSERT Check if atomic decrement returns zero */ 481 if (atomic_value_after_dec == 0) 482 { 483 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'lock', as atomic has decremented to zero */ 484 ldv_mutex_lock = 2; 485 /* LDV_COMMENT_RETURN Return 1 with locked mutex 'lock' */ 486 return 1; 487 } 488 489 /* LDV_COMMENT_RETURN Atomic decrement is still not zero, return 0 without locking mutex 'lock' */ 490 return 0; 491 } 492 493 /* TODO Syncronize with 39_7a ldv_spin_is_locked! */ 494 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_is_locked_lock') Check whether mutex 'lock' was locked */ 495 int ldv_mutex_is_locked_lock(struct mutex *lock) 496 { 497 int nondetermined; 498 499 if(ldv_mutex_lock == 1) 500 { 501 /* LDV_COMMENT_OTHER Construct nondetermined result */ 502 nondetermined = ldv_undef_int(); 503 504 /* LDV_COMMENT_ASSERT Nondeterministically understand whether mutex 'lock' was locked */ 505 if(nondetermined) 506 { 507 /* LDV_COMMENT_RETURN Mutex 'lock' was unlocked */ 508 return 0; 509 } 510 else 511 { 512 /* LDV_COMMENT_RETURN Mutex 'lock' was locked */ 513 return 1; 514 } 515 } 516 else 517 { 518 /* LDV_COMMENT_RETURN Mutex 'lock' was locked */ 519 return 1; 520 } 521 } 522 523 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_unlock_lock') Check that mutex 'lock' was locked and unlock it */ 524 void ldv_mutex_unlock_lock(struct mutex *lock) 525 { 526 /* LDV_COMMENT_ASSERT Mutex 'lock' must be locked */ 527 ldv_assert(ldv_mutex_lock == 2); 528 /* LDV_COMMENT_CHANGE_STATE Unlock mutex 'lock' */ 529 ldv_mutex_lock = 1; 530 } 531 532 533 534 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_usb_lock_device') Acquires the usb lock and checks for double usb lock*/ 535 void ldv_usb_lock_device_lock(void) 536 { 537 /* LDV_COMMENT_CHANGE_STATE Lock usb_lock 'lock' */ 538 ldv_mutex_lock_lock(NULL); 539 } 540 541 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_usb_trylock_device') Tries to acquire the usb lock and returns 1 if successful*/ 542 int ldv_usb_trylock_device_lock(void) 543 { 544 return ldv_mutex_trylock_lock(NULL); 545 } 546 547 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_usb_lock_device_for_reset') Tries to acquire the usb lock and returns 0 if successful*/ 548 int ldv_usb_lock_device_for_reset_lock(void) 549 { 550 
if(ldv_undef_int()) { 551 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'lock' */ 552 ldv_mutex_lock_lock(NULL); 553 /* LDV_COMMENT_RETURN Finish with success */ 554 return 0; 555 } else 556 /* LDV_COMMENT_RETURN Usb lock is not acquired*/ 557 return ldv_undef_int_negative(); 558 } 559 560 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_usb_unlock_device') Releases the usb lock and checks that usb lock was acquired before*/ 561 void ldv_usb_unlock_device_lock(void) { 562 /* LDV_COMMENT_CHANGE_STATE Unlock usb_lock 'lock' */ 563 ldv_mutex_unlock_lock(NULL); 564 } 565 566 static int ldv_mutex_mutex_of_device = 1; 567 568 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_lock_interruptible_mutex_of_device') Check that mutex 'mutex_of_device' was unlocked and nondeterministically lock it. Return the corresponding error code on fails */ 569 int ldv_mutex_lock_interruptible_mutex_of_device(struct mutex *lock) 570 { 571 int nondetermined; 572 573 /* LDV_COMMENT_ASSERT Mutex 'mutex_of_device' must be unlocked */ 574 ldv_assert(ldv_mutex_mutex_of_device == 1); 575 576 /* LDV_COMMENT_OTHER Construct nondetermined result*/ 577 nondetermined = ldv_undef_int(); 578 579 /* LDV_COMMENT_ASSERT Nondeterministically lock mutex 'mutex_of_device' */ 580 if (nondetermined) 581 { 582 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'mutex_of_device' */ 583 ldv_mutex_mutex_of_device = 2; 584 /* LDV_COMMENT_RETURN Finish with success */ 585 return 0; 586 } 587 else 588 { 589 /* LDV_COMMENT_RETURN Finish with fail. Mutex 'mutex_of_device' is kept unlocked */ 590 return -EINTR; 591 } 592 } 593 594 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_lock_killable_mutex_of_device') Check that mutex 'mutex_of_device' was unlocked and nondeterministically lock it. Return the corresponding error code on fails*/ 595 int ldv_mutex_lock_killable_mutex_of_device(struct mutex *lock) 596 { 597 int nondetermined; 598 599 /* LDV_COMMENT_ASSERT Mutex 'mutex_of_device' must be unlocked */ 600 ldv_assert(ldv_mutex_mutex_of_device == 1); 601 602 /* LDV_COMMENT_OTHER Construct nondetermined result */ 603 nondetermined = ldv_undef_int(); 604 605 /* LDV_COMMENT_ASSERT Nondeterministically lock mutex 'mutex_of_device' */ 606 if (nondetermined) 607 { 608 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'mutex_of_device' */ 609 ldv_mutex_mutex_of_device = 2; 610 /* LDV_COMMENT_RETURN Finish with success*/ 611 return 0; 612 } 613 else 614 { 615 /* LDV_COMMENT_RETURN Finish with fail. Mutex 'mutex_of_device' is kept unlocked */ 616 return -EINTR; 617 } 618 } 619 620 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_lock_mutex_of_device') Check that mutex 'mutex_of_device' was not locked and lock it */ 621 void ldv_mutex_lock_mutex_of_device(struct mutex *lock) 622 { 623 /* LDV_COMMENT_ASSERT Mutex 'mutex_of_device' must be unlocked */ 624 ldv_assert(ldv_mutex_mutex_of_device == 1); 625 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'mutex_of_device' */ 626 ldv_mutex_mutex_of_device = 2; 627 } 628 629 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_trylock_mutex_of_device') Check that mutex 'mutex_of_device' was not locked and nondeterministically lock it.
Return 0 on fails */ 630 int ldv_mutex_trylock_mutex_of_device(struct mutex *lock) 631 { 632 int is_mutex_held_by_another_thread; 633 634 /* LDV_COMMENT_ASSERT It may be an error if mutex 'mutex_of_device' is locked at this point */ 635 ldv_assert(ldv_mutex_mutex_of_device == 1); 636 637 /* LDV_COMMENT_OTHER Construct nondetermined result */ 638 is_mutex_held_by_another_thread = ldv_undef_int(); 639 640 /* LDV_COMMENT_ASSERT Nondeterministically lock mutex 'mutex_of_device' */ 641 if (is_mutex_held_by_another_thread) 642 { 643 /* LDV_COMMENT_RETURN Finish with fail */ 644 return 0; 645 } 646 else 647 { 648 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'mutex_of_device' */ 649 ldv_mutex_mutex_of_device = 2; 650 /* LDV_COMMENT_RETURN Finish with success */ 651 return 1; 652 } 653 } 654 655 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_atomic_dec_and_mutex_lock_mutex_of_device') Lock mutex 'mutex_of_device' if atomic decrement result is zero */ 656 int ldv_atomic_dec_and_mutex_lock_mutex_of_device(atomic_t *cnt, struct mutex *lock) 657 { 658 int atomic_value_after_dec; 659 660 /* LDV_COMMENT_ASSERT Mutex 'mutex_of_device' must be unlocked (since we may lock it in this function) */ 661 ldv_assert(ldv_mutex_mutex_of_device == 1); 662 663 /* LDV_COMMENT_OTHER Assign the result of atomic decrement */ 664 atomic_value_after_dec = ldv_undef_int(); 665 666 /* LDV_COMMENT_ASSERT Check if atomic decrement returns zero */ 667 if (atomic_value_after_dec == 0) 668 { 669 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'mutex_of_device', as atomic has decremented to zero */ 670 ldv_mutex_mutex_of_device = 2; 671 /* LDV_COMMENT_RETURN Return 1 with locked mutex 'mutex_of_device' */ 672 return 1; 673 } 674 675 /* LDV_COMMENT_RETURN Atomic decrement is still not zero, return 0 without locking mutex 'mutex_of_device' */ 676 return 0; 677 } 678 679 /* TODO Syncronize with 39_7a ldv_spin_is_locked! 
*/ 680 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_is_locked_mutex_of_device') Check whether mutex 'mutex_of_device' was locked */ 681 int ldv_mutex_is_locked_mutex_of_device(struct mutex *lock) 682 { 683 int nondetermined; 684 685 if(ldv_mutex_mutex_of_device == 1) 686 { 687 /* LDV_COMMENT_OTHER Construct nondetermined result */ 688 nondetermined = ldv_undef_int(); 689 690 /* LDV_COMMENT_ASSERT Nondeterministically understand whether mutex 'mutex_of_device' was locked */ 691 if(nondetermined) 692 { 693 /* LDV_COMMENT_RETURN Mutex 'mutex_of_device' was unlocked */ 694 return 0; 695 } 696 else 697 { 698 /* LDV_COMMENT_RETURN Mutex 'mutex_of_device' was locked */ 699 return 1; 700 } 701 } 702 else 703 { 704 /* LDV_COMMENT_RETURN Mutex 'mutex_of_device' was locked */ 705 return 1; 706 } 707 } 708 709 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_unlock_mutex_of_device') Check that mutex 'mutex_of_device' was locked and unlock it */ 710 void ldv_mutex_unlock_mutex_of_device(struct mutex *lock) 711 { 712 /* LDV_COMMENT_ASSERT Mutex 'mutex_of_device' must be locked */ 713 ldv_assert(ldv_mutex_mutex_of_device == 2); 714 /* LDV_COMMENT_CHANGE_STATE Unlock mutex 'mutex_of_device' */ 715 ldv_mutex_mutex_of_device = 1; 716 } 717 718 719 720 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_usb_lock_device') Acquires the usb lock and checks for double usb lock*/ 721 void ldv_usb_lock_device_mutex_of_device(void) 722 { 723 /* LDV_COMMENT_CHANGE_STATE Lock usb_lock 'mutex_of_device' */ 724 ldv_mutex_lock_mutex_of_device(NULL); 725 } 726 727 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_usb_trylock_device') Tries to acquire the usb lock and returns 1 if successful*/ 728 int ldv_usb_trylock_device_mutex_of_device(void) 729 { 730 return ldv_mutex_trylock_mutex_of_device(NULL); 731 } 732 733 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_usb_lock_device_for_reset') Tries to acquire the usb lock and returns 0 if successful*/ 734 int ldv_usb_lock_device_for_reset_mutex_of_device(void) 735 { 736 if(ldv_undef_int()) { 737 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'mutex_of_device' */ 738 ldv_mutex_lock_mutex_of_device(NULL); 739 /* LDV_COMMENT_RETURN Finish with success */ 740 return 0; 741 } else 742 /* LDV_COMMENT_RETURN Usb lock is not acquired*/ 743 return ldv_undef_int_negative(); 744 } 745 746 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_usb_unlock_device') Releases the usb lock and checks that usb lock was acquired before*/ 747 void ldv_usb_unlock_device_mutex_of_device(void) { 748 /* LDV_COMMENT_CHANGE_STATE Unlock usb_lock 'mutex_of_device' */ 749 ldv_mutex_unlock_mutex_of_device(NULL); 750 } 751 752 static int ldv_mutex_spa_map_mutex_of_acpi_nfit_desc = 1; 753 754 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_lock_interruptible_spa_map_mutex_of_acpi_nfit_desc') Check that mutex 'spa_map_mutex_of_acpi_nfit_desc' was unlocked and nondeterministically lock it. 
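In this driver the lock being modelled is acpi_desc->spa_map_mutex, which serializes nfit_spa_map() and nfit_spa_unmap() above.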
Return the corresponding error code on failure */ 755 int ldv_mutex_lock_interruptible_spa_map_mutex_of_acpi_nfit_desc(struct mutex *lock) 756 { 757 int nondetermined; 758 759 /* LDV_COMMENT_ASSERT Mutex 'spa_map_mutex_of_acpi_nfit_desc' must be unlocked */ 760 ldv_assert(ldv_mutex_spa_map_mutex_of_acpi_nfit_desc == 1); 761 762 /* LDV_COMMENT_OTHER Construct a nondeterministic result */ 763 nondetermined = ldv_undef_int(); 764 765 /* LDV_COMMENT_ASSERT Nondeterministically lock mutex 'spa_map_mutex_of_acpi_nfit_desc' */ 766 if (nondetermined) 767 { 768 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'spa_map_mutex_of_acpi_nfit_desc' */ 769 ldv_mutex_spa_map_mutex_of_acpi_nfit_desc = 2; 770 /* LDV_COMMENT_RETURN Finish with success */ 771 return 0; 772 } 773 else 774 { 775 /* LDV_COMMENT_RETURN Finish with failure. Mutex 'spa_map_mutex_of_acpi_nfit_desc' is kept unlocked */ 776 return -EINTR; 777 } 778 } 779 780 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_lock_killable_spa_map_mutex_of_acpi_nfit_desc') Check that mutex 'spa_map_mutex_of_acpi_nfit_desc' was unlocked and nondeterministically lock it. Return the corresponding error code on failure */ 781 int ldv_mutex_lock_killable_spa_map_mutex_of_acpi_nfit_desc(struct mutex *lock) 782 { 783 int nondetermined; 784 785 /* LDV_COMMENT_ASSERT Mutex 'spa_map_mutex_of_acpi_nfit_desc' must be unlocked */ 786 ldv_assert(ldv_mutex_spa_map_mutex_of_acpi_nfit_desc == 1); 787 788 /* LDV_COMMENT_OTHER Construct a nondeterministic result */ 789 nondetermined = ldv_undef_int(); 790 791 /* LDV_COMMENT_ASSERT Nondeterministically lock mutex 'spa_map_mutex_of_acpi_nfit_desc' */ 792 if (nondetermined) 793 { 794 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'spa_map_mutex_of_acpi_nfit_desc' */ 795 ldv_mutex_spa_map_mutex_of_acpi_nfit_desc = 2; 796 /* LDV_COMMENT_RETURN Finish with success */ 797 return 0; 798 } 799 else 800 { 801 /* LDV_COMMENT_RETURN Finish with failure. Mutex 'spa_map_mutex_of_acpi_nfit_desc' is kept unlocked */ 802 return -EINTR; 803 } 804 }
Return 0 on failure */ 816 int ldv_mutex_trylock_spa_map_mutex_of_acpi_nfit_desc(struct mutex *lock) 817 { 818 int is_mutex_held_by_another_thread; 819 820 /* LDV_COMMENT_ASSERT It may be an error if mutex 'spa_map_mutex_of_acpi_nfit_desc' is locked at this point */ 821 ldv_assert(ldv_mutex_spa_map_mutex_of_acpi_nfit_desc == 1); 822 823 /* LDV_COMMENT_OTHER Construct a nondeterministic result */ 824 is_mutex_held_by_another_thread = ldv_undef_int(); 825 826 /* LDV_COMMENT_ASSERT Nondeterministically lock mutex 'spa_map_mutex_of_acpi_nfit_desc' */ 827 if (is_mutex_held_by_another_thread) 828 { 829 /* LDV_COMMENT_RETURN Finish with failure */ 830 return 0; 831 } 832 else 833 { 834 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'spa_map_mutex_of_acpi_nfit_desc' */ 835 ldv_mutex_spa_map_mutex_of_acpi_nfit_desc = 2; 836 /* LDV_COMMENT_RETURN Finish with success */ 837 return 1; 838 } 839 } 840 841 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_atomic_dec_and_mutex_lock_spa_map_mutex_of_acpi_nfit_desc') Lock mutex 'spa_map_mutex_of_acpi_nfit_desc' if the atomic decrement result is zero */ 842 int ldv_atomic_dec_and_mutex_lock_spa_map_mutex_of_acpi_nfit_desc(atomic_t *cnt, struct mutex *lock) 843 { 844 int atomic_value_after_dec; 845 846 /* LDV_COMMENT_ASSERT Mutex 'spa_map_mutex_of_acpi_nfit_desc' must be unlocked (since we may lock it in this function) */ 847 ldv_assert(ldv_mutex_spa_map_mutex_of_acpi_nfit_desc == 1); 848 849 /* LDV_COMMENT_OTHER Assign the result of the atomic decrement */ 850 atomic_value_after_dec = ldv_undef_int(); 851 852 /* LDV_COMMENT_ASSERT Check if the atomic decrement returns zero */ 853 if (atomic_value_after_dec == 0) 854 { 855 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'spa_map_mutex_of_acpi_nfit_desc', as the atomic counter has decremented to zero */ 856 ldv_mutex_spa_map_mutex_of_acpi_nfit_desc = 2; 857 /* LDV_COMMENT_RETURN Return 1 with locked mutex 'spa_map_mutex_of_acpi_nfit_desc' */ 858 return 1; 859 } 860 861 /* LDV_COMMENT_RETURN The atomic decrement is still not zero, return 0 without locking mutex 'spa_map_mutex_of_acpi_nfit_desc' */ 862 return 0; 863 } 864 865 /* TODO Synchronize with 39_7a ldv_spin_is_locked!
*/ 866 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_is_locked_spa_map_mutex_of_acpi_nfit_desc') Check whether mutex 'spa_map_mutex_of_acpi_nfit_desc' was locked */ 867 int ldv_mutex_is_locked_spa_map_mutex_of_acpi_nfit_desc(struct mutex *lock) 868 { 869 int nondetermined; 870 871 if(ldv_mutex_spa_map_mutex_of_acpi_nfit_desc == 1) 872 { 873 /* LDV_COMMENT_OTHER Construct a nondeterministic result */ 874 nondetermined = ldv_undef_int(); 875 876 /* LDV_COMMENT_ASSERT Nondeterministically decide whether mutex 'spa_map_mutex_of_acpi_nfit_desc' was locked */ 877 if(nondetermined) 878 { 879 /* LDV_COMMENT_RETURN Mutex 'spa_map_mutex_of_acpi_nfit_desc' was unlocked */ 880 return 0; 881 } 882 else 883 { 884 /* LDV_COMMENT_RETURN Mutex 'spa_map_mutex_of_acpi_nfit_desc' was locked */ 885 return 1; 886 } 887 } 888 else 889 { 890 /* LDV_COMMENT_RETURN Mutex 'spa_map_mutex_of_acpi_nfit_desc' was locked */ 891 return 1; 892 } 893 } 894 895 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_unlock_spa_map_mutex_of_acpi_nfit_desc') Check that mutex 'spa_map_mutex_of_acpi_nfit_desc' was locked and unlock it */ 896 void ldv_mutex_unlock_spa_map_mutex_of_acpi_nfit_desc(struct mutex *lock) 897 { 898 /* LDV_COMMENT_ASSERT Mutex 'spa_map_mutex_of_acpi_nfit_desc' must be locked */ 899 ldv_assert(ldv_mutex_spa_map_mutex_of_acpi_nfit_desc == 2); 900 /* LDV_COMMENT_CHANGE_STATE Unlock mutex 'spa_map_mutex_of_acpi_nfit_desc' */ 901 ldv_mutex_spa_map_mutex_of_acpi_nfit_desc = 1; 902 } 903 904 905 906 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_usb_lock_device') Acquires the usb lock and checks for a double usb lock */ 907 void ldv_usb_lock_device_spa_map_mutex_of_acpi_nfit_desc(void) 908 { 909 /* LDV_COMMENT_CHANGE_STATE Lock usb_lock 'spa_map_mutex_of_acpi_nfit_desc' */ 910 ldv_mutex_lock_spa_map_mutex_of_acpi_nfit_desc(NULL); 911 } 912 913 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_usb_trylock_device') Tries to acquire the usb lock and returns 1 if successful */ 914 int ldv_usb_trylock_device_spa_map_mutex_of_acpi_nfit_desc(void) 915 { 916 return ldv_mutex_trylock_spa_map_mutex_of_acpi_nfit_desc(NULL); 917 } 918 919 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_usb_lock_device_for_reset') Tries to acquire the usb lock and returns 0 if successful */ 920 int ldv_usb_lock_device_for_reset_spa_map_mutex_of_acpi_nfit_desc(void) 921 { 922 if(ldv_undef_int()) { 923 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'spa_map_mutex_of_acpi_nfit_desc' */ 924 ldv_mutex_lock_spa_map_mutex_of_acpi_nfit_desc(NULL); 925 /* LDV_COMMENT_RETURN Finish with success */ 926 return 0; 927 } else 928 /* LDV_COMMENT_RETURN The usb lock is not acquired */ 929 return ldv_undef_int_negative(); 930 } 931 932 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_usb_unlock_device') Releases the usb lock and checks that the usb lock was acquired before */ 933 void ldv_usb_unlock_device_spa_map_mutex_of_acpi_nfit_desc(void) { 934 /* LDV_COMMENT_CHANGE_STATE Unlock usb_lock 'spa_map_mutex_of_acpi_nfit_desc' */ 935 ldv_mutex_unlock_spa_map_mutex_of_acpi_nfit_desc(NULL); 936 } 937 938 939 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_check_final_state') Check that all mutexes are unlocked at the end */ 940 void ldv_check_final_state(void) 941 { 942 /* LDV_COMMENT_ASSERT Mutex 'i_mutex_of_inode' must be unlocked at the end */ 943 ldv_assert(ldv_mutex_i_mutex_of_inode == 1); 944 /* LDV_COMMENT_ASSERT Mutex 'init_mutex_of_acpi_nfit_desc' must be unlocked at the end */ 945 ldv_assert(ldv_mutex_init_mutex_of_acpi_nfit_desc == 1); 946 /*
LDV_COMMENT_ASSERT Mutex 'lock' must be unlocked at the end */ 947 ldv_assert(ldv_mutex_lock == 1); 948 /* LDV_COMMENT_ASSERT Mutex 'mutex_of_device' must be unlocked at the end */ 949 ldv_assert(ldv_mutex_mutex_of_device == 1); 950 /* LDV_COMMENT_ASSERT Mutex 'spa_map_mutex_of_acpi_nfit_desc' must be unlocked at the end */ 951 ldv_assert(ldv_mutex_spa_map_mutex_of_acpi_nfit_desc == 1); 952 }
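
To make the model above concrete: during verification, the LDV instrumentation redirects calls to the kernel mutex API on dev->mutex to the corresponding model functions, so the state variable ldv_mutex_mutex_of_device (1 = unlocked, 2 = locked) tracks every acquisition and release. The following is a minimal sketch, with a hypothetical driver function that is not taken from this trace, of the double-lock pattern that the ldv_assert() calls above are designed to catch:

    /* Hypothetical driver fragment; after instrumentation, mutex_lock() and
     * mutex_unlock() on dev->mutex become the model functions shown above. */
    static void example_bad_path(struct device *dev)
    {
            mutex_lock(&dev->mutex);   /* model: asserts state == 1, sets state = 2 */
            /* ... some work ... */
            mutex_lock(&dev->mutex);   /* model: ldv_assert(state == 1) fails here */
            mutex_unlock(&dev->mutex);
    }

Similarly, a path that returns from the driver while the mutex is still held leaves the state variable at 2, which ldv_check_final_state() then reports at the end of the entry point.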
1 #ifndef _LDV_RCV_H_ 2 #define _LDV_RCV_H_ 3 4 /* If expr evaluates to zero, ldv_assert() causes the program to reach the error 5 label like the standard assert(). */ 6 #define ldv_assert(expr) ((expr) ? 0 : ldv_error()) 7 8 /* The error label wrapper. It is used because some static verifiers (like 9 BLAST) do not accept multiple error labels throughout a program. */ 10 static inline void ldv_error(void) 11 { 12 LDV_ERROR: goto LDV_ERROR; 13 } 14 15 /* If expr evaluates to zero, ldv_assume() causes an infinite loop that is 16 avoided by verifiers. */ 17 #define ldv_assume(expr) ((expr) ? 0 : ldv_stop()) 18 19 /* An infinite loop that causes verifiers to skip such paths. */ 20 static inline void ldv_stop(void) { 21 LDV_STOP: goto LDV_STOP; 22 } 23 24 /* Special nondeterministic functions. */ 25 int ldv_undef_int(void); 26 void *ldv_undef_ptr(void); 27 unsigned long ldv_undef_ulong(void); 28 long ldv_undef_long(void); 29 /* Return a nondeterministic negative integer. */ 30 static inline int ldv_undef_int_negative(void) 31 { 32 int ret = ldv_undef_int(); 33 34 ldv_assume(ret < 0); 35 36 return ret; 37 } 38 /* Return a nondeterministic nonpositive integer. */ 39 static inline int ldv_undef_int_nonpositive(void) 40 { 41 int ret = ldv_undef_int(); 42 43 ldv_assume(ret <= 0); 44 45 return ret; 46 } 47 48 /* Add an explicit model for the __builtin_expect GCC builtin. Without the model, 49 its return value would be treated as nondeterministic by verifiers. */ 50 static inline long __builtin_expect(long exp, long c) 51 { 52 return exp; 53 } 54 55 /* This function causes the program to exit abnormally. GCC implements this 56 function by using a target-dependent mechanism (such as intentionally executing 57 an illegal instruction) or by calling abort. The mechanism used may vary from 58 release to release, so you should not rely on any particular implementation. 59 http://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html */ 60 static inline void __builtin_trap(void) 61 { 62 ldv_assert(0); 63 } 64 65 /* The constant is used to simulate an error of the ldv_undef_ptr() function. */ 66 #define LDV_PTR_MAX 2012 67 68 #endif /* _LDV_RCV_H_ */
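
The primitives in this header combine in a standard way: ldv_undef_int() introduces a value that the verifier treats as arbitrary, and ldv_assume() prunes the paths on which the value violates a constraint, so only the admissible outcomes remain. A small sketch in the same style as ldv_undef_int_negative() above (the function name is illustrative, not part of the header):

    /* Model a call that either succeeds with 0 or fails with a negative errno.
     * Verifiers will explore both outcomes. */
    static inline int ldv_example_maybe_fail(void)
    {
            int ret = ldv_undef_int();  /* arbitrary value */

            ldv_assume(ret <= 0);       /* prune every positive value */

            return ret;                 /* 0 on success, < 0 on failure */
    }

This is exactly the mechanism the mutex model uses: a nondeterministic value decides whether a trylock or an interruptible lock succeeds, and each branch updates the lock state accordingly.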
1 /* 2 * device.h - generic, centralized driver model 3 * 4 * Copyright (c) 2001-2003 Patrick Mochel <mochel@osdl.org> 5 * Copyright (c) 2004-2009 Greg Kroah-Hartman <gregkh@suse.de> 6 * Copyright (c) 2008-2009 Novell Inc. 7 * 8 * This file is released under the GPLv2 9 * 10 * See Documentation/driver-model/ for more information. 11 */ 12 13 #ifndef _DEVICE_H_ 14 #define _DEVICE_H_ 15 16 #include <linux/ioport.h> 17 #include <linux/kobject.h> 18 #include <linux/klist.h> 19 #include <linux/list.h> 20 #include <linux/lockdep.h> 21 #include <linux/compiler.h> 22 #include <linux/types.h> 23 #include <linux/mutex.h> 24 #include <linux/pinctrl/devinfo.h> 25 #include <linux/pm.h> 26 #include <linux/atomic.h> 27 #include <linux/ratelimit.h> 28 #include <linux/uidgid.h> 29 #include <linux/gfp.h> 30 #include <asm/device.h> 31 32 struct device; 33 struct device_private; 34 struct device_driver; 35 struct driver_private; 36 struct module; 37 struct class; 38 struct subsys_private; 39 struct bus_type; 40 struct device_node; 41 struct fwnode_handle; 42 struct iommu_ops; 43 struct iommu_group; 44 45 struct bus_attribute { 46 struct attribute attr; 47 ssize_t (*show)(struct bus_type *bus, char *buf); 48 ssize_t (*store)(struct bus_type *bus, const char *buf, size_t count); 49 }; 50 51 #define BUS_ATTR(_name, _mode, _show, _store) \ 52 struct bus_attribute bus_attr_##_name = __ATTR(_name, _mode, _show, _store) 53 #define BUS_ATTR_RW(_name) \ 54 struct bus_attribute bus_attr_##_name = __ATTR_RW(_name) 55 #define BUS_ATTR_RO(_name) \ 56 struct bus_attribute bus_attr_##_name = __ATTR_RO(_name) 57 58 extern int __must_check bus_create_file(struct bus_type *, 59 struct bus_attribute *); 60 extern void bus_remove_file(struct bus_type *, struct bus_attribute *); 61 62 /** 63 * struct bus_type - The bus type of the device 64 * 65 * @name: The name of the bus. 66 * @dev_name: Used for subsystems to enumerate devices like ("foo%u", dev->id). 67 * @dev_root: Default device to use as the parent. 68 * @dev_attrs: Default attributes of the devices on the bus. 69 * @bus_groups: Default attributes of the bus. 70 * @dev_groups: Default attributes of the devices on the bus. 71 * @drv_groups: Default attributes of the device drivers on the bus. 72 * @match: Called, perhaps multiple times, whenever a new device or driver 73 * is added for this bus. It should return a nonzero value if the 74 * given device can be handled by the given driver. 75 * @uevent: Called when a device is added, removed, or a few other things 76 * that generate uevents to add the environment variables. 77 * @probe: Called when a new device or driver add to this bus, and callback 78 * the specific driver's probe to initial the matched device. 79 * @remove: Called when a device removed from this bus. 80 * @shutdown: Called at shut-down time to quiesce the device. 81 * 82 * @online: Called to put the device back online (after offlining it). 83 * @offline: Called to put the device offline for hot-removal. May fail. 84 * 85 * @suspend: Called when a device on this bus wants to go to sleep mode. 86 * @resume: Called to bring a device on this bus out of sleep mode. 87 * @pm: Power management operations of this bus, callback the specific 88 * device driver's pm-ops. 89 * @iommu_ops: IOMMU specific operations for this bus, used to attach IOMMU 90 * driver implementations to a bus and allow the driver to do 91 * bus-specific setup 92 * @p: The private data of the driver core, only the driver core can 93 * touch this. 
94 * @lock_key: Lock class key for use by the lock validator 95 * 96 * A bus is a channel between the processor and one or more devices. For the 97 * purposes of the device model, all devices are connected via a bus, even if 98 * it is an internal, virtual, "platform" bus. Buses can plug into each other. 99 * A USB controller is usually a PCI device, for example. The device model 100 * represents the actual connections between buses and the devices they control. 101 * A bus is represented by the bus_type structure. It contains the name, the 102 * default attributes, the bus' methods, PM operations, and the driver core's 103 * private data. 104 */ 105 struct bus_type { 106 const char *name; 107 const char *dev_name; 108 struct device *dev_root; 109 struct device_attribute *dev_attrs; /* use dev_groups instead */ 110 const struct attribute_group **bus_groups; 111 const struct attribute_group **dev_groups; 112 const struct attribute_group **drv_groups; 113 114 int (*match)(struct device *dev, struct device_driver *drv); 115 int (*uevent)(struct device *dev, struct kobj_uevent_env *env); 116 int (*probe)(struct device *dev); 117 int (*remove)(struct device *dev); 118 void (*shutdown)(struct device *dev); 119 120 int (*online)(struct device *dev); 121 int (*offline)(struct device *dev); 122 123 int (*suspend)(struct device *dev, pm_message_t state); 124 int (*resume)(struct device *dev); 125 126 const struct dev_pm_ops *pm; 127 128 const struct iommu_ops *iommu_ops; 129 130 struct subsys_private *p; 131 struct lock_class_key lock_key; 132 }; 133 134 extern int __must_check bus_register(struct bus_type *bus); 135 136 extern void bus_unregister(struct bus_type *bus); 137 138 extern int __must_check bus_rescan_devices(struct bus_type *bus); 139 140 /* iterator helpers for buses */ 141 struct subsys_dev_iter { 142 struct klist_iter ki; 143 const struct device_type *type; 144 }; 145 void subsys_dev_iter_init(struct subsys_dev_iter *iter, 146 struct bus_type *subsys, 147 struct device *start, 148 const struct device_type *type); 149 struct device *subsys_dev_iter_next(struct subsys_dev_iter *iter); 150 void subsys_dev_iter_exit(struct subsys_dev_iter *iter); 151 152 int bus_for_each_dev(struct bus_type *bus, struct device *start, void *data, 153 int (*fn)(struct device *dev, void *data)); 154 struct device *bus_find_device(struct bus_type *bus, struct device *start, 155 void *data, 156 int (*match)(struct device *dev, void *data)); 157 struct device *bus_find_device_by_name(struct bus_type *bus, 158 struct device *start, 159 const char *name); 160 struct device *subsys_find_device_by_id(struct bus_type *bus, unsigned int id, 161 struct device *hint); 162 int bus_for_each_drv(struct bus_type *bus, struct device_driver *start, 163 void *data, int (*fn)(struct device_driver *, void *)); 164 void bus_sort_breadthfirst(struct bus_type *bus, 165 int (*compare)(const struct device *a, 166 const struct device *b)); 167 /* 168 * Bus notifiers: Get notified of addition/removal of devices 169 * and binding/unbinding of drivers to devices. 170 * In the long run, it should be a replacement for the platform 171 * notify hooks. 172 */ 173 struct notifier_block; 174 175 extern int bus_register_notifier(struct bus_type *bus, 176 struct notifier_block *nb); 177 extern int bus_unregister_notifier(struct bus_type *bus, 178 struct notifier_block *nb); 179 180 /* All 4 notifers below get called with the target struct device * 181 * as an argument. 
Note that those functions are likely to be called 182 * with the device lock held in the core, so be careful. 183 */ 184 #define BUS_NOTIFY_ADD_DEVICE 0x00000001 /* device added */ 185 #define BUS_NOTIFY_DEL_DEVICE 0x00000002 /* device to be removed */ 186 #define BUS_NOTIFY_REMOVED_DEVICE 0x00000003 /* device removed */ 187 #define BUS_NOTIFY_BIND_DRIVER 0x00000004 /* driver about to be 188 bound */ 189 #define BUS_NOTIFY_BOUND_DRIVER 0x00000005 /* driver bound to device */ 190 #define BUS_NOTIFY_UNBIND_DRIVER 0x00000006 /* driver about to be 191 unbound */ 192 #define BUS_NOTIFY_UNBOUND_DRIVER 0x00000007 /* driver is unbound 193 from the device */ 194 195 extern struct kset *bus_get_kset(struct bus_type *bus); 196 extern struct klist *bus_get_device_klist(struct bus_type *bus); 197 198 /** 199 * enum probe_type - device driver probe type to try 200 * Device drivers may opt in for special handling of their 201 * respective probe routines. This tells the core what to 202 * expect and prefer. 203 * 204 * @PROBE_DEFAULT_STRATEGY: Used by drivers that work equally well 205 * whether probed synchronously or asynchronously. 206 * @PROBE_PREFER_ASYNCHRONOUS: Drivers for "slow" devices which 207 * probing order is not essential for booting the system may 208 * opt into executing their probes asynchronously. 209 * @PROBE_FORCE_SYNCHRONOUS: Use this to annotate drivers that need 210 * their probe routines to run synchronously with driver and 211 * device registration (with the exception of -EPROBE_DEFER 212 * handling - re-probing always ends up being done asynchronously). 213 * 214 * Note that the end goal is to switch the kernel to use asynchronous 215 * probing by default, so annotating drivers with 216 * %PROBE_PREFER_ASYNCHRONOUS is a temporary measure that allows us 217 * to speed up boot process while we are validating the rest of the 218 * drivers. 219 */ 220 enum probe_type { 221 PROBE_DEFAULT_STRATEGY, 222 PROBE_PREFER_ASYNCHRONOUS, 223 PROBE_FORCE_SYNCHRONOUS, 224 }; 225 226 /** 227 * struct device_driver - The basic device driver structure 228 * @name: Name of the device driver. 229 * @bus: The bus which the device of this driver belongs to. 230 * @owner: The module owner. 231 * @mod_name: Used for built-in modules. 232 * @suppress_bind_attrs: Disables bind/unbind via sysfs. 233 * @probe_type: Type of the probe (synchronous or asynchronous) to use. 234 * @of_match_table: The open firmware table. 235 * @acpi_match_table: The ACPI match table. 236 * @probe: Called to query the existence of a specific device, 237 * whether this driver can work with it, and bind the driver 238 * to a specific device. 239 * @remove: Called when the device is removed from the system to 240 * unbind a device from this driver. 241 * @shutdown: Called at shut-down time to quiesce the device. 242 * @suspend: Called to put the device to sleep mode. Usually to a 243 * low power state. 244 * @resume: Called to bring a device from sleep mode. 245 * @groups: Default attributes that get created by the driver core 246 * automatically. 247 * @pm: Power management operations of the device which matched 248 * this driver. 249 * @p: Driver core's private data, no one other than the driver 250 * core can touch this. 251 * 252 * The device driver-model tracks all of the drivers known to the system. 253 * The main reason for this tracking is to enable the driver core to match 254 * up drivers with new devices. Once drivers are known objects within the 255 * system, however, a number of other things become possible. 
Device drivers 256 * can export information and configuration variables that are independent 257 * of any specific device. 258 */ 259 struct device_driver { 260 const char *name; 261 struct bus_type *bus; 262 263 struct module *owner; 264 const char *mod_name; /* used for built-in modules */ 265 266 bool suppress_bind_attrs; /* disables bind/unbind via sysfs */ 267 enum probe_type probe_type; 268 269 const struct of_device_id *of_match_table; 270 const struct acpi_device_id *acpi_match_table; 271 272 int (*probe) (struct device *dev); 273 int (*remove) (struct device *dev); 274 void (*shutdown) (struct device *dev); 275 int (*suspend) (struct device *dev, pm_message_t state); 276 int (*resume) (struct device *dev); 277 const struct attribute_group **groups; 278 279 const struct dev_pm_ops *pm; 280 281 struct driver_private *p; 282 }; 283 284 285 extern int __must_check driver_register(struct device_driver *drv); 286 extern void driver_unregister(struct device_driver *drv); 287 288 extern struct device_driver *driver_find(const char *name, 289 struct bus_type *bus); 290 extern int driver_probe_done(void); 291 extern void wait_for_device_probe(void); 292 293 294 /* sysfs interface for exporting driver attributes */ 295 296 struct driver_attribute { 297 struct attribute attr; 298 ssize_t (*show)(struct device_driver *driver, char *buf); 299 ssize_t (*store)(struct device_driver *driver, const char *buf, 300 size_t count); 301 }; 302 303 #define DRIVER_ATTR(_name, _mode, _show, _store) \ 304 struct driver_attribute driver_attr_##_name = __ATTR(_name, _mode, _show, _store) 305 #define DRIVER_ATTR_RW(_name) \ 306 struct driver_attribute driver_attr_##_name = __ATTR_RW(_name) 307 #define DRIVER_ATTR_RO(_name) \ 308 struct driver_attribute driver_attr_##_name = __ATTR_RO(_name) 309 #define DRIVER_ATTR_WO(_name) \ 310 struct driver_attribute driver_attr_##_name = __ATTR_WO(_name) 311 312 extern int __must_check driver_create_file(struct device_driver *driver, 313 const struct driver_attribute *attr); 314 extern void driver_remove_file(struct device_driver *driver, 315 const struct driver_attribute *attr); 316 317 extern int __must_check driver_for_each_device(struct device_driver *drv, 318 struct device *start, 319 void *data, 320 int (*fn)(struct device *dev, 321 void *)); 322 struct device *driver_find_device(struct device_driver *drv, 323 struct device *start, void *data, 324 int (*match)(struct device *dev, void *data)); 325 326 /** 327 * struct subsys_interface - interfaces to device functions 328 * @name: name of the device function 329 * @subsys: subsytem of the devices to attach to 330 * @node: the list of functions registered at the subsystem 331 * @add_dev: device hookup to device function handler 332 * @remove_dev: device hookup to device function handler 333 * 334 * Simple interfaces attached to a subsystem. Multiple interfaces can 335 * attach to a subsystem and its devices. Unlike drivers, they do not 336 * exclusively claim or control devices. Interfaces usually represent 337 * a specific functionality of a subsystem/class of devices. 
338 */ 339 struct subsys_interface { 340 const char *name; 341 struct bus_type *subsys; 342 struct list_head node; 343 int (*add_dev)(struct device *dev, struct subsys_interface *sif); 344 void (*remove_dev)(struct device *dev, struct subsys_interface *sif); 345 }; 346 347 int subsys_interface_register(struct subsys_interface *sif); 348 void subsys_interface_unregister(struct subsys_interface *sif); 349 350 int subsys_system_register(struct bus_type *subsys, 351 const struct attribute_group **groups); 352 int subsys_virtual_register(struct bus_type *subsys, 353 const struct attribute_group **groups); 354 355 /** 356 * struct class - device classes 357 * @name: Name of the class. 358 * @owner: The module owner. 359 * @class_attrs: Default attributes of this class. 360 * @dev_groups: Default attributes of the devices that belong to the class. 361 * @dev_kobj: The kobject that represents this class and links it into the hierarchy. 362 * @dev_uevent: Called when a device is added, removed from this class, or a 363 * few other things that generate uevents to add the environment 364 * variables. 365 * @devnode: Callback to provide the devtmpfs. 366 * @class_release: Called to release this class. 367 * @dev_release: Called to release the device. 368 * @suspend: Used to put the device to sleep mode, usually to a low power 369 * state. 370 * @resume: Used to bring the device from the sleep mode. 371 * @ns_type: Callbacks so sysfs can detemine namespaces. 372 * @namespace: Namespace of the device belongs to this class. 373 * @pm: The default device power management operations of this class. 374 * @p: The private data of the driver core, no one other than the 375 * driver core can touch this. 376 * 377 * A class is a higher-level view of a device that abstracts out low-level 378 * implementation details. Drivers may see a SCSI disk or an ATA disk, but, 379 * at the class level, they are all simply disks. Classes allow user space 380 * to work with devices based on what they do, rather than how they are 381 * connected or how they work. 
382 */ 383 struct class { 384 const char *name; 385 struct module *owner; 386 387 struct class_attribute *class_attrs; 388 const struct attribute_group **dev_groups; 389 struct kobject *dev_kobj; 390 391 int (*dev_uevent)(struct device *dev, struct kobj_uevent_env *env); 392 char *(*devnode)(struct device *dev, umode_t *mode); 393 394 void (*class_release)(struct class *class); 395 void (*dev_release)(struct device *dev); 396 397 int (*suspend)(struct device *dev, pm_message_t state); 398 int (*resume)(struct device *dev); 399 400 const struct kobj_ns_type_operations *ns_type; 401 const void *(*namespace)(struct device *dev); 402 403 const struct dev_pm_ops *pm; 404 405 struct subsys_private *p; 406 }; 407 408 struct class_dev_iter { 409 struct klist_iter ki; 410 const struct device_type *type; 411 }; 412 413 extern struct kobject *sysfs_dev_block_kobj; 414 extern struct kobject *sysfs_dev_char_kobj; 415 extern int __must_check __class_register(struct class *class, 416 struct lock_class_key *key); 417 extern void class_unregister(struct class *class); 418 419 /* This is a #define to keep the compiler from merging different 420 * instances of the __key variable */ 421 #define class_register(class) \ 422 ({ \ 423 static struct lock_class_key __key; \ 424 __class_register(class, &__key); \ 425 }) 426 427 struct class_compat; 428 struct class_compat *class_compat_register(const char *name); 429 void class_compat_unregister(struct class_compat *cls); 430 int class_compat_create_link(struct class_compat *cls, struct device *dev, 431 struct device *device_link); 432 void class_compat_remove_link(struct class_compat *cls, struct device *dev, 433 struct device *device_link); 434 435 extern void class_dev_iter_init(struct class_dev_iter *iter, 436 struct class *class, 437 struct device *start, 438 const struct device_type *type); 439 extern struct device *class_dev_iter_next(struct class_dev_iter *iter); 440 extern void class_dev_iter_exit(struct class_dev_iter *iter); 441 442 extern int class_for_each_device(struct class *class, struct device *start, 443 void *data, 444 int (*fn)(struct device *dev, void *data)); 445 extern struct device *class_find_device(struct class *class, 446 struct device *start, const void *data, 447 int (*match)(struct device *, const void *)); 448 449 struct class_attribute { 450 struct attribute attr; 451 ssize_t (*show)(struct class *class, struct class_attribute *attr, 452 char *buf); 453 ssize_t (*store)(struct class *class, struct class_attribute *attr, 454 const char *buf, size_t count); 455 }; 456 457 #define CLASS_ATTR(_name, _mode, _show, _store) \ 458 struct class_attribute class_attr_##_name = __ATTR(_name, _mode, _show, _store) 459 #define CLASS_ATTR_RW(_name) \ 460 struct class_attribute class_attr_##_name = __ATTR_RW(_name) 461 #define CLASS_ATTR_RO(_name) \ 462 struct class_attribute class_attr_##_name = __ATTR_RO(_name) 463 464 extern int __must_check class_create_file_ns(struct class *class, 465 const struct class_attribute *attr, 466 const void *ns); 467 extern void class_remove_file_ns(struct class *class, 468 const struct class_attribute *attr, 469 const void *ns); 470 471 static inline int __must_check class_create_file(struct class *class, 472 const struct class_attribute *attr) 473 { 474 return class_create_file_ns(class, attr, NULL); 475 } 476 477 static inline void class_remove_file(struct class *class, 478 const struct class_attribute *attr) 479 { 480 return class_remove_file_ns(class, attr, NULL); 481 } 482 483 /* Simple class attribute that is 
just a static string */ 484 struct class_attribute_string { 485 struct class_attribute attr; 486 char *str; 487 }; 488 489 /* Currently read-only only */ 490 #define _CLASS_ATTR_STRING(_name, _mode, _str) \ 491 { __ATTR(_name, _mode, show_class_attr_string, NULL), _str } 492 #define CLASS_ATTR_STRING(_name, _mode, _str) \ 493 struct class_attribute_string class_attr_##_name = \ 494 _CLASS_ATTR_STRING(_name, _mode, _str) 495 496 extern ssize_t show_class_attr_string(struct class *class, struct class_attribute *attr, 497 char *buf); 498 499 struct class_interface { 500 struct list_head node; 501 struct class *class; 502 503 int (*add_dev) (struct device *, struct class_interface *); 504 void (*remove_dev) (struct device *, struct class_interface *); 505 }; 506 507 extern int __must_check class_interface_register(struct class_interface *); 508 extern void class_interface_unregister(struct class_interface *); 509 510 extern struct class * __must_check __class_create(struct module *owner, 511 const char *name, 512 struct lock_class_key *key); 513 extern void class_destroy(struct class *cls); 514 515 /* This is a #define to keep the compiler from merging different 516 * instances of the __key variable */ 517 #define class_create(owner, name) \ 518 ({ \ 519 static struct lock_class_key __key; \ 520 __class_create(owner, name, &__key); \ 521 }) 522 523 /* 524 * The type of device, "struct device" is embedded in. A class 525 * or bus can contain devices of different types 526 * like "partitions" and "disks", "mouse" and "event". 527 * This identifies the device type and carries type-specific 528 * information, equivalent to the kobj_type of a kobject. 529 * If "name" is specified, the uevent will contain it in 530 * the DEVTYPE variable. 531 */ 532 struct device_type { 533 const char *name; 534 const struct attribute_group **groups; 535 int (*uevent)(struct device *dev, struct kobj_uevent_env *env); 536 char *(*devnode)(struct device *dev, umode_t *mode, 537 kuid_t *uid, kgid_t *gid); 538 void (*release)(struct device *dev); 539 540 const struct dev_pm_ops *pm; 541 }; 542 543 /* interface for exporting device attributes */ 544 struct device_attribute { 545 struct attribute attr; 546 ssize_t (*show)(struct device *dev, struct device_attribute *attr, 547 char *buf); 548 ssize_t (*store)(struct device *dev, struct device_attribute *attr, 549 const char *buf, size_t count); 550 }; 551 552 struct dev_ext_attribute { 553 struct device_attribute attr; 554 void *var; 555 }; 556 557 ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr, 558 char *buf); 559 ssize_t device_store_ulong(struct device *dev, struct device_attribute *attr, 560 const char *buf, size_t count); 561 ssize_t device_show_int(struct device *dev, struct device_attribute *attr, 562 char *buf); 563 ssize_t device_store_int(struct device *dev, struct device_attribute *attr, 564 const char *buf, size_t count); 565 ssize_t device_show_bool(struct device *dev, struct device_attribute *attr, 566 char *buf); 567 ssize_t device_store_bool(struct device *dev, struct device_attribute *attr, 568 const char *buf, size_t count); 569 570 #define DEVICE_ATTR(_name, _mode, _show, _store) \ 571 struct device_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store) 572 #define DEVICE_ATTR_RW(_name) \ 573 struct device_attribute dev_attr_##_name = __ATTR_RW(_name) 574 #define DEVICE_ATTR_RO(_name) \ 575 struct device_attribute dev_attr_##_name = __ATTR_RO(_name) 576 #define DEVICE_ATTR_WO(_name) \ 577 struct device_attribute 
dev_attr_##_name = __ATTR_WO(_name) 578 #define DEVICE_ULONG_ATTR(_name, _mode, _var) \ 579 struct dev_ext_attribute dev_attr_##_name = \ 580 { __ATTR(_name, _mode, device_show_ulong, device_store_ulong), &(_var) } 581 #define DEVICE_INT_ATTR(_name, _mode, _var) \ 582 struct dev_ext_attribute dev_attr_##_name = \ 583 { __ATTR(_name, _mode, device_show_int, device_store_int), &(_var) } 584 #define DEVICE_BOOL_ATTR(_name, _mode, _var) \ 585 struct dev_ext_attribute dev_attr_##_name = \ 586 { __ATTR(_name, _mode, device_show_bool, device_store_bool), &(_var) } 587 #define DEVICE_ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) \ 588 struct device_attribute dev_attr_##_name = \ 589 __ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) 590 591 extern int device_create_file(struct device *device, 592 const struct device_attribute *entry); 593 extern void device_remove_file(struct device *dev, 594 const struct device_attribute *attr); 595 extern bool device_remove_file_self(struct device *dev, 596 const struct device_attribute *attr); 597 extern int __must_check device_create_bin_file(struct device *dev, 598 const struct bin_attribute *attr); 599 extern void device_remove_bin_file(struct device *dev, 600 const struct bin_attribute *attr); 601 602 /* device resource management */ 603 typedef void (*dr_release_t)(struct device *dev, void *res); 604 typedef int (*dr_match_t)(struct device *dev, void *res, void *match_data); 605 606 #ifdef CONFIG_DEBUG_DEVRES 607 extern void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, 608 int nid, const char *name); 609 #define devres_alloc(release, size, gfp) \ 610 __devres_alloc_node(release, size, gfp, NUMA_NO_NODE, #release) 611 #define devres_alloc_node(release, size, gfp, nid) \ 612 __devres_alloc_node(release, size, gfp, nid, #release) 613 #else 614 extern void *devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, 615 int nid); 616 static inline void *devres_alloc(dr_release_t release, size_t size, gfp_t gfp) 617 { 618 return devres_alloc_node(release, size, gfp, NUMA_NO_NODE); 619 } 620 #endif 621 622 extern void devres_for_each_res(struct device *dev, dr_release_t release, 623 dr_match_t match, void *match_data, 624 void (*fn)(struct device *, void *, void *), 625 void *data); 626 extern void devres_free(void *res); 627 extern void devres_add(struct device *dev, void *res); 628 extern void *devres_find(struct device *dev, dr_release_t release, 629 dr_match_t match, void *match_data); 630 extern void *devres_get(struct device *dev, void *new_res, 631 dr_match_t match, void *match_data); 632 extern void *devres_remove(struct device *dev, dr_release_t release, 633 dr_match_t match, void *match_data); 634 extern int devres_destroy(struct device *dev, dr_release_t release, 635 dr_match_t match, void *match_data); 636 extern int devres_release(struct device *dev, dr_release_t release, 637 dr_match_t match, void *match_data); 638 639 /* devres group */ 640 extern void * __must_check devres_open_group(struct device *dev, void *id, 641 gfp_t gfp); 642 extern void devres_close_group(struct device *dev, void *id); 643 extern void devres_remove_group(struct device *dev, void *id); 644 extern int devres_release_group(struct device *dev, void *id); 645 646 /* managed devm_k.alloc/kfree for device drivers */ 647 extern void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp); 648 extern __printf(3, 0) 649 char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt, 650 va_list ap); 651 extern __printf(3, 4) 652 char 
*devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...); 653 static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp) 654 { 655 return devm_kmalloc(dev, size, gfp | __GFP_ZERO); 656 } 657 static inline void *devm_kmalloc_array(struct device *dev, 658 size_t n, size_t size, gfp_t flags) 659 { 660 if (size != 0 && n > SIZE_MAX / size) 661 return NULL; 662 return devm_kmalloc(dev, n * size, flags); 663 } 664 static inline void *devm_kcalloc(struct device *dev, 665 size_t n, size_t size, gfp_t flags) 666 { 667 return devm_kmalloc_array(dev, n, size, flags | __GFP_ZERO); 668 } 669 extern void devm_kfree(struct device *dev, void *p); 670 extern char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp); 671 extern void *devm_kmemdup(struct device *dev, const void *src, size_t len, 672 gfp_t gfp); 673 674 extern unsigned long devm_get_free_pages(struct device *dev, 675 gfp_t gfp_mask, unsigned int order); 676 extern void devm_free_pages(struct device *dev, unsigned long addr); 677 678 void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res); 679 680 /* allows to add/remove a custom action to devres stack */ 681 int devm_add_action(struct device *dev, void (*action)(void *), void *data); 682 void devm_remove_action(struct device *dev, void (*action)(void *), void *data); 683 684 struct device_dma_parameters { 685 /* 686 * a low level driver may set these to teach IOMMU code about 687 * sg limitations. 688 */ 689 unsigned int max_segment_size; 690 unsigned long segment_boundary_mask; 691 }; 692 693 /** 694 * struct device - The basic device structure 695 * @parent: The device's "parent" device, the device to which it is attached. 696 * In most cases, a parent device is some sort of bus or host 697 * controller. If parent is NULL, the device, is a top-level device, 698 * which is not usually what you want. 699 * @p: Holds the private data of the driver core portions of the device. 700 * See the comment of the struct device_private for detail. 701 * @kobj: A top-level, abstract class from which other classes are derived. 702 * @init_name: Initial name of the device. 703 * @type: The type of device. 704 * This identifies the device type and carries type-specific 705 * information. 706 * @mutex: Mutex to synchronize calls to its driver. 707 * @bus: Type of bus device is on. 708 * @driver: Which driver has allocated this 709 * @platform_data: Platform data specific to the device. 710 * Example: For devices on custom boards, as typical of embedded 711 * and SOC based hardware, Linux often uses platform_data to point 712 * to board-specific structures describing devices and how they 713 * are wired. That can include what ports are available, chip 714 * variants, which GPIO pins act in what additional roles, and so 715 * on. This shrinks the "Board Support Packages" (BSPs) and 716 * minimizes board-specific #ifdefs in drivers. 717 * @driver_data: Private pointer for driver specific info. 718 * @power: For device power management. 719 * See Documentation/power/devices.txt for details. 720 * @pm_domain: Provide callbacks that are executed during system suspend, 721 * hibernation, system resume and during runtime PM transitions 722 * along with subsystem-level and driver-level callbacks. 723 * @pins: For device pin management. 724 * See Documentation/pinctrl.txt for details. 725 * @msi_list: Hosts MSI descriptors 726 * @msi_domain: The generic MSI domain this device is using. 727 * @numa_node: NUMA node this device is close to. 
728 * @dma_mask: Dma mask (if dma'ble device). 729 * @coherent_dma_mask: Like dma_mask, but for alloc_coherent mapping as not all 730 * hardware supports 64-bit addresses for consistent allocations 731 * such descriptors. 732 * @dma_pfn_offset: offset of DMA memory range relatively of RAM 733 * @dma_parms: A low level driver may set these to teach IOMMU code about 734 * segment limitations. 735 * @dma_pools: Dma pools (if dma'ble device). 736 * @dma_mem: Internal for coherent mem override. 737 * @cma_area: Contiguous memory area for dma allocations 738 * @archdata: For arch-specific additions. 739 * @of_node: Associated device tree node. 740 * @fwnode: Associated device node supplied by platform firmware. 741 * @devt: For creating the sysfs "dev". 742 * @id: device instance 743 * @devres_lock: Spinlock to protect the resource of the device. 744 * @devres_head: The resources list of the device. 745 * @knode_class: The node used to add the device to the class list. 746 * @class: The class of the device. 747 * @groups: Optional attribute groups. 748 * @release: Callback to free the device after all references have 749 * gone away. This should be set by the allocator of the 750 * device (i.e. the bus driver that discovered the device). 751 * @iommu_group: IOMMU group the device belongs to. 752 * 753 * @offline_disabled: If set, the device is permanently online. 754 * @offline: Set after successful invocation of bus type's .offline(). 755 * 756 * At the lowest level, every device in a Linux system is represented by an 757 * instance of struct device. The device structure contains the information 758 * that the device model core needs to model the system. Most subsystems, 759 * however, track additional information about the devices they host. As a 760 * result, it is rare for devices to be represented by bare device structures; 761 * instead, that structure, like kobject structures, is usually embedded within 762 * a higher-level representation of the device. 763 */ 764 struct device { 765 struct device *parent; 766 767 struct device_private *p; 768 769 struct kobject kobj; 770 const char *init_name; /* initial name of the device */ 771 const struct device_type *type; 772 773 struct mutex mutex; /* mutex to synchronize calls to 774 * its driver. 775 */ 776 777 struct bus_type *bus; /* type of bus device is on */ 778 struct device_driver *driver; /* which driver has allocated this 779 device */ 780 void *platform_data; /* Platform specific data, device 781 core doesn't touch it */ 782 void *driver_data; /* Driver data, set and get with 783 dev_set/get_drvdata */ 784 struct dev_pm_info power; 785 struct dev_pm_domain *pm_domain; 786 787 #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN 788 struct irq_domain *msi_domain; 789 #endif 790 #ifdef CONFIG_PINCTRL 791 struct dev_pin_info *pins; 792 #endif 793 #ifdef CONFIG_GENERIC_MSI_IRQ 794 struct list_head msi_list; 795 #endif 796 797 #ifdef CONFIG_NUMA 798 int numa_node; /* NUMA node this device is close to */ 799 #endif 800 u64 *dma_mask; /* dma mask (if dma'able device) */ 801 u64 coherent_dma_mask;/* Like dma_mask, but for 802 alloc_coherent mappings as 803 not all hardware supports 804 64 bit addresses for consistent 805 allocations such descriptors. 
*/ 806 unsigned long dma_pfn_offset; 807 808 struct device_dma_parameters *dma_parms; 809 810 struct list_head dma_pools; /* dma pools (if dma'ble) */ 811 812 struct dma_coherent_mem *dma_mem; /* internal for coherent mem 813 override */ 814 #ifdef CONFIG_DMA_CMA 815 struct cma *cma_area; /* contiguous memory area for dma 816 allocations */ 817 #endif 818 /* arch specific additions */ 819 struct dev_archdata archdata; 820 821 struct device_node *of_node; /* associated device tree node */ 822 struct fwnode_handle *fwnode; /* firmware device node */ 823 824 dev_t devt; /* dev_t, creates the sysfs "dev" */ 825 u32 id; /* device instance */ 826 827 spinlock_t devres_lock; 828 struct list_head devres_head; 829 830 struct klist_node knode_class; 831 struct class *class; 832 const struct attribute_group **groups; /* optional groups */ 833 834 void (*release)(struct device *dev); 835 struct iommu_group *iommu_group; 836 837 bool offline_disabled:1; 838 bool offline:1; 839 }; 840 841 static inline struct device *kobj_to_dev(struct kobject *kobj) 842 { 843 return container_of(kobj, struct device, kobj); 844 } 845 846 /* Get the wakeup routines, which depend on struct device */ 847 #include <linux/pm_wakeup.h> 848 849 static inline const char *dev_name(const struct device *dev) 850 { 851 /* Use the init name until the kobject becomes available */ 852 if (dev->init_name) 853 return dev->init_name; 854 855 return kobject_name(&dev->kobj); 856 } 857 858 extern __printf(2, 3) 859 int dev_set_name(struct device *dev, const char *name, ...); 860 861 #ifdef CONFIG_NUMA 862 static inline int dev_to_node(struct device *dev) 863 { 864 return dev->numa_node; 865 } 866 static inline void set_dev_node(struct device *dev, int node) 867 { 868 dev->numa_node = node; 869 } 870 #else 871 static inline int dev_to_node(struct device *dev) 872 { 873 return -1; 874 } 875 static inline void set_dev_node(struct device *dev, int node) 876 { 877 } 878 #endif 879 880 static inline struct irq_domain *dev_get_msi_domain(const struct device *dev) 881 { 882 #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN 883 return dev->msi_domain; 884 #else 885 return NULL; 886 #endif 887 } 888 889 static inline void dev_set_msi_domain(struct device *dev, struct irq_domain *d) 890 { 891 #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN 892 dev->msi_domain = d; 893 #endif 894 } 895 896 static inline void *dev_get_drvdata(const struct device *dev) 897 { 898 return dev->driver_data; 899 } 900 901 static inline void dev_set_drvdata(struct device *dev, void *data) 902 { 903 dev->driver_data = data; 904 } 905 906 static inline struct pm_subsys_data *dev_to_psd(struct device *dev) 907 { 908 return dev ? 
dev->power.subsys_data : NULL; 909 } 910 911 static inline unsigned int dev_get_uevent_suppress(const struct device *dev) 912 { 913 return dev->kobj.uevent_suppress; 914 } 915 916 static inline void dev_set_uevent_suppress(struct device *dev, int val) 917 { 918 dev->kobj.uevent_suppress = val; 919 } 920 921 static inline int device_is_registered(struct device *dev) 922 { 923 return dev->kobj.state_in_sysfs; 924 } 925 926 static inline void device_enable_async_suspend(struct device *dev) 927 { 928 if (!dev->power.is_prepared) 929 dev->power.async_suspend = true; 930 } 931 932 static inline void device_disable_async_suspend(struct device *dev) 933 { 934 if (!dev->power.is_prepared) 935 dev->power.async_suspend = false; 936 } 937 938 static inline bool device_async_suspend_enabled(struct device *dev) 939 { 940 return !!dev->power.async_suspend; 941 } 942 943 static inline void pm_suspend_ignore_children(struct device *dev, bool enable) 944 { 945 dev->power.ignore_children = enable; 946 } 947 948 static inline void dev_pm_syscore_device(struct device *dev, bool val) 949 { 950 #ifdef CONFIG_PM_SLEEP 951 dev->power.syscore = val; 952 #endif 953 } 954 955 static inline void device_lock(struct device *dev) 956 { 957 mutex_lock(&dev->mutex); 958 } 959 960 static inline int device_trylock(struct device *dev) 961 { 962 return mutex_trylock(&dev->mutex); 963 } 964 965 static inline void device_unlock(struct device *dev) 966 { 967 mutex_unlock(&dev->mutex); 968 } 969 970 static inline void device_lock_assert(struct device *dev) 971 { 972 lockdep_assert_held(&dev->mutex); 973 } 974 975 static inline struct device_node *dev_of_node(struct device *dev) 976 { 977 if (!IS_ENABLED(CONFIG_OF)) 978 return NULL; 979 return dev->of_node; 980 } 981 982 void driver_init(void); 983 984 /* 985 * High level routines for use by the bus drivers 986 */ 987 extern int __must_check device_register(struct device *dev); 988 extern void device_unregister(struct device *dev); 989 extern void device_initialize(struct device *dev); 990 extern int __must_check device_add(struct device *dev); 991 extern void device_del(struct device *dev); 992 extern int device_for_each_child(struct device *dev, void *data, 993 int (*fn)(struct device *dev, void *data)); 994 extern int device_for_each_child_reverse(struct device *dev, void *data, 995 int (*fn)(struct device *dev, void *data)); 996 extern struct device *device_find_child(struct device *dev, void *data, 997 int (*match)(struct device *dev, void *data)); 998 extern int device_rename(struct device *dev, const char *new_name); 999 extern int device_move(struct device *dev, struct device *new_parent, 1000 enum dpm_order dpm_order); 1001 extern const char *device_get_devnode(struct device *dev, 1002 umode_t *mode, kuid_t *uid, kgid_t *gid, 1003 const char **tmp); 1004 1005 static inline bool device_supports_offline(struct device *dev) 1006 { 1007 return dev->bus && dev->bus->offline && dev->bus->online; 1008 } 1009 1010 extern void lock_device_hotplug(void); 1011 extern void unlock_device_hotplug(void); 1012 extern int lock_device_hotplug_sysfs(void); 1013 extern int device_offline(struct device *dev); 1014 extern int device_online(struct device *dev); 1015 extern void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode); 1016 extern void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode); 1017 1018 /* 1019 * Root device objects for grouping under /sys/devices 1020 */ 1021 extern struct device *__root_device_register(const char *name, 1022 struct 
module *owner); 1023 1024 /* This is a macro to avoid include problems with THIS_MODULE */ 1025 #define root_device_register(name) \ 1026 __root_device_register(name, THIS_MODULE) 1027 1028 extern void root_device_unregister(struct device *root); 1029 1030 static inline void *dev_get_platdata(const struct device *dev) 1031 { 1032 return dev->platform_data; 1033 } 1034 1035 /* 1036 * Manual binding of a device to driver. See drivers/base/bus.c 1037 * for information on use. 1038 */ 1039 extern int __must_check device_bind_driver(struct device *dev); 1040 extern void device_release_driver(struct device *dev); 1041 extern int __must_check device_attach(struct device *dev); 1042 extern int __must_check driver_attach(struct device_driver *drv); 1043 extern void device_initial_probe(struct device *dev); 1044 extern int __must_check device_reprobe(struct device *dev); 1045 1046 /* 1047 * Easy functions for dynamically creating devices on the fly 1048 */ 1049 extern __printf(5, 0) 1050 struct device *device_create_vargs(struct class *cls, struct device *parent, 1051 dev_t devt, void *drvdata, 1052 const char *fmt, va_list vargs); 1053 extern __printf(5, 6) 1054 struct device *device_create(struct class *cls, struct device *parent, 1055 dev_t devt, void *drvdata, 1056 const char *fmt, ...); 1057 extern __printf(6, 7) 1058 struct device *device_create_with_groups(struct class *cls, 1059 struct device *parent, dev_t devt, void *drvdata, 1060 const struct attribute_group **groups, 1061 const char *fmt, ...); 1062 extern void device_destroy(struct class *cls, dev_t devt); 1063 1064 /* 1065 * Platform "fixup" functions - allow the platform to have their say 1066 * about devices and actions that the general device layer doesn't 1067 * know about. 1068 */ 1069 /* Notify platform of device discovery */ 1070 extern int (*platform_notify)(struct device *dev); 1071 1072 extern int (*platform_notify_remove)(struct device *dev); 1073 1074 1075 /* 1076 * get_device - atomically increment the reference count for the device. 1077 * 1078 */ 1079 extern struct device *get_device(struct device *dev); 1080 extern void put_device(struct device *dev); 1081 1082 #ifdef CONFIG_DEVTMPFS 1083 extern int devtmpfs_create_node(struct device *dev); 1084 extern int devtmpfs_delete_node(struct device *dev); 1085 extern int devtmpfs_mount(const char *mntdir); 1086 #else 1087 static inline int devtmpfs_create_node(struct device *dev) { return 0; } 1088 static inline int devtmpfs_delete_node(struct device *dev) { return 0; } 1089 static inline int devtmpfs_mount(const char *mountpoint) { return 0; } 1090 #endif 1091 1092 /* drivers/base/power/shutdown.c */ 1093 extern void device_shutdown(void); 1094 1095 /* debugging and troubleshooting/diagnostic helpers. 
     */
1096 extern const char *dev_driver_string(const struct device *dev);
1097
1098
1099 #ifdef CONFIG_PRINTK
1100
1101 extern __printf(3, 0)
1102 int dev_vprintk_emit(int level, const struct device *dev,
1103                      const char *fmt, va_list args);
1104 extern __printf(3, 4)
1105 int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...);
1106
1107 extern __printf(3, 4)
1108 void dev_printk(const char *level, const struct device *dev,
1109                 const char *fmt, ...);
1110 extern __printf(2, 3)
1111 void dev_emerg(const struct device *dev, const char *fmt, ...);
1112 extern __printf(2, 3)
1113 void dev_alert(const struct device *dev, const char *fmt, ...);
1114 extern __printf(2, 3)
1115 void dev_crit(const struct device *dev, const char *fmt, ...);
1116 extern __printf(2, 3)
1117 void dev_err(const struct device *dev, const char *fmt, ...);
1118 extern __printf(2, 3)
1119 void dev_warn(const struct device *dev, const char *fmt, ...);
1120 extern __printf(2, 3)
1121 void dev_notice(const struct device *dev, const char *fmt, ...);
1122 extern __printf(2, 3)
1123 void _dev_info(const struct device *dev, const char *fmt, ...);
1124
1125 #else
1126
1127 static inline __printf(3, 0)
1128 int dev_vprintk_emit(int level, const struct device *dev,
1129                      const char *fmt, va_list args)
1130 { return 0; }
1131 static inline __printf(3, 4)
1132 int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...)
1133 { return 0; }
1134
1135 static inline void __dev_printk(const char *level, const struct device *dev,
1136                                 struct va_format *vaf)
1137 {}
1138 static inline __printf(3, 4)
1139 void dev_printk(const char *level, const struct device *dev,
1140                 const char *fmt, ...)
1141 {}
1142
1143 static inline __printf(2, 3)
1144 void dev_emerg(const struct device *dev, const char *fmt, ...)
1145 {}
1146 static inline __printf(2, 3)
1147 void dev_crit(const struct device *dev, const char *fmt, ...)
1148 {}
1149 static inline __printf(2, 3)
1150 void dev_alert(const struct device *dev, const char *fmt, ...)
1151 {}
1152 static inline __printf(2, 3)
1153 void dev_err(const struct device *dev, const char *fmt, ...)
1154 {}
1155 static inline __printf(2, 3)
1156 void dev_warn(const struct device *dev, const char *fmt, ...)
1157 {}
1158 static inline __printf(2, 3)
1159 void dev_notice(const struct device *dev, const char *fmt, ...)
1160 {}
1161 static inline __printf(2, 3)
1162 void _dev_info(const struct device *dev, const char *fmt, ...)
1163 {}
1164
1165 #endif
1166
1167 /*
1168  * Stupid hackaround for existing uses of non-printk uses dev_info
1169  *
1170  * Note that the definition of dev_info below is actually _dev_info
1171  * and a macro is used to avoid redefining dev_info
1172  */
1173
1174 #define dev_info(dev, fmt, arg...) _dev_info(dev, fmt, ##arg)
1175
1176 #if defined(CONFIG_DYNAMIC_DEBUG)
1177 #define dev_dbg(dev, format, ...)                    \
1178 do {                                                 \
1179         dynamic_dev_dbg(dev, format, ##__VA_ARGS__); \
1180 } while (0)
1181 #elif defined(DEBUG)
1182 #define dev_dbg(dev, format, arg...)                 \
1183         dev_printk(KERN_DEBUG, dev, format, ##arg)
1184 #else
1185 #define dev_dbg(dev, format, arg...)                            \
1186 ({                                                              \
1187         if (0)                                                  \
1188                 dev_printk(KERN_DEBUG, dev, format, ##arg);     \
1189 })
1190 #endif
1191
1192 #ifdef CONFIG_PRINTK
1193 #define dev_level_once(dev_level, dev, fmt, ...)                \
1194 do {                                                            \
1195         static bool __print_once __read_mostly;                 \
1196                                                                 \
1197         if (!__print_once) {                                    \
1198                 __print_once = true;                            \
1199                 dev_level(dev, fmt, ##__VA_ARGS__);             \
1200         }                                                       \
1201 } while (0)
1202 #else
1203 #define dev_level_once(dev_level, dev, fmt, ...)                \
1204 do {                                                            \
1205         if (0)                                                  \
1206                 dev_level(dev, fmt, ##__VA_ARGS__);             \
1207 } while (0)
1208 #endif
1209
1210 #define dev_emerg_once(dev, fmt, ...)                           \
1211         dev_level_once(dev_emerg, dev, fmt, ##__VA_ARGS__)
1212 #define dev_alert_once(dev, fmt, ...)                           \
1213         dev_level_once(dev_alert, dev, fmt, ##__VA_ARGS__)
1214 #define dev_crit_once(dev, fmt, ...)                            \
1215         dev_level_once(dev_crit, dev, fmt, ##__VA_ARGS__)
1216 #define dev_err_once(dev, fmt, ...)                             \
1217         dev_level_once(dev_err, dev, fmt, ##__VA_ARGS__)
1218 #define dev_warn_once(dev, fmt, ...)                            \
1219         dev_level_once(dev_warn, dev, fmt, ##__VA_ARGS__)
1220 #define dev_notice_once(dev, fmt, ...)                          \
1221         dev_level_once(dev_notice, dev, fmt, ##__VA_ARGS__)
1222 #define dev_info_once(dev, fmt, ...)                            \
1223         dev_level_once(dev_info, dev, fmt, ##__VA_ARGS__)
1224 #define dev_dbg_once(dev, fmt, ...)                             \
1225         dev_level_once(dev_dbg, dev, fmt, ##__VA_ARGS__)
1226
1227 #define dev_level_ratelimited(dev_level, dev, fmt, ...)         \
1228 do {                                                            \
1229         static DEFINE_RATELIMIT_STATE(_rs,                      \
1230                                       DEFAULT_RATELIMIT_INTERVAL, \
1231                                       DEFAULT_RATELIMIT_BURST); \
1232         if (__ratelimit(&_rs))                                  \
1233                 dev_level(dev, fmt, ##__VA_ARGS__);             \
1234 } while (0)
1235
1236 #define dev_emerg_ratelimited(dev, fmt, ...)                    \
1237         dev_level_ratelimited(dev_emerg, dev, fmt, ##__VA_ARGS__)
1238 #define dev_alert_ratelimited(dev, fmt, ...)                    \
1239         dev_level_ratelimited(dev_alert, dev, fmt, ##__VA_ARGS__)
1240 #define dev_crit_ratelimited(dev, fmt, ...)                     \
1241         dev_level_ratelimited(dev_crit, dev, fmt, ##__VA_ARGS__)
1242 #define dev_err_ratelimited(dev, fmt, ...)                      \
1243         dev_level_ratelimited(dev_err, dev, fmt, ##__VA_ARGS__)
1244 #define dev_warn_ratelimited(dev, fmt, ...)                     \
1245         dev_level_ratelimited(dev_warn, dev, fmt, ##__VA_ARGS__)
1246 #define dev_notice_ratelimited(dev, fmt, ...)                   \
1247         dev_level_ratelimited(dev_notice, dev, fmt, ##__VA_ARGS__)
1248 #define dev_info_ratelimited(dev, fmt, ...)                     \
1249         dev_level_ratelimited(dev_info, dev, fmt, ##__VA_ARGS__)
1250 #if defined(CONFIG_DYNAMIC_DEBUG)
1251 /* descriptor check is first to prevent flooding with "callbacks suppressed" */
1252 #define dev_dbg_ratelimited(dev, fmt, ...)                      \
1253 do {                                                            \
1254         static DEFINE_RATELIMIT_STATE(_rs,                      \
1255                                       DEFAULT_RATELIMIT_INTERVAL, \
1256                                       DEFAULT_RATELIMIT_BURST); \
1257         DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt);         \
1258         if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) && \
1259             __ratelimit(&_rs))                                  \
1260                 __dynamic_dev_dbg(&descriptor, dev, fmt,        \
1261                                   ##__VA_ARGS__);               \
1262 } while (0)
1263 #elif defined(DEBUG)
1264 #define dev_dbg_ratelimited(dev, fmt, ...)                      \
1265 do {                                                            \
1266         static DEFINE_RATELIMIT_STATE(_rs,                      \
1267                                       DEFAULT_RATELIMIT_INTERVAL, \
1268                                       DEFAULT_RATELIMIT_BURST); \
1269         if (__ratelimit(&_rs))                                  \
1270                 dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); \
1271 } while (0)
1272 #else
1273 #define dev_dbg_ratelimited(dev, fmt, ...)                      \
1274         no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
1275 #endif
1276
1277 #ifdef VERBOSE_DEBUG
1278 #define dev_vdbg        dev_dbg
1279 #else
1280 #define dev_vdbg(dev, format, arg...)                           \
1281 ({                                                              \
1282         if (0)                                                  \
1283                 dev_printk(KERN_DEBUG, dev, format, ##arg);     \
1284 })
1285 #endif
1286
1287 /*
1288  * dev_WARN*() acts like dev_printk(), but with the key difference of
1289  * using WARN/WARN_ONCE to include file/line information and a backtrace.
1290  */
1291 #define dev_WARN(dev, format, arg...) \
1292         WARN(1, "%s %s: " format, dev_driver_string(dev), dev_name(dev), ## arg);
1293
1294 #define dev_WARN_ONCE(dev, condition, format, arg...) \
1295         WARN_ONCE(condition, "%s %s: " format, \
1296                         dev_driver_string(dev), dev_name(dev), ## arg)
1297
1298 /* Create alias, so I can be autoloaded. */
1299 #define MODULE_ALIAS_CHARDEV(major,minor) \
1300         MODULE_ALIAS("char-major-" __stringify(major) "-" __stringify(minor))
1301 #define MODULE_ALIAS_CHARDEV_MAJOR(major) \
1302         MODULE_ALIAS("char-major-" __stringify(major) "-*")
1303
1304 #ifdef CONFIG_SYSFS_DEPRECATED
1305 extern long sysfs_deprecated;
1306 #else
1307 #define sysfs_deprecated 0
1308 #endif
1309
1310 /**
1311  * module_driver() - Helper macro for drivers that don't do anything
1312  * special in module init/exit. This eliminates a lot of boilerplate.
1313  * Each module may only use this macro once, and calling it replaces
1314  * module_init() and module_exit().
1315  *
1316  * @__driver: driver name
1317  * @__register: register function for this driver type
1318  * @__unregister: unregister function for this driver type
1319  * @...: Additional arguments to be passed to __register and __unregister.
1320  *
1321  * Use this macro to construct bus specific macros for registering
1322  * drivers, and do not use it on its own.
1323  */
1324 #define module_driver(__driver, __register, __unregister, ...) \
1325 static int __init __driver##_init(void) \
1326 { \
1327         return __register(&(__driver) , ##__VA_ARGS__); \
1328 } \
1329 module_init(__driver##_init); \
1330 static void __exit __driver##_exit(void) \
1331 { \
1332         __unregister(&(__driver) , ##__VA_ARGS__); \
1333 } \
1334 module_exit(__driver##_exit);
1335
1336 /**
1337  * builtin_driver() - Helper macro for drivers that don't do anything
1338  * special in init and have no exit. This eliminates some boilerplate.
1339  * Each driver may only use this macro once, and calling it replaces
1340  * device_initcall (or in some cases, the legacy __initcall). This is
1341  * meant to be a direct parallel of module_driver() above but without
1342  * the __exit stuff that is not used for builtin cases.
1343  *
1344  * @__driver: driver name
1345  * @__register: register function for this driver type
1346  * @...: Additional arguments to be passed to __register
1347  *
1348  * Use this macro to construct bus specific macros for registering
1349  * drivers, and do not use it on its own.
1350  */
1351 #define builtin_driver(__driver, __register, ...) \
1352 static int __init __driver##_init(void) \
1353 { \
1354         return __register(&(__driver) , ##__VA_ARGS__); \
1355 } \
1356 device_initcall(__driver##_init);
1357
1358 #endif /* _DEVICE_H_ */
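For reference, the *_once() and *_ratelimited() helpers declared above are typically used as in the following sketch. It is not part of the verified source: the function, its arguments, and the limits are assumptions made purely for illustration.

#include <linux/device.h>

/* Hypothetical helper; foo_check_status(), its status argument, and the
 * range limits are invented for this sketch. */
static int foo_check_status(struct device *dev, int status)
{
        if (status < 0) {
                /* Emitted at most once per boot: dev_level_once() keeps a
                 * static __print_once flag per call site. */
                dev_err_once(dev, "controller fault: %d\n", status);

                /* Throttled by the static ratelimit state that
                 * dev_level_ratelimited() declares per call site. */
                dev_warn_ratelimited(dev, "dropping request\n");
                return status;
        }

        /* dev_WARN_ONCE() additionally prints file/line information and a
         * backtrace via WARN_ONCE(); the range check is hypothetical. */
        dev_WARN_ONCE(dev, status > 255, "status out of range: %d\n", status);

        /* Expands to the "if (0)" stub unless DEBUG or CONFIG_DYNAMIC_DEBUG
         * is in effect. */
        dev_dbg(dev, "status %d ok\n", status);
        return 0;
}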
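The kernel-doc for module_driver() and builtin_driver() above says they are building blocks for bus-specific registration macros, not for direct use. A minimal sketch of that pattern follows; the foobus_* names and struct foobus_driver are invented for illustration (the kernel defines macros such as module_platform_driver() in exactly this style).

/* Hypothetical bus-specific wrapper built on module_driver();
 * foobus_register_driver()/foobus_unregister_driver() are assumed to be
 * the bus's register/unregister entry points. */
#define module_foobus_driver(__foobus_driver) \
        module_driver(__foobus_driver, foobus_register_driver, \
                      foobus_unregister_driver)

/* A driver then replaces its module_init()/module_exit() boilerplate with
 * a single line; module_driver() expands this into the generated
 * my_driver_init()/my_driver_exit() pair. */
static struct foobus_driver my_driver = {
        /* ... callbacks ... */
};
module_foobus_driver(my_driver);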

Here is an explanation of a rule violation that arose while checking your driver against the corresponding kernel.

Note that this may be a false positive, i.e. there may be no real error at all. Please analyze the given error trace and the related source code to determine whether there really is an error in your driver.

The Error trace column shows the path on which the given rule is violated. You can expand/collapse whole classes of entities by clicking the corresponding checkboxes in the main menu or in the advanced Others menu, and you can expand/collapse each particular entity by clicking its +/- sign. Hovering over some entities shows explanatory tips. The error trace is also bound to the related source code: line numbers may be shown as links on the left, and clicking one opens the corresponding line in the source code.

The Source code column shows the content of the files related to the error trace: the source code of your driver (note that there are some LDV modifications at the end), kernel headers, and the rule model. Tabs show the currently opened file and the other available files; hovering over a tab shows the full file name, and clicking a tab opens that file's content.

Kernel:      linux-4.4-rc1.tar.xz
Module:      drivers/acpi/nfit.ko
Rule:        32_7a
Verifier:    CPAchecker
Verdict:     Bug
Status:      Fixed
Timestamp:   2015-12-11 23:29:51
Bug report:  L0212

Comment

Reported: 11 Dec 2015
