Error Trace
[Home]
Bug # 174
Show/hide error trace Error trace
{ 20 typedef unsigned char __u8; 23 typedef unsigned short __u16; 26 typedef unsigned int __u32; 30 typedef unsigned long long __u64; 15 typedef signed char s8; 16 typedef unsigned char u8; 18 typedef short s16; 19 typedef unsigned short u16; 21 typedef int s32; 22 typedef unsigned int u32; 25 typedef unsigned long long u64; 14 typedef long __kernel_long_t; 15 typedef unsigned long __kernel_ulong_t; 71 typedef __kernel_ulong_t __kernel_size_t; 72 typedef __kernel_long_t __kernel_ssize_t; 87 typedef long long __kernel_loff_t; 291 struct kernel_symbol { unsigned long value; const char *name; } ; 34 struct module ; 18 typedef unsigned short umode_t; 29 typedef _Bool bool; 45 typedef __kernel_loff_t loff_t; 54 typedef __kernel_size_t size_t; 59 typedef __kernel_ssize_t ssize_t; 157 typedef unsigned int gfp_t; 176 struct __anonstruct_atomic_t_6 { int counter; } ; 176 typedef struct __anonstruct_atomic_t_6 atomic_t; 181 struct __anonstruct_atomic64_t_7 { long counter; } ; 181 typedef struct __anonstruct_atomic64_t_7 atomic64_t; 182 struct list_head { struct list_head *next; struct list_head *prev; } ; 187 struct hlist_node ; 191 struct hlist_node { struct hlist_node *next; struct hlist_node **pprev; } ; 202 struct callback_head { struct callback_head *next; void (*func)(struct callback_head *); } ; 115 typedef void (*ctor_fn_t)(); 486 struct completion ; 27 union __anonunion___u_9 { struct list_head *__val; char __c[1U]; } ; 65 union __anonunion___u_11 { struct list_head *__val; char __c[1U]; } ; 105 union __anonunion___u_13 { struct list_head *__val; char __c[1U]; } ; 202 union __anonunion___u_15 { struct list_head *__val; char __c[1U]; } ; 546 struct bug_entry { int bug_addr_disp; int file_disp; unsigned short line; unsigned short flags; } ; 50 struct task_struct ; 39 struct page ; 26 struct mm_struct ; 13 typedef unsigned long pteval_t; 14 typedef unsigned long pmdval_t; 15 typedef unsigned long pudval_t; 16 typedef unsigned long pgdval_t; 17 typedef unsigned long pgprotval_t; 19 struct __anonstruct_pte_t_34 { pteval_t pte; } ; 19 typedef struct __anonstruct_pte_t_34 pte_t; 21 struct pgprot { pgprotval_t pgprot; } ; 256 typedef struct pgprot pgprot_t; 258 struct __anonstruct_pgd_t_35 { pgdval_t pgd; } ; 258 typedef struct __anonstruct_pgd_t_35 pgd_t; 276 struct __anonstruct_pud_t_36 { pudval_t pud; } ; 276 typedef struct __anonstruct_pud_t_36 pud_t; 297 struct __anonstruct_pmd_t_37 { pmdval_t pmd; } ; 297 typedef struct __anonstruct_pmd_t_37 pmd_t; 423 typedef struct page *pgtable_t; 434 struct file ; 445 struct seq_file ; 483 struct cpumask ; 20 struct qspinlock { atomic_t val; } ; 33 typedef struct qspinlock arch_spinlock_t; 83 struct static_key { atomic_t enabled; } ; 38 union __anonunion___u_44 { int __val; char __c[1U]; } ; 21 union __anonunion___u_46 { long __val; char __c[1U]; } ; 33 union __anonunion___u_48 { long __val; char __c[1U]; } ; 23 typedef atomic64_t atomic_long_t; 359 struct cpumask { unsigned long bits[128U]; } ; 657 typedef struct cpumask *cpumask_var_t; 22 struct tracepoint_func { void *func; void *data; int prio; } ; 28 struct tracepoint { const char *name; struct static_key key; int (*regfunc)(); void (*unregfunc)(); struct tracepoint_func *funcs; } ; 33 struct lockdep_map ; 55 struct stack_trace { unsigned int nr_entries; unsigned int max_entries; unsigned long *entries; int skip; } ; 28 struct lockdep_subclass_key { char __one_byte; } ; 53 struct lock_class_key { struct lockdep_subclass_key subkeys[8U]; } ; 59 struct lock_class { struct hlist_node hash_entry; struct list_head lock_entry; struct lockdep_subclass_key *key; unsigned int subclass; unsigned int dep_gen_id; unsigned long usage_mask; struct stack_trace usage_traces[13U]; struct list_head locks_after; struct list_head locks_before; unsigned int version; unsigned long ops; const char *name; int name_version; unsigned long contention_point[4U]; unsigned long contending_point[4U]; } ; 144 struct lockdep_map { struct lock_class_key *key; struct lock_class *class_cache[2U]; const char *name; int cpu; unsigned long ip; } ; 593 struct raw_spinlock { arch_spinlock_t raw_lock; unsigned int magic; unsigned int owner_cpu; void *owner; struct lockdep_map dep_map; } ; 32 typedef struct raw_spinlock raw_spinlock_t; 33 struct __anonstruct____missing_field_name_77 { u8 __padding[24U]; struct lockdep_map dep_map; } ; 33 union __anonunion____missing_field_name_76 { struct raw_spinlock rlock; struct __anonstruct____missing_field_name_77 __annonCompField19; } ; 33 struct spinlock { union __anonunion____missing_field_name_76 __annonCompField20; } ; 76 typedef struct spinlock spinlock_t; 70 struct vm_area_struct ; 39 struct __wait_queue_head { spinlock_t lock; struct list_head task_list; } ; 44 typedef struct __wait_queue_head wait_queue_head_t; 13 struct optimistic_spin_queue { atomic_t tail; } ; 40 struct mutex { atomic_long_t owner; spinlock_t wait_lock; struct optimistic_spin_queue osq; struct list_head wait_list; void *magic; struct lockdep_map dep_map; } ; 229 struct rw_semaphore ; 230 struct rw_semaphore { atomic_long_t count; struct list_head wait_list; raw_spinlock_t wait_lock; struct optimistic_spin_queue osq; struct task_struct *owner; struct lockdep_map dep_map; } ; 1109 struct timer_list { struct hlist_node entry; unsigned long expires; void (*function)(unsigned long); unsigned long data; u32 flags; struct lockdep_map lockdep_map; } ; 235 struct workqueue_struct ; 236 struct work_struct ; 54 struct work_struct { atomic_long_t data; struct list_head entry; void (*func)(struct work_struct *); struct lockdep_map lockdep_map; } ; 107 struct delayed_work { struct work_struct work; struct timer_list timer; struct workqueue_struct *wq; int cpu; } ; 38 struct ldt_struct ; 38 struct vdso_image ; 38 struct __anonstruct_mm_context_t_159 { struct ldt_struct *ldt; unsigned short ia32_compat; struct mutex lock; void *vdso; const struct vdso_image *vdso_image; atomic_t perf_rdpmc_allowed; u16 pkey_allocation_map; s16 execute_only_pkey; void *bd_addr; } ; 38 typedef struct __anonstruct_mm_context_t_159 mm_context_t; 551 struct rb_node { unsigned long __rb_parent_color; struct rb_node *rb_right; struct rb_node *rb_left; } ; 41 struct rb_root { struct rb_node *rb_node; } ; 49 struct mm_rss_stat { atomic_long_t count[4U]; } ; 85 struct completion { unsigned int done; wait_queue_head_t wait; } ; 111 struct xol_area ; 112 struct uprobes_state { struct xol_area *xol_area; } ; 151 struct address_space ; 152 struct mem_cgroup ; 153 union __anonunion____missing_field_name_212 { struct address_space *mapping; void *s_mem; atomic_t compound_mapcount; } ; 153 union __anonunion____missing_field_name_213 { unsigned long index; void *freelist; } ; 153 struct __anonstruct____missing_field_name_217 { unsigned short inuse; unsigned short objects; unsigned char frozen; } ; 153 union __anonunion____missing_field_name_216 { atomic_t _mapcount; unsigned int active; struct __anonstruct____missing_field_name_217 __annonCompField38; int units; } ; 153 struct __anonstruct____missing_field_name_215 { union __anonunion____missing_field_name_216 __annonCompField39; atomic_t _refcount; } ; 153 union __anonunion____missing_field_name_214 { unsigned long counters; struct __anonstruct____missing_field_name_215 __annonCompField40; } ; 153 struct dev_pagemap ; 153 struct __anonstruct____missing_field_name_219 { struct page *next; int pages; int pobjects; } ; 153 struct __anonstruct____missing_field_name_220 { unsigned long compound_head; unsigned int compound_dtor; unsigned int compound_order; } ; 153 struct __anonstruct____missing_field_name_221 { unsigned long __pad; pgtable_t pmd_huge_pte; } ; 153 union __anonunion____missing_field_name_218 { struct list_head lru; struct dev_pagemap *pgmap; struct __anonstruct____missing_field_name_219 __annonCompField42; struct callback_head callback_head; struct __anonstruct____missing_field_name_220 __annonCompField43; struct __anonstruct____missing_field_name_221 __annonCompField44; } ; 153 struct kmem_cache ; 153 union __anonunion____missing_field_name_222 { unsigned long private; spinlock_t *ptl; struct kmem_cache *slab_cache; } ; 153 struct page { unsigned long flags; union __anonunion____missing_field_name_212 __annonCompField36; union __anonunion____missing_field_name_213 __annonCompField37; union __anonunion____missing_field_name_214 __annonCompField41; union __anonunion____missing_field_name_218 __annonCompField45; union __anonunion____missing_field_name_222 __annonCompField46; struct mem_cgroup *mem_cgroup; } ; 266 struct userfaultfd_ctx ; 266 struct vm_userfaultfd_ctx { struct userfaultfd_ctx *ctx; } ; 273 struct __anonstruct_shared_223 { struct rb_node rb; unsigned long rb_subtree_last; } ; 273 struct anon_vma ; 273 struct vm_operations_struct ; 273 struct mempolicy ; 273 struct vm_area_struct { unsigned long vm_start; unsigned long vm_end; struct vm_area_struct *vm_next; struct vm_area_struct *vm_prev; struct rb_node vm_rb; unsigned long rb_subtree_gap; struct mm_struct *vm_mm; pgprot_t vm_page_prot; unsigned long vm_flags; struct __anonstruct_shared_223 shared; struct list_head anon_vma_chain; struct anon_vma *anon_vma; const struct vm_operations_struct *vm_ops; unsigned long vm_pgoff; struct file *vm_file; void *vm_private_data; struct mempolicy *vm_policy; struct vm_userfaultfd_ctx vm_userfaultfd_ctx; } ; 346 struct core_thread { struct task_struct *task; struct core_thread *next; } ; 351 struct core_state { atomic_t nr_threads; struct core_thread dumper; struct completion startup; } ; 357 struct kioctx_table ; 358 struct linux_binfmt ; 358 struct user_namespace ; 358 struct mmu_notifier_mm ; 358 struct mm_struct { struct vm_area_struct *mmap; struct rb_root mm_rb; u32 vmacache_seqnum; unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); unsigned long mmap_base; unsigned long mmap_legacy_base; unsigned long task_size; unsigned long highest_vm_end; pgd_t *pgd; atomic_t mm_users; atomic_t mm_count; atomic_long_t nr_ptes; atomic_long_t nr_pmds; int map_count; spinlock_t page_table_lock; struct rw_semaphore mmap_sem; struct list_head mmlist; unsigned long hiwater_rss; unsigned long hiwater_vm; unsigned long total_vm; unsigned long locked_vm; unsigned long pinned_vm; unsigned long data_vm; unsigned long exec_vm; unsigned long stack_vm; unsigned long def_flags; unsigned long start_code; unsigned long end_code; unsigned long start_data; unsigned long end_data; unsigned long start_brk; unsigned long brk; unsigned long start_stack; unsigned long arg_start; unsigned long arg_end; unsigned long env_start; unsigned long env_end; unsigned long saved_auxv[46U]; struct mm_rss_stat rss_stat; struct linux_binfmt *binfmt; cpumask_var_t cpu_vm_mask_var; mm_context_t context; unsigned long flags; struct core_state *core_state; spinlock_t ioctx_lock; struct kioctx_table *ioctx_table; struct task_struct *owner; struct user_namespace *user_ns; struct file *exe_file; struct mmu_notifier_mm *mmu_notifier_mm; struct cpumask cpumask_allocation; unsigned long numa_next_scan; unsigned long numa_scan_offset; int numa_scan_seq; bool tlb_flush_pending; struct uprobes_state uprobes_state; atomic_long_t hugetlb_usage; struct work_struct async_put_work; } ; 544 struct vm_fault ; 277 struct vm_fault { struct vm_area_struct *vma; unsigned int flags; gfp_t gfp_mask; unsigned long pgoff; unsigned long address; pmd_t *pmd; pud_t *pud; pte_t orig_pte; struct page *cow_page; struct mem_cgroup *memcg; struct page *page; pte_t *pte; spinlock_t *ptl; pgtable_t prealloc_pte; } ; 340 enum page_entry_size { PE_SIZE_PTE = 0, PE_SIZE_PMD = 1, PE_SIZE_PUD = 2 } ; 346 struct vm_operations_struct { void (*open)(struct vm_area_struct *); void (*close)(struct vm_area_struct *); int (*mremap)(struct vm_area_struct *); int (*fault)(struct vm_fault *); int (*huge_fault)(struct vm_fault *, enum page_entry_size ); void (*map_pages)(struct vm_fault *, unsigned long, unsigned long); int (*page_mkwrite)(struct vm_fault *); int (*pfn_mkwrite)(struct vm_fault *); int (*access)(struct vm_area_struct *, unsigned long, void *, int, int); const char * (*name)(struct vm_area_struct *); int (*set_policy)(struct vm_area_struct *, struct mempolicy *); struct mempolicy * (*get_policy)(struct vm_area_struct *, unsigned long); struct page * (*find_special_page)(struct vm_area_struct *, unsigned long); } ; 59 struct kobject ; 19 struct vdso_image { void *data; unsigned long size; unsigned long alt; unsigned long alt_len; long sym_vvar_start; long sym_vvar_page; long sym_hpet_page; long sym_pvclock_page; long sym_VDSO32_NOTE_MASK; long sym___kernel_sigreturn; long sym___kernel_rt_sigreturn; long sym___kernel_vsyscall; long sym_int80_landing_pad; } ; 15 typedef __u64 Elf64_Addr; 16 typedef __u16 Elf64_Half; 18 typedef __u64 Elf64_Off; 20 typedef __u32 Elf64_Word; 21 typedef __u64 Elf64_Xword; 190 struct elf64_sym { Elf64_Word st_name; unsigned char st_info; unsigned char st_other; Elf64_Half st_shndx; Elf64_Addr st_value; Elf64_Xword st_size; } ; 198 typedef struct elf64_sym Elf64_Sym; 219 struct elf64_hdr { unsigned char e_ident[16U]; Elf64_Half e_type; Elf64_Half e_machine; Elf64_Word e_version; Elf64_Addr e_entry; Elf64_Off e_phoff; Elf64_Off e_shoff; Elf64_Word e_flags; Elf64_Half e_ehsize; Elf64_Half e_phentsize; Elf64_Half e_phnum; Elf64_Half e_shentsize; Elf64_Half e_shnum; Elf64_Half e_shstrndx; } ; 235 typedef struct elf64_hdr Elf64_Ehdr; 314 struct elf64_shdr { Elf64_Word sh_name; Elf64_Word sh_type; Elf64_Xword sh_flags; Elf64_Addr sh_addr; Elf64_Off sh_offset; Elf64_Xword sh_size; Elf64_Word sh_link; Elf64_Word sh_info; Elf64_Xword sh_addralign; Elf64_Xword sh_entsize; } ; 326 typedef struct elf64_shdr Elf64_Shdr; 65 struct radix_tree_root ; 65 union __anonunion____missing_field_name_251 { struct list_head private_list; struct callback_head callback_head; } ; 65 struct radix_tree_node { unsigned char shift; unsigned char offset; unsigned char count; unsigned char exceptional; struct radix_tree_node *parent; struct radix_tree_root *root; union __anonunion____missing_field_name_251 __annonCompField49; void *slots[64U]; unsigned long tags[3U][1U]; } ; 107 struct radix_tree_root { gfp_t gfp_mask; struct radix_tree_node *rnode; } ; 176 struct ida { struct radix_tree_root ida_rt; } ; 220 struct kernfs_open_node ; 221 struct kernfs_iattrs ; 245 struct kernfs_root ; 245 struct kernfs_elem_dir { unsigned long subdirs; struct rb_root children; struct kernfs_root *root; } ; 86 struct kernfs_node ; 86 struct kernfs_elem_symlink { struct kernfs_node *target_kn; } ; 90 struct kernfs_ops ; 90 struct kernfs_elem_attr { const struct kernfs_ops *ops; struct kernfs_open_node *open; loff_t size; struct kernfs_node *notify_next; } ; 97 union __anonunion____missing_field_name_260 { struct kernfs_elem_dir dir; struct kernfs_elem_symlink symlink; struct kernfs_elem_attr attr; } ; 97 struct kernfs_node { atomic_t count; atomic_t active; struct lockdep_map dep_map; struct kernfs_node *parent; const char *name; struct rb_node rb; const void *ns; unsigned int hash; union __anonunion____missing_field_name_260 __annonCompField50; void *priv; unsigned short flags; umode_t mode; unsigned int ino; struct kernfs_iattrs *iattr; } ; 139 struct kernfs_syscall_ops { int (*remount_fs)(struct kernfs_root *, int *, char *); int (*show_options)(struct seq_file *, struct kernfs_root *); int (*mkdir)(struct kernfs_node *, const char *, umode_t ); int (*rmdir)(struct kernfs_node *); int (*rename)(struct kernfs_node *, struct kernfs_node *, const char *); int (*show_path)(struct seq_file *, struct kernfs_node *, struct kernfs_root *); } ; 158 struct kernfs_root { struct kernfs_node *kn; unsigned int flags; struct ida ino_ida; struct kernfs_syscall_ops *syscall_ops; struct list_head supers; wait_queue_head_t deactivate_waitq; } ; 174 struct kernfs_open_file { struct kernfs_node *kn; struct file *file; struct seq_file *seq_file; void *priv; struct mutex mutex; struct mutex prealloc_mutex; int event; struct list_head list; char *prealloc_buf; size_t atomic_write_len; bool mmapped; bool released; const struct vm_operations_struct *vm_ops; } ; 194 struct kernfs_ops { int (*open)(struct kernfs_open_file *); void (*release)(struct kernfs_open_file *); int (*seq_show)(struct seq_file *, void *); void * (*seq_start)(struct seq_file *, loff_t *); void * (*seq_next)(struct seq_file *, void *, loff_t *); void (*seq_stop)(struct seq_file *, void *); ssize_t (*read)(struct kernfs_open_file *, char *, size_t , loff_t ); size_t atomic_write_len; bool prealloc; ssize_t (*write)(struct kernfs_open_file *, char *, size_t , loff_t ); int (*mmap)(struct kernfs_open_file *, struct vm_area_struct *); struct lock_class_key lockdep_key; } ; 521 struct sock ; 522 enum kobj_ns_type { KOBJ_NS_TYPE_NONE = 0, KOBJ_NS_TYPE_NET = 1, KOBJ_NS_TYPES = 2 } ; 528 struct kobj_ns_type_operations { enum kobj_ns_type type; bool (*current_may_mount)(); void * (*grab_current_ns)(); const void * (*netlink_ns)(struct sock *); const void * (*initial_ns)(); void (*drop_ns)(void *); } ; 60 struct attribute { const char *name; umode_t mode; bool ignore_lockdep; struct lock_class_key *key; struct lock_class_key skey; } ; 165 struct sysfs_ops { ssize_t (*show)(struct kobject *, struct attribute *, char *); ssize_t (*store)(struct kobject *, struct attribute *, const char *, size_t ); } ; 530 struct refcount_struct { atomic_t refs; } ; 11 typedef struct refcount_struct refcount_t; 41 struct kref { refcount_t refcount; } ; 52 struct kset ; 52 struct kobj_type ; 52 struct kobject { const char *name; struct list_head entry; struct kobject *parent; struct kset *kset; struct kobj_type *ktype; struct kernfs_node *sd; struct kref kref; struct delayed_work release; unsigned char state_initialized; unsigned char state_in_sysfs; unsigned char state_add_uevent_sent; unsigned char state_remove_uevent_sent; unsigned char uevent_suppress; } ; 115 struct kobj_type { void (*release)(struct kobject *); const struct sysfs_ops *sysfs_ops; struct attribute **default_attrs; const struct kobj_ns_type_operations * (*child_ns_type)(struct kobject *); const void * (*namespace)(struct kobject *); } ; 123 struct kobj_uevent_env { char *argv[3U]; char *envp[32U]; int envp_idx; char buf[2048U]; int buflen; } ; 131 struct kset_uevent_ops { const int (*filter)(struct kset *, struct kobject *); const const char * (*name)(struct kset *, struct kobject *); const int (*uevent)(struct kset *, struct kobject *, struct kobj_uevent_env *); } ; 148 struct kset { struct list_head list; spinlock_t list_lock; struct kobject kobj; const struct kset_uevent_ops *uevent_ops; } ; 223 struct kernel_param ; 228 struct kernel_param_ops { unsigned int flags; int (*set)(const char *, const struct kernel_param *); int (*get)(char *, const struct kernel_param *); void (*free)(void *); } ; 62 struct kparam_string ; 62 struct kparam_array ; 62 union __anonunion____missing_field_name_263 { void *arg; const struct kparam_string *str; const struct kparam_array *arr; } ; 62 struct kernel_param { const char *name; struct module *mod; const struct kernel_param_ops *ops; const u16 perm; s8 level; u8 flags; union __anonunion____missing_field_name_263 __annonCompField51; } ; 83 struct kparam_string { unsigned int maxlen; char *string; } ; 89 struct kparam_array { unsigned int max; unsigned int elemsize; unsigned int *num; const struct kernel_param_ops *ops; void *elem; } ; 470 struct latch_tree_node { struct rb_node node[2U]; } ; 211 struct mod_arch_specific { } ; 38 struct exception_table_entry ; 39 struct module_param_attrs ; 39 struct module_kobject { struct kobject kobj; struct module *mod; struct kobject *drivers_dir; struct module_param_attrs *mp; struct completion *kobj_completion; } ; 49 struct module_attribute { struct attribute attr; ssize_t (*show)(struct module_attribute *, struct module_kobject *, char *); ssize_t (*store)(struct module_attribute *, struct module_kobject *, const char *, size_t ); void (*setup)(struct module *, const char *); int (*test)(struct module *); void (*free)(struct module *); } ; 276 enum module_state { MODULE_STATE_LIVE = 0, MODULE_STATE_COMING = 1, MODULE_STATE_GOING = 2, MODULE_STATE_UNFORMED = 3 } ; 283 struct mod_tree_node { struct module *mod; struct latch_tree_node node; } ; 288 struct module_layout { void *base; unsigned int size; unsigned int text_size; unsigned int ro_size; unsigned int ro_after_init_size; struct mod_tree_node mtn; } ; 304 struct mod_kallsyms { Elf64_Sym *symtab; unsigned int num_symtab; char *strtab; } ; 318 struct klp_modinfo { Elf64_Ehdr hdr; Elf64_Shdr *sechdrs; char *secstrings; unsigned int symndx; } ; 326 struct module_sect_attrs ; 326 struct module_notes_attrs ; 326 struct trace_event_call ; 326 struct trace_enum_map ; 326 struct module { enum module_state state; struct list_head list; char name[56U]; struct module_kobject mkobj; struct module_attribute *modinfo_attrs; const char *version; const char *srcversion; struct kobject *holders_dir; const struct kernel_symbol *syms; const s32 *crcs; unsigned int num_syms; struct mutex param_lock; struct kernel_param *kp; unsigned int num_kp; unsigned int num_gpl_syms; const struct kernel_symbol *gpl_syms; const s32 *gpl_crcs; const struct kernel_symbol *unused_syms; const s32 *unused_crcs; unsigned int num_unused_syms; unsigned int num_unused_gpl_syms; const struct kernel_symbol *unused_gpl_syms; const s32 *unused_gpl_crcs; bool sig_ok; bool async_probe_requested; const struct kernel_symbol *gpl_future_syms; const s32 *gpl_future_crcs; unsigned int num_gpl_future_syms; unsigned int num_exentries; struct exception_table_entry *extable; int (*init)(); struct module_layout core_layout; struct module_layout init_layout; struct mod_arch_specific arch; unsigned long taints; unsigned int num_bugs; struct list_head bug_list; struct bug_entry *bug_table; struct mod_kallsyms *kallsyms; struct mod_kallsyms core_kallsyms; struct module_sect_attrs *sect_attrs; struct module_notes_attrs *notes_attrs; char *args; void *percpu; unsigned int percpu_size; unsigned int num_tracepoints; const struct tracepoint **tracepoints_ptrs; unsigned int num_trace_bprintk_fmt; const char **trace_bprintk_fmt_start; struct trace_event_call **trace_events; unsigned int num_trace_events; struct trace_enum_map **trace_enums; unsigned int num_trace_enums; bool klp; bool klp_alive; struct klp_modinfo *klp_info; struct list_head source_list; struct list_head target_list; void (*exit)(); atomic_t refcnt; ctor_fn_t (**ctors)(); unsigned int num_ctors; } ; 679 struct zpool ; 680 struct zpool_ops { int (*evict)(struct zpool *, unsigned long); } ; 19 enum zpool_mapmode { ZPOOL_MM_RW = 0, ZPOOL_MM_RO = 1, ZPOOL_MM_WO = 2, ZPOOL_MM_DEFAULT = 0 } ; 62 struct zpool_driver { char *type; struct module *owner; atomic_t refcount; struct list_head list; void * (*create)(const char *, gfp_t , const struct zpool_ops *, struct zpool *); void (*destroy)(void *); int (*malloc)(void *, size_t , gfp_t , unsigned long *); void (*free)(void *, unsigned long); int (*shrink)(void *, unsigned int, unsigned int *); void * (*map)(void *, unsigned long, enum zpool_mapmode ); void (*unmap)(void *, unsigned long); u64 (*total_size)(void *); } ; 109 struct z3fold_pool ; 110 struct z3fold_ops { int (*evict)(struct z3fold_pool *, unsigned long); } ; 42 enum buddy { HEADLESS = 0, FIRST = 1, MIDDLE = 2, LAST = 3, BUDDIES_MAX = 4 } ; 50 struct z3fold_header { struct list_head buddy; spinlock_t page_lock; struct kref refcount; unsigned short first_chunks; unsigned short middle_chunks; unsigned short last_chunks; unsigned short start_middle; unsigned char first_num; } ; 72 struct z3fold_pool { spinlock_t lock; struct list_head unbuddied[62U]; struct list_head lru; atomic64_t pages_nr; const struct z3fold_ops *ops; struct zpool *zpool; const struct zpool_ops *zpool_ops; } ; 387 union __anonunion___u_275 { struct list_head *__val; char __c[1U]; } ; 1 void * __builtin_memcpy(void *, const void *, unsigned long); 1 long int __builtin_expect(long, long); 252 void __read_once_size(const volatile void *p, void *res, int size); 277 void __write_once_size(volatile void *p, void *res, int size); 34 extern struct module __this_module; 72 void set_bit(long nr, volatile unsigned long *addr); 110 void clear_bit(long nr, volatile unsigned long *addr); 321 bool constant_test_bit(long nr, const volatile unsigned long *addr); 172 int printk(const char *, ...); 25 void INIT_LIST_HEAD(struct list_head *list); 32 bool __list_add_valid(struct list_head *, struct list_head *, struct list_head *); 35 bool __list_del_entry_valid(struct list_head *); 55 void __list_add(struct list_head *new, struct list_head *prev, struct list_head *next); 76 void list_add(struct list_head *new, struct list_head *head); 102 void __list_del(struct list_head *prev, struct list_head *next); 114 void __list_del_entry(struct list_head *entry); 122 void list_del(struct list_head *entry); 156 void list_del_init(struct list_head *entry); 200 int list_empty(const struct list_head *head); 71 void warn_slowpath_null(const char *, const int); 7 extern unsigned long page_offset_base; 9 extern unsigned long vmemmap_base; 23 unsigned long int __phys_addr(unsigned long); 36 void atomic_set(atomic_t *v, int i); 19 long int atomic64_read(const atomic64_t *v); 31 void atomic64_set(atomic64_t *v, long i); 84 void atomic64_inc(atomic64_t *v); 97 void atomic64_dec(atomic64_t *v); 61 void * __memmove(void *, const void *, size_t ); 93 void __raw_spin_lock_init(raw_spinlock_t *, const char *, struct lock_class_key *); 22 void _raw_spin_lock(raw_spinlock_t *); 39 void _raw_spin_unlock(raw_spinlock_t *); 286 raw_spinlock_t * spinlock_check(spinlock_t *lock); 297 void spin_lock(spinlock_t *lock); 321 void ldv_spin_lock_25(spinlock_t *lock); 325 void ldv_spin_lock_27(spinlock_t *lock); 345 void spin_unlock(spinlock_t *lock); 361 void ldv_spin_unlock_26(spinlock_t *lock); 365 void ldv_spin_unlock_28(spinlock_t *lock); 11 void __ldv_spin_lock(spinlock_t *); 14 void ldv___ldv_spin_lock_11(spinlock_t *ldv_func_arg1); 18 void ldv___ldv_spin_lock_19(spinlock_t *ldv_func_arg1); 22 void ldv___ldv_spin_lock_21(spinlock_t *ldv_func_arg1); 30 void ldv_spin_lock_lock_of_NOT_ARG_SIGN(); 38 void ldv_spin_lock_lock_of_z3fold_pool(); 39 void ldv_spin_unlock_lock_of_z3fold_pool(); 46 void ldv_spin_lock_node_size_lock_of_pglist_data(); 54 void ldv_spin_lock_page_lock_of_z3fold_header(); 55 void ldv_spin_unlock_page_lock_of_z3fold_header(); 457 struct page * alloc_pages_current(gfp_t , unsigned int); 460 struct page * alloc_pages(gfp_t gfp_mask, unsigned int order); 496 void __free_pages(struct page *, unsigned int); 1020 void * lowmem_page_address(const struct page *page); 15 void refcount_set(refcount_t *r, unsigned int n); 29 void refcount_inc(refcount_t *); 34 bool refcount_dec_and_test(refcount_t *); 31 void kref_init(struct kref *kref); 45 void kref_get(struct kref *kref); 67 int kref_put(struct kref *kref, void (*release)(struct kref *)); 154 void kfree(const void *); 330 void * __kmalloc(size_t , gfp_t ); 478 void * kmalloc(size_t size, gfp_t flags); 661 void * kzalloc(size_t size, gfp_t flags); 106 void zpool_register_driver(struct zpool_driver *); 108 int zpool_unregister_driver(struct zpool_driver *); 134 int size_to_chunks(size_t size); 143 struct z3fold_header * init_z3fold_page(struct page *page); 163 void free_z3fold_page(struct page *page); 168 void release_z3fold_page(struct kref *ref); 184 void z3fold_page_lock(struct z3fold_header *zhdr); 190 void z3fold_page_unlock(struct z3fold_header *zhdr); 199 unsigned long int encode_handle(struct z3fold_header *zhdr, enum buddy bud); 210 struct z3fold_header * handle_to_z3fold_header(unsigned long handle); 220 enum buddy handle_to_buddy(unsigned long handle); 230 int num_free_chunks(struct z3fold_header *zhdr); 261 struct z3fold_pool * z3fold_create_pool(gfp_t gfp, const struct z3fold_ops *ops); 285 void z3fold_destroy_pool(struct z3fold_pool *pool); 290 void * mchunk_memmove(struct z3fold_header *zhdr, unsigned short dst_chunk); 301 int z3fold_compact_page(struct z3fold_header *zhdr); 364 int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp, unsigned long *handle); 483 void z3fold_free(struct z3fold_pool *pool, unsigned long handle); 582 int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries); 697 void * z3fold_map(struct z3fold_pool *pool, unsigned long handle); 741 void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle); 766 u64 z3fold_get_pool_size(struct z3fold_pool *pool); 775 int z3fold_zpool_evict(struct z3fold_pool *pool, unsigned long handle); 783 const struct z3fold_ops z3fold_zpool_ops = { &z3fold_zpool_evict }; 787 void * z3fold_zpool_create(const char *name, gfp_t gfp, const struct zpool_ops *zpool_ops, struct zpool *zpool); 801 void z3fold_zpool_destroy(void *pool); 806 int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp, unsigned long *handle); 811 void z3fold_zpool_free(void *pool, unsigned long handle); 816 int z3fold_zpool_shrink(void *pool, unsigned int pages, unsigned int *reclaimed); 835 void * z3fold_zpool_map(void *pool, unsigned long handle, enum zpool_mapmode mm); 840 void z3fold_zpool_unmap(void *pool, unsigned long handle); 845 u64 z3fold_zpool_total_size(void *pool); 850 struct zpool_driver z3fold_zpool_driver = { (char *)"z3fold", &__this_module, { 0 }, { 0, 0 }, &z3fold_zpool_create, &z3fold_zpool_destroy, &z3fold_zpool_malloc, &z3fold_zpool_free, &z3fold_zpool_shrink, &z3fold_zpool_map, &z3fold_zpool_unmap, &z3fold_zpool_total_size }; 865 int init_z3fold(); 874 void exit_z3fold(); 902 void ldv_check_final_state(); 911 void ldv_initialize(); 914 void ldv_handler_precall(); 917 int nondet_int(); 920 int LDV_IN_INTERRUPT = 0; 923 void ldv_main0_sequence_infinite_withcheck_stateful(); 10 void ldv_error(); 25 int ldv_undef_int(); 7 bool ldv_is_err(const void *ptr); 14 void * ldv_err_ptr(long error); 21 long int ldv_ptr_err(const void *ptr); 28 bool ldv_is_err_or_null(const void *ptr); 9 int ldv_spin_lock_of_NOT_ARG_SIGN = 1; 21 void ldv_spin_unlock_lock_of_NOT_ARG_SIGN(); 30 int ldv_spin_trylock_lock_of_NOT_ARG_SIGN(); 56 void ldv_spin_unlock_wait_lock_of_NOT_ARG_SIGN(); 63 int ldv_spin_is_locked_lock_of_NOT_ARG_SIGN(); 84 int ldv_spin_can_lock_lock_of_NOT_ARG_SIGN(); 91 int ldv_spin_is_contended_lock_of_NOT_ARG_SIGN(); 112 int ldv_atomic_dec_and_lock_lock_of_NOT_ARG_SIGN(); 134 int ldv_spin_lock_of_z3fold_pool = 1; 155 int ldv_spin_trylock_lock_of_z3fold_pool(); 181 void ldv_spin_unlock_wait_lock_of_z3fold_pool(); 188 int ldv_spin_is_locked_lock_of_z3fold_pool(); 209 int ldv_spin_can_lock_lock_of_z3fold_pool(); 216 int ldv_spin_is_contended_lock_of_z3fold_pool(); 237 int ldv_atomic_dec_and_lock_lock_of_z3fold_pool(); 259 int ldv_spin_node_size_lock_of_pglist_data = 1; 271 void ldv_spin_unlock_node_size_lock_of_pglist_data(); 280 int ldv_spin_trylock_node_size_lock_of_pglist_data(); 306 void ldv_spin_unlock_wait_node_size_lock_of_pglist_data(); 313 int ldv_spin_is_locked_node_size_lock_of_pglist_data(); 334 int ldv_spin_can_lock_node_size_lock_of_pglist_data(); 341 int ldv_spin_is_contended_node_size_lock_of_pglist_data(); 362 int ldv_atomic_dec_and_lock_node_size_lock_of_pglist_data(); 384 int ldv_spin_page_lock_of_z3fold_header = 1; 405 int ldv_spin_trylock_page_lock_of_z3fold_header(); 431 void ldv_spin_unlock_wait_page_lock_of_z3fold_header(); 438 int ldv_spin_is_locked_page_lock_of_z3fold_header(); 459 int ldv_spin_can_lock_page_lock_of_z3fold_header(); 466 int ldv_spin_is_contended_page_lock_of_z3fold_header(); 487 int ldv_atomic_dec_and_lock_page_lock_of_z3fold_header(); 509 int ldv_spin_ptl = 1; 512 void ldv_spin_lock_ptl(); 521 void ldv_spin_unlock_ptl(); 530 int ldv_spin_trylock_ptl(); 556 void ldv_spin_unlock_wait_ptl(); 563 int ldv_spin_is_locked_ptl(); 584 int ldv_spin_can_lock_ptl(); 591 int ldv_spin_is_contended_ptl(); 612 int ldv_atomic_dec_and_lock_ptl(); return ; } { 925 struct z3fold_pool *var_group1; 926 unsigned long var_z3fold_zpool_evict_20_p1; 927 const char *var_z3fold_zpool_create_21_p0; 928 unsigned int var_z3fold_zpool_create_21_p1; 929 const struct zpool_ops *var_z3fold_zpool_create_21_p2; 930 struct zpool *var_z3fold_zpool_create_21_p3; 931 void *var_z3fold_zpool_destroy_22_p0; 932 void *var_z3fold_zpool_malloc_23_p0; 933 unsigned long var_z3fold_zpool_malloc_23_p1; 934 unsigned int var_z3fold_zpool_malloc_23_p2; 935 unsigned long *var_z3fold_zpool_malloc_23_p3; 936 void *var_z3fold_zpool_free_24_p0; 937 unsigned long var_z3fold_zpool_free_24_p1; 938 void *var_z3fold_zpool_shrink_25_p0; 939 unsigned int var_z3fold_zpool_shrink_25_p1; 940 unsigned int *var_z3fold_zpool_shrink_25_p2; 941 void *var_z3fold_zpool_map_26_p0; 942 unsigned long var_z3fold_zpool_map_26_p1; 943 enum zpool_mapmode var_z3fold_zpool_map_26_p2; 944 void *var_z3fold_zpool_unmap_27_p0; 945 unsigned long var_z3fold_zpool_unmap_27_p1; 946 void *var_z3fold_zpool_total_size_28_p0; 947 int tmp; 948 int tmp___0; 949 int tmp___1; 1118 LDV_IN_INTERRUPT = 1; 1127 ldv_initialize() { /* Function call is skipped due to function is undefined */} 1146 ldv_handler_precall() { /* Function call is skipped due to function is undefined */} { 869 zpool_register_driver(&z3fold_zpool_driver) { /* Function call is skipped due to function is undefined */} } 1154 goto ldv_22220; 1154 tmp___1 = nondet_int() { /* Function call is skipped due to function is undefined */} 1156 goto ldv_22219; 1155 ldv_22219:; 1157 tmp___0 = nondet_int() { /* Function call is skipped due to function is undefined */} 1157 switch (tmp___0); 1330 ldv_handler_precall() { /* Function call is skipped due to function is undefined */} 1331 -z3fold_zpool_shrink(var_z3fold_zpool_shrink_25_p0, var_z3fold_zpool_shrink_25_p1, var_z3fold_zpool_shrink_25_p2) { } 818 unsigned int total; 819 int ret; 819 total = 0U; 820 ret = -22; 822 goto ldv_22135; 824 goto ldv_22134; 823 ldv_22134:; { 584 int i; 585 int ret; 586 int freechunks; 587 struct z3fold_header *zhdr; 588 struct page *page; 589 unsigned long first_handle; 590 unsigned long middle_handle; 591 unsigned long last_handle; 592 int tmp; 593 const struct list_head *__mptr; 594 void *tmp___0; 595 int tmp___1; 596 _Bool tmp___2; 597 int tmp___3; 598 int tmp___4; 599 _Bool tmp___5; 584 ret = 0; 587 first_handle = 0UL; 587 middle_handle = 0UL; 587 last_handle = 0UL; { { } 299 _raw_spin_lock(&(lock->__annonCompField20.rlock)) { /* Function call is skipped due to function is undefined */} 300 return ;; } 590 unsigned long __CPAchecker_TMP_0 = (unsigned long)(pool->ops); 590 unsigned long __CPAchecker_TMP_1 = (unsigned long)(pool->ops->evict); 590 assume(!(__CPAchecker_TMP_1 == ((unsigned long)((int (*)(struct z3fold_pool *, unsigned long))0)))); 594 i = 0; 594 goto ldv_22073; 596 goto ldv_22072; 595 ldv_22072:; { 202 union __anonunion___u_15 __u; { 254 switch (size); 255 assume(!(size == 1)); 255 assume(!(size == 2)); 255 assume(!(size == 4)); 255 assume(size == 8); 254 *((__u64 *)res) = *((volatile __u64 *)p); 254 goto ldv_883; 256 return ;; } 202 return ((unsigned long)((const struct list_head *)(__u.__val))) == ((unsigned long)head);; } 599 __mptr = (const struct list_head *)(pool->lru.prev); 599 page = ((struct page *)__mptr) + 18446744073709551584UL; { { 116 _Bool tmp; 117 int tmp___0; 116 tmp = __list_del_entry_valid(entry) { /* Function call is skipped due to function is undefined */} 116 assume(tmp == 0); 116 tmp___0 = 1; 116 assume(!(tmp___0 == 0)); 117 return ;; } { 27 union __anonunion___u_9 __u; 27 __u.__val = list; { 279 switch (size); 280 assume(!(size == 1)); 281 assume(!(size == 2)); 282 assume(!(size == 4)); 283 assume(size == 8); 283 *((volatile __u64 *)p) = *((__u64 *)res); 283 goto ldv_905; 290 return ;; } 28 list->prev = list; 29 return ;; } 160 return ;; } { 1022 return (void *)(((unsigned long)(((unsigned long long)((((long)page) - ((long)vmemmap_base)) / 64L)) << 12)) + page_offset_base);; } 602 zhdr = (struct z3fold_header *)tmp___0; { 323 return (((int)(((unsigned long)(*(addr + ((unsigned long)(nr >> 6))))) >> (((int)nr) & 63))) & 1) != 0;; } 603 tmp___3 = 1; { 202 union __anonunion___u_15 __u; { 254 switch (size); 255 assume(!(size == 1)); 255 assume(!(size == 2)); 255 assume(!(size == 4)); 255 assume(size == 8); 254 *((__u64 *)res) = *((volatile __u64 *)p); 254 goto ldv_883; 256 return ;; } 202 return ((unsigned long)((const struct list_head *)(__u.__val))) == ((unsigned long)head);; } { 47 refcount_inc(&(kref->refcount)) { /* Function call is skipped due to function is undefined */} 48 return ;; } { { } 347 _raw_spin_unlock(&(lock->__annonCompField20.rlock)) { /* Function call is skipped due to function is undefined */} 348 return ;; } { { } { } 299 _raw_spin_lock(&(lock->__annonCompField20.rlock)) { /* Function call is skipped due to function is undefined */} 300 return ;; } 614 first_handle = 0UL; 615 last_handle = 0UL; 616 middle_handle = 0UL; 617 unsigned int __CPAchecker_TMP_2 = (unsigned int)(zhdr->first_chunks); 619 unsigned int __CPAchecker_TMP_3 = (unsigned int)(zhdr->middle_chunks); 621 unsigned int __CPAchecker_TMP_4 = (unsigned int)(zhdr->last_chunks); { { } { } 347 _raw_spin_unlock(&(lock->__annonCompField20.rlock)) { /* Function call is skipped due to function is undefined */} 348 return ;; } 651 next:; { 323 return (((int)(((unsigned long)(*(addr + ((unsigned long)(nr >> 6))))) >> (((int)nr) & 63))) & 1) != 0;; } { { } { } 299 _raw_spin_lock(&(lock->__annonCompField20.rlock)) { /* Function call is skipped due to function is undefined */} 300 return ;; } 656 unsigned int __CPAchecker_TMP_5 = (unsigned int)(zhdr->first_chunks); 656 unsigned int __CPAchecker_TMP_6 = (unsigned int)(zhdr->last_chunks); 656 unsigned int __CPAchecker_TMP_7 = (unsigned int)(zhdr->middle_chunks); { { } { } 347 _raw_spin_unlock(&(lock->__annonCompField20.rlock)) { /* Function call is skipped due to function is undefined */} 348 return ;; } { { } 299 _raw_spin_lock(&(lock->__annonCompField20.rlock)) { /* Function call is skipped due to function is undefined */} 300 return ;; } { 69 int __ret_warn_on; 70 long tmp; 71 _Bool tmp___0; 69 __ret_warn_on = ((unsigned long)release) == ((unsigned long)((void (*)(struct kref *))0)); 69 tmp = __builtin_expect(__ret_warn_on != 0, 0L) { /* Function call is skipped due to function is undefined */} 69 assume(tmp != 0L); 69 warn_slowpath_null("./include/linux/kref.h", 69) { /* Function call is skipped due to function is undefined */} 69 __builtin_expect(__ret_warn_on != 0, 0L) { /* Function call is skipped due to function is undefined */} 71 tmp___0 = refcount_dec_and_test(&(kref->refcount)) { /* Function call is skipped due to function is undefined */} 71 assume(!(((int)tmp___0) == 0)); 72 assume(!(release == (&z3fold_zpool_destroy))); 72 assume(release == (&release_z3fold_page)); { 72 skip 170 struct z3fold_header *zhdr; 171 struct page *page; 172 const struct kref *__mptr; 173 unsigned long tmp; 174 int tmp___0; 175 int tmp___1; 173 __mptr = (const struct kref *)ref; 173 zhdr = ((struct z3fold_header *)__mptr) + 18446744073709551528UL; 174 tmp = __phys_addr((unsigned long)zhdr) { /* Function call is skipped due to function is undefined */} 174 page = (struct page *)((tmp >> 12) + vmemmap_base); { 202 union __anonunion___u_15 __u; { 254 switch (size); 255 assume(!(size == 1)); 255 assume(!(size == 2)); 255 assume(!(size == 4)); 255 assume(size == 8); 254 *((__u64 *)res) = *((volatile __u64 *)p); 254 goto ldv_883; 256 return ;; } 202 return ((unsigned long)((const struct list_head *)(__u.__val))) == ((unsigned long)head);; } { } 202 union __anonunion___u_15 __u; { 254 switch (size); 255 assume(!(size == 1)); 255 assume(!(size == 2)); 255 assume(!(size == 4)); 255 assume(size == 8); 254 *((__u64 *)res) = *((volatile __u64 *)p); 254 goto ldv_883; 256 return ;; } 202 return ((unsigned long)((const struct list_head *)(__u.__val))) == ((unsigned long)head);; } 73 return 1;; } { }99 Ignored inline assembler code 102 return ;; } 826 total = total + 1U; 827 ldv_22135:; 824 goto ldv_22134; 823 ldv_22134:; { } 584 int i; 585 int ret; 586 int freechunks; 587 struct z3fold_header *zhdr; 588 struct page *page; 589 unsigned long first_handle; 590 unsigned long middle_handle; 591 unsigned long last_handle; 592 int tmp; 593 const struct list_head *__mptr; 594 void *tmp___0; 595 int tmp___1; 596 _Bool tmp___2; 597 int tmp___3; 598 int tmp___4; 599 _Bool tmp___5; 584 ret = 0; 587 first_handle = 0UL; 587 middle_handle = 0UL; 587 last_handle = 0UL; { }} | Source code
1 #ifndef _ASM_X86_ATOMIC64_64_H
2 #define _ASM_X86_ATOMIC64_64_H
3
4 #include <linux/types.h>
5 #include <asm/alternative.h>
6 #include <asm/cmpxchg.h>
7
8 /* The 64-bit atomic type */
9
10 #define ATOMIC64_INIT(i) { (i) }
11
12 /**
13 * atomic64_read - read atomic64 variable
14 * @v: pointer of type atomic64_t
15 *
16 * Atomically reads the value of @v.
17 * Doesn't imply a read memory barrier.
18 */
19 static inline long atomic64_read(const atomic64_t *v)
20 {
21 return READ_ONCE((v)->counter);
22 }
23
24 /**
25 * atomic64_set - set atomic64 variable
26 * @v: pointer to type atomic64_t
27 * @i: required value
28 *
29 * Atomically sets the value of @v to @i.
30 */
31 static inline void atomic64_set(atomic64_t *v, long i)
32 {
33 WRITE_ONCE(v->counter, i);
34 }
35
36 /**
37 * atomic64_add - add integer to atomic64 variable
38 * @i: integer value to add
39 * @v: pointer to type atomic64_t
40 *
41 * Atomically adds @i to @v.
42 */
43 static __always_inline void atomic64_add(long i, atomic64_t *v)
44 {
45 asm volatile(LOCK_PREFIX "addq %1,%0"
46 : "=m" (v->counter)
47 : "er" (i), "m" (v->counter));
48 }
49
50 /**
51 * atomic64_sub - subtract the atomic64 variable
52 * @i: integer value to subtract
53 * @v: pointer to type atomic64_t
54 *
55 * Atomically subtracts @i from @v.
56 */
57 static inline void atomic64_sub(long i, atomic64_t *v)
58 {
59 asm volatile(LOCK_PREFIX "subq %1,%0"
60 : "=m" (v->counter)
61 : "er" (i), "m" (v->counter));
62 }
63
64 /**
65 * atomic64_sub_and_test - subtract value from variable and test result
66 * @i: integer value to subtract
67 * @v: pointer to type atomic64_t
68 *
69 * Atomically subtracts @i from @v and returns
70 * true if the result is zero, or false for all
71 * other cases.
72 */
73 static inline bool atomic64_sub_and_test(long i, atomic64_t *v)
74 {
75 GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", e);
76 }
77
78 /**
79 * atomic64_inc - increment atomic64 variable
80 * @v: pointer to type atomic64_t
81 *
82 * Atomically increments @v by 1.
83 */
84 static __always_inline void atomic64_inc(atomic64_t *v)
85 {
86 asm volatile(LOCK_PREFIX "incq %0"
87 : "=m" (v->counter)
88 : "m" (v->counter));
89 }
90
91 /**
92 * atomic64_dec - decrement atomic64 variable
93 * @v: pointer to type atomic64_t
94 *
95 * Atomically decrements @v by 1.
96 */
97 static __always_inline void atomic64_dec(atomic64_t *v)
98 {
99 asm volatile(LOCK_PREFIX "decq %0"
100 : "=m" (v->counter)
101 : "m" (v->counter));
102 }
103
104 /**
105 * atomic64_dec_and_test - decrement and test
106 * @v: pointer to type atomic64_t
107 *
108 * Atomically decrements @v by 1 and
109 * returns true if the result is 0, or false for all other
110 * cases.
111 */
112 static inline bool atomic64_dec_and_test(atomic64_t *v)
113 {
114 GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", e);
115 }
116
117 /**
118 * atomic64_inc_and_test - increment and test
119 * @v: pointer to type atomic64_t
120 *
121 * Atomically increments @v by 1
122 * and returns true if the result is zero, or false for all
123 * other cases.
124 */
125 static inline bool atomic64_inc_and_test(atomic64_t *v)
126 {
127 GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", e);
128 }
129
130 /**
131 * atomic64_add_negative - add and test if negative
132 * @i: integer value to add
133 * @v: pointer to type atomic64_t
134 *
135 * Atomically adds @i to @v and returns true
136 * if the result is negative, or false when
137 * result is greater than or equal to zero.
138 */
139 static inline bool atomic64_add_negative(long i, atomic64_t *v)
140 {
141 GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", s);
142 }
143
144 /**
145 * atomic64_add_return - add and return
146 * @i: integer value to add
147 * @v: pointer to type atomic64_t
148 *
149 * Atomically adds @i to @v and returns @i + @v
150 */
151 static __always_inline long atomic64_add_return(long i, atomic64_t *v)
152 {
153 return i + xadd(&v->counter, i);
154 }
155
156 static inline long atomic64_sub_return(long i, atomic64_t *v)
157 {
158 return atomic64_add_return(-i, v);
159 }
160
161 static inline long atomic64_fetch_add(long i, atomic64_t *v)
162 {
163 return xadd(&v->counter, i);
164 }
165
166 static inline long atomic64_fetch_sub(long i, atomic64_t *v)
167 {
168 return xadd(&v->counter, -i);
169 }
170
171 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
172 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
173
174 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
175 {
176 return cmpxchg(&v->counter, old, new);
177 }
178
179 static inline long atomic64_xchg(atomic64_t *v, long new)
180 {
181 return xchg(&v->counter, new);
182 }
183
184 /**
185 * atomic64_add_unless - add unless the number is a given value
186 * @v: pointer of type atomic64_t
187 * @a: the amount to add to v...
188 * @u: ...unless v is equal to u.
189 *
190 * Atomically adds @a to @v, so long as it was not @u.
191 * Returns the old value of @v.
192 */
193 static inline bool atomic64_add_unless(atomic64_t *v, long a, long u)
194 {
195 long c, old;
196 c = atomic64_read(v);
197 for (;;) {
198 if (unlikely(c == (u)))
199 break;
200 old = atomic64_cmpxchg((v), c, c + (a));
201 if (likely(old == c))
202 break;
203 c = old;
204 }
205 return c != (u);
206 }
207
208 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
209
210 /*
211 * atomic64_dec_if_positive - decrement by 1 if old value positive
212 * @v: pointer of type atomic_t
213 *
214 * The function returns the old value of *v minus 1, even if
215 * the atomic variable, v, was not decremented.
216 */
217 static inline long atomic64_dec_if_positive(atomic64_t *v)
218 {
219 long c, old, dec;
220 c = atomic64_read(v);
221 for (;;) {
222 dec = c - 1;
223 if (unlikely(dec < 0))
224 break;
225 old = atomic64_cmpxchg((v), c, dec);
226 if (likely(old == c))
227 break;
228 c = old;
229 }
230 return dec;
231 }
232
233 #define ATOMIC64_OP(op) \
234 static inline void atomic64_##op(long i, atomic64_t *v) \
235 { \
236 asm volatile(LOCK_PREFIX #op"q %1,%0" \
237 : "+m" (v->counter) \
238 : "er" (i) \
239 : "memory"); \
240 }
241
242 #define ATOMIC64_FETCH_OP(op, c_op) \
243 static inline long atomic64_fetch_##op(long i, atomic64_t *v) \
244 { \
245 long old, val = atomic64_read(v); \
246 for (;;) { \
247 old = atomic64_cmpxchg(v, val, val c_op i); \
248 if (old == val) \
249 break; \
250 val = old; \
251 } \
252 return old; \
253 }
254
255 #define ATOMIC64_OPS(op, c_op) \
256 ATOMIC64_OP(op) \
257 ATOMIC64_FETCH_OP(op, c_op)
258
259 ATOMIC64_OPS(and, &)
260 ATOMIC64_OPS(or, |)
261 ATOMIC64_OPS(xor, ^)
262
263 #undef ATOMIC64_OPS
264 #undef ATOMIC64_FETCH_OP
265 #undef ATOMIC64_OP
266
267 #endif /* _ASM_X86_ATOMIC64_64_H */ 1 #ifndef _ASM_X86_BITOPS_H
2 #define _ASM_X86_BITOPS_H
3
4 /*
5 * Copyright 1992, Linus Torvalds.
6 *
7 * Note: inlines with more than a single statement should be marked
8 * __always_inline to avoid problems with older gcc's inlining heuristics.
9 */
10
11 #ifndef _LINUX_BITOPS_H
12 #error only <linux/bitops.h> can be included directly
13 #endif
14
15 #include <linux/compiler.h>
16 #include <asm/alternative.h>
17 #include <asm/rmwcc.h>
18 #include <asm/barrier.h>
19
20 #if BITS_PER_LONG == 32
21 # define _BITOPS_LONG_SHIFT 5
22 #elif BITS_PER_LONG == 64
23 # define _BITOPS_LONG_SHIFT 6
24 #else
25 # error "Unexpected BITS_PER_LONG"
26 #endif
27
28 #define BIT_64(n) (U64_C(1) << (n))
29
30 /*
31 * These have to be done with inline assembly: that way the bit-setting
32 * is guaranteed to be atomic. All bit operations return 0 if the bit
33 * was cleared before the operation and != 0 if it was not.
34 *
35 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
36 */
37
38 #if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1)
39 /* Technically wrong, but this avoids compilation errors on some gcc
40 versions. */
41 #define BITOP_ADDR(x) "=m" (*(volatile long *) (x))
42 #else
43 #define BITOP_ADDR(x) "+m" (*(volatile long *) (x))
44 #endif
45
46 #define ADDR BITOP_ADDR(addr)
47
48 /*
49 * We do the locked ops that don't return the old value as
50 * a mask operation on a byte.
51 */
52 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
53 #define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
54 #define CONST_MASK(nr) (1 << ((nr) & 7))
55
56 /**
57 * set_bit - Atomically set a bit in memory
58 * @nr: the bit to set
59 * @addr: the address to start counting from
60 *
61 * This function is atomic and may not be reordered. See __set_bit()
62 * if you do not require the atomic guarantees.
63 *
64 * Note: there are no guarantees that this function will not be reordered
65 * on non x86 architectures, so if you are writing portable code,
66 * make sure not to rely on its reordering guarantees.
67 *
68 * Note that @nr may be almost arbitrarily large; this function is not
69 * restricted to acting on a single-word quantity.
70 */
71 static __always_inline void
72 set_bit(long nr, volatile unsigned long *addr)
73 {
74 if (IS_IMMEDIATE(nr)) {
75 asm volatile(LOCK_PREFIX "orb %1,%0"
76 : CONST_MASK_ADDR(nr, addr)
77 : "iq" ((u8)CONST_MASK(nr))
78 : "memory");
79 } else {
80 asm volatile(LOCK_PREFIX "bts %1,%0"
81 : BITOP_ADDR(addr) : "Ir" (nr) : "memory");
82 }
83 }
84
85 /**
86 * __set_bit - Set a bit in memory
87 * @nr: the bit to set
88 * @addr: the address to start counting from
89 *
90 * Unlike set_bit(), this function is non-atomic and may be reordered.
91 * If it's called on the same region of memory simultaneously, the effect
92 * may be that only one operation succeeds.
93 */
94 static __always_inline void __set_bit(long nr, volatile unsigned long *addr)
95 {
96 asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory");
97 }
98
99 /**
100 * clear_bit - Clears a bit in memory
101 * @nr: Bit to clear
102 * @addr: Address to start counting from
103 *
104 * clear_bit() is atomic and may not be reordered. However, it does
105 * not contain a memory barrier, so if it is used for locking purposes,
106 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
107 * in order to ensure changes are visible on other processors.
108 */
109 static __always_inline void
110 clear_bit(long nr, volatile unsigned long *addr)
111 {
112 if (IS_IMMEDIATE(nr)) {
113 asm volatile(LOCK_PREFIX "andb %1,%0"
114 : CONST_MASK_ADDR(nr, addr)
115 : "iq" ((u8)~CONST_MASK(nr)));
116 } else {
117 asm volatile(LOCK_PREFIX "btr %1,%0"
118 : BITOP_ADDR(addr)
119 : "Ir" (nr));
120 }
121 }
122
123 /*
124 * clear_bit_unlock - Clears a bit in memory
125 * @nr: Bit to clear
126 * @addr: Address to start counting from
127 *
128 * clear_bit() is atomic and implies release semantics before the memory
129 * operation. It can be used for an unlock.
130 */
131 static __always_inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
132 {
133 barrier();
134 clear_bit(nr, addr);
135 }
136
137 static __always_inline void __clear_bit(long nr, volatile unsigned long *addr)
138 {
139 asm volatile("btr %1,%0" : ADDR : "Ir" (nr));
140 }
141
142 static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
143 {
144 bool negative;
145 asm volatile(LOCK_PREFIX "andb %2,%1\n\t"
146 CC_SET(s)
147 : CC_OUT(s) (negative), ADDR
148 : "ir" ((char) ~(1 << nr)) : "memory");
149 return negative;
150 }
151
152 // Let everybody know we have it
153 #define clear_bit_unlock_is_negative_byte clear_bit_unlock_is_negative_byte
154
155 /*
156 * __clear_bit_unlock - Clears a bit in memory
157 * @nr: Bit to clear
158 * @addr: Address to start counting from
159 *
160 * __clear_bit() is non-atomic and implies release semantics before the memory
161 * operation. It can be used for an unlock if no other CPUs can concurrently
162 * modify other bits in the word.
163 *
164 * No memory barrier is required here, because x86 cannot reorder stores past
165 * older loads. Same principle as spin_unlock.
166 */
167 static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
168 {
169 barrier();
170 __clear_bit(nr, addr);
171 }
172
173 /**
174 * __change_bit - Toggle a bit in memory
175 * @nr: the bit to change
176 * @addr: the address to start counting from
177 *
178 * Unlike change_bit(), this function is non-atomic and may be reordered.
179 * If it's called on the same region of memory simultaneously, the effect
180 * may be that only one operation succeeds.
181 */
182 static __always_inline void __change_bit(long nr, volatile unsigned long *addr)
183 {
184 asm volatile("btc %1,%0" : ADDR : "Ir" (nr));
185 }
186
187 /**
188 * change_bit - Toggle a bit in memory
189 * @nr: Bit to change
190 * @addr: Address to start counting from
191 *
192 * change_bit() is atomic and may not be reordered.
193 * Note that @nr may be almost arbitrarily large; this function is not
194 * restricted to acting on a single-word quantity.
195 */
196 static __always_inline void change_bit(long nr, volatile unsigned long *addr)
197 {
198 if (IS_IMMEDIATE(nr)) {
199 asm volatile(LOCK_PREFIX "xorb %1,%0"
200 : CONST_MASK_ADDR(nr, addr)
201 : "iq" ((u8)CONST_MASK(nr)));
202 } else {
203 asm volatile(LOCK_PREFIX "btc %1,%0"
204 : BITOP_ADDR(addr)
205 : "Ir" (nr));
206 }
207 }
208
209 /**
210 * test_and_set_bit - Set a bit and return its old value
211 * @nr: Bit to set
212 * @addr: Address to count from
213 *
214 * This operation is atomic and cannot be reordered.
215 * It also implies a memory barrier.
216 */
217 static __always_inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
218 {
219 GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", c);
220 }
221
222 /**
223 * test_and_set_bit_lock - Set a bit and return its old value for lock
224 * @nr: Bit to set
225 * @addr: Address to count from
226 *
227 * This is the same as test_and_set_bit on x86.
228 */
229 static __always_inline bool
230 test_and_set_bit_lock(long nr, volatile unsigned long *addr)
231 {
232 return test_and_set_bit(nr, addr);
233 }
234
235 /**
236 * __test_and_set_bit - Set a bit and return its old value
237 * @nr: Bit to set
238 * @addr: Address to count from
239 *
240 * This operation is non-atomic and can be reordered.
241 * If two examples of this operation race, one can appear to succeed
242 * but actually fail. You must protect multiple accesses with a lock.
243 */
244 static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long *addr)
245 {
246 bool oldbit;
247
248 asm("bts %2,%1\n\t"
249 CC_SET(c)
250 : CC_OUT(c) (oldbit), ADDR
251 : "Ir" (nr));
252 return oldbit;
253 }
254
255 /**
256 * test_and_clear_bit - Clear a bit and return its old value
257 * @nr: Bit to clear
258 * @addr: Address to count from
259 *
260 * This operation is atomic and cannot be reordered.
261 * It also implies a memory barrier.
262 */
263 static __always_inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
264 {
265 GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", c);
266 }
267
268 /**
269 * __test_and_clear_bit - Clear a bit and return its old value
270 * @nr: Bit to clear
271 * @addr: Address to count from
272 *
273 * This operation is non-atomic and can be reordered.
274 * If two examples of this operation race, one can appear to succeed
275 * but actually fail. You must protect multiple accesses with a lock.
276 *
277 * Note: the operation is performed atomically with respect to
278 * the local CPU, but not other CPUs. Portable code should not
279 * rely on this behaviour.
280 * KVM relies on this behaviour on x86 for modifying memory that is also
281 * accessed from a hypervisor on the same CPU if running in a VM: don't change
282 * this without also updating arch/x86/kernel/kvm.c
283 */
284 static __always_inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr)
285 {
286 bool oldbit;
287
288 asm volatile("btr %2,%1\n\t"
289 CC_SET(c)
290 : CC_OUT(c) (oldbit), ADDR
291 : "Ir" (nr));
292 return oldbit;
293 }
294
295 /* WARNING: non atomic and it can be reordered! */
296 static __always_inline bool __test_and_change_bit(long nr, volatile unsigned long *addr)
297 {
298 bool oldbit;
299
300 asm volatile("btc %2,%1\n\t"
301 CC_SET(c)
302 : CC_OUT(c) (oldbit), ADDR
303 : "Ir" (nr) : "memory");
304
305 return oldbit;
306 }
307
308 /**
309 * test_and_change_bit - Change a bit and return its old value
310 * @nr: Bit to change
311 * @addr: Address to count from
312 *
313 * This operation is atomic and cannot be reordered.
314 * It also implies a memory barrier.
315 */
316 static __always_inline bool test_and_change_bit(long nr, volatile unsigned long *addr)
317 {
318 GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", c);
319 }
320
321 static __always_inline bool constant_test_bit(long nr, const volatile unsigned long *addr)
322 {
323 return ((1UL << (nr & (BITS_PER_LONG-1))) &
324 (addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
325 }
326
327 static __always_inline bool variable_test_bit(long nr, volatile const unsigned long *addr)
328 {
329 bool oldbit;
330
331 asm volatile("bt %2,%1\n\t"
332 CC_SET(c)
333 : CC_OUT(c) (oldbit)
334 : "m" (*(unsigned long *)addr), "Ir" (nr));
335
336 return oldbit;
337 }
338
339 #if 0 /* Fool kernel-doc since it doesn't do macros yet */
340 /**
341 * test_bit - Determine whether a bit is set
342 * @nr: bit number to test
343 * @addr: Address to start counting from
344 */
345 static bool test_bit(int nr, const volatile unsigned long *addr);
346 #endif
347
348 #define test_bit(nr, addr) \
349 (__builtin_constant_p((nr)) \
350 ? constant_test_bit((nr), (addr)) \
351 : variable_test_bit((nr), (addr)))
352
353 /**
354 * __ffs - find first set bit in word
355 * @word: The word to search
356 *
357 * Undefined if no bit exists, so code should check against 0 first.
358 */
359 static __always_inline unsigned long __ffs(unsigned long word)
360 {
361 asm("rep; bsf %1,%0"
362 : "=r" (word)
363 : "rm" (word));
364 return word;
365 }
366
367 /**
368 * ffz - find first zero bit in word
369 * @word: The word to search
370 *
371 * Undefined if no zero exists, so code should check against ~0UL first.
372 */
373 static __always_inline unsigned long ffz(unsigned long word)
374 {
375 asm("rep; bsf %1,%0"
376 : "=r" (word)
377 : "r" (~word));
378 return word;
379 }
380
381 /*
382 * __fls: find last set bit in word
383 * @word: The word to search
384 *
385 * Undefined if no set bit exists, so code should check against 0 first.
386 */
387 static __always_inline unsigned long __fls(unsigned long word)
388 {
389 asm("bsr %1,%0"
390 : "=r" (word)
391 : "rm" (word));
392 return word;
393 }
394
395 #undef ADDR
396
397 #ifdef __KERNEL__
398 /**
399 * ffs - find first set bit in word
400 * @x: the word to search
401 *
402 * This is defined the same way as the libc and compiler builtin ffs
403 * routines, therefore differs in spirit from the other bitops.
404 *
405 * ffs(value) returns 0 if value is 0 or the position of the first
406 * set bit if value is nonzero. The first (least significant) bit
407 * is at position 1.
408 */
409 static __always_inline int ffs(int x)
410 {
411 int r;
412
413 #ifdef CONFIG_X86_64
414 /*
415 * AMD64 says BSFL won't clobber the dest reg if x==0; Intel64 says the
416 * dest reg is undefined if x==0, but their CPU architect says its
417 * value is written to set it to the same as before, except that the
418 * top 32 bits will be cleared.
419 *
420 * We cannot do this on 32 bits because at the very least some
421 * 486 CPUs did not behave this way.
422 */
423 asm("bsfl %1,%0"
424 : "=r" (r)
425 : "rm" (x), "0" (-1));
426 #elif defined(CONFIG_X86_CMOV)
427 asm("bsfl %1,%0\n\t"
428 "cmovzl %2,%0"
429 : "=&r" (r) : "rm" (x), "r" (-1));
430 #else
431 asm("bsfl %1,%0\n\t"
432 "jnz 1f\n\t"
433 "movl $-1,%0\n"
434 "1:" : "=r" (r) : "rm" (x));
435 #endif
436 return r + 1;
437 }
438
439 /**
440 * fls - find last set bit in word
441 * @x: the word to search
442 *
443 * This is defined in a similar way as the libc and compiler builtin
444 * ffs, but returns the position of the most significant set bit.
445 *
446 * fls(value) returns 0 if value is 0 or the position of the last
447 * set bit if value is nonzero. The last (most significant) bit is
448 * at position 32.
449 */
450 static __always_inline int fls(int x)
451 {
452 int r;
453
454 #ifdef CONFIG_X86_64
455 /*
456 * AMD64 says BSRL won't clobber the dest reg if x==0; Intel64 says the
457 * dest reg is undefined if x==0, but their CPU architect says its
458 * value is written to set it to the same as before, except that the
459 * top 32 bits will be cleared.
460 *
461 * We cannot do this on 32 bits because at the very least some
462 * 486 CPUs did not behave this way.
463 */
464 asm("bsrl %1,%0"
465 : "=r" (r)
466 : "rm" (x), "0" (-1));
467 #elif defined(CONFIG_X86_CMOV)
468 asm("bsrl %1,%0\n\t"
469 "cmovzl %2,%0"
470 : "=&r" (r) : "rm" (x), "rm" (-1));
471 #else
472 asm("bsrl %1,%0\n\t"
473 "jnz 1f\n\t"
474 "movl $-1,%0\n"
475 "1:" : "=r" (r) : "rm" (x));
476 #endif
477 return r + 1;
478 }
479
480 /**
481 * fls64 - find last set bit in a 64-bit word
482 * @x: the word to search
483 *
484 * This is defined in a similar way as the libc and compiler builtin
485 * ffsll, but returns the position of the most significant set bit.
486 *
487 * fls64(value) returns 0 if value is 0 or the position of the last
488 * set bit if value is nonzero. The last (most significant) bit is
489 * at position 64.
490 */
491 #ifdef CONFIG_X86_64
492 static __always_inline int fls64(__u64 x)
493 {
494 int bitpos = -1;
495 /*
496 * AMD64 says BSRQ won't clobber the dest reg if x==0; Intel64 says the
497 * dest reg is undefined if x==0, but their CPU architect says its
498 * value is written to set it to the same as before.
499 */
500 asm("bsrq %1,%q0"
501 : "+r" (bitpos)
502 : "rm" (x));
503 return bitpos + 1;
504 }
505 #else
506 #include <asm-generic/bitops/fls64.h>
507 #endif
508
509 #include <asm-generic/bitops/find.h>
510
511 #include <asm-generic/bitops/sched.h>
512
513 #include <asm/arch_hweight.h>
514
515 #include <asm-generic/bitops/const_hweight.h>
516
517 #include <asm-generic/bitops/le.h>
518
519 #include <asm-generic/bitops/ext2-atomic-setbit.h>
520
521 #endif /* __KERNEL__ */
522 #endif /* _ASM_X86_BITOPS_H */ 1
2 /*
3 * z3fold.c
4 *
5 * Author: Vitaly Wool <vitaly.wool@konsulko.com>
6 * Copyright (C) 2016, Sony Mobile Communications Inc.
7 *
8 * This implementation is based on zbud written by Seth Jennings.
9 *
10 * z3fold is an special purpose allocator for storing compressed pages. It
11 * can store up to three compressed pages per page which improves the
12 * compression ratio of zbud while retaining its main concepts (e. g. always
13 * storing an integral number of objects per page) and simplicity.
14 * It still has simple and deterministic reclaim properties that make it
15 * preferable to a higher density approach (with no requirement on integral
16 * number of object per page) when reclaim is used.
17 *
18 * As in zbud, pages are divided into "chunks". The size of the chunks is
19 * fixed at compile time and is determined by NCHUNKS_ORDER below.
20 *
21 * z3fold doesn't export any API and is meant to be used via zpool API.
22 */
23
24 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
25
26 #include <linux/atomic.h>
27 #include <linux/list.h>
28 #include <linux/mm.h>
29 #include <linux/module.h>
30 #include <linux/preempt.h>
31 #include <linux/slab.h>
32 #include <linux/spinlock.h>
33 #include <linux/zpool.h>
34
35 /*****************
36 * Structures
37 *****************/
38 struct z3fold_pool;
39 struct z3fold_ops {
40 int (*evict)(struct z3fold_pool *pool, unsigned long handle);
41 };
42
43 enum buddy {
44 HEADLESS = 0,
45 FIRST,
46 MIDDLE,
47 LAST,
48 BUDDIES_MAX
49 };
50
51 /*
52 * struct z3fold_header - z3fold page metadata occupying the first chunk of each
53 * z3fold page, except for HEADLESS pages
54 * @buddy: links the z3fold page into the relevant list in the pool
55 * @page_lock: per-page lock
56 * @refcount: reference cound for the z3fold page
57 * @first_chunks: the size of the first buddy in chunks, 0 if free
58 * @middle_chunks: the size of the middle buddy in chunks, 0 if free
59 * @last_chunks: the size of the last buddy in chunks, 0 if free
60 * @first_num: the starting number (for the first handle)
61 */
62 struct z3fold_header {
63 struct list_head buddy;
64 spinlock_t page_lock;
65 struct kref refcount;
66 unsigned short first_chunks;
67 unsigned short middle_chunks;
68 unsigned short last_chunks;
69 unsigned short start_middle;
70 unsigned short first_num:2;
71 };
72
73 /*
74 * NCHUNKS_ORDER determines the internal allocation granularity, effectively
75 * adjusting internal fragmentation. It also determines the number of
76 * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
77 * allocation granularity will be in chunks of size PAGE_SIZE/64. Some chunks
78 * in the beginning of an allocated page are occupied by z3fold header, so
79 * NCHUNKS will be calculated to 63 (or 62 in case CONFIG_DEBUG_SPINLOCK=y),
80 * which shows the max number of free chunks in z3fold page, also there will
81 * be 63, or 62, respectively, freelists per pool.
82 */
83 #define NCHUNKS_ORDER 6
84
85 #define CHUNK_SHIFT (PAGE_SHIFT - NCHUNKS_ORDER)
86 #define CHUNK_SIZE (1 << CHUNK_SHIFT)
87 #define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
88 #define ZHDR_CHUNKS (ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
89 #define TOTAL_CHUNKS (PAGE_SIZE >> CHUNK_SHIFT)
90 #define NCHUNKS ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
91
92 #define BUDDY_MASK (0x3)
93
94 /**
95 * struct z3fold_pool - stores metadata for each z3fold pool
96 * @lock: protects all pool fields and first|last_chunk fields of any
97 * z3fold page in the pool
98 * @unbuddied: array of lists tracking z3fold pages that contain 2- buddies;
99 * the lists each z3fold page is added to depends on the size of
100 * its free region.
101 * @lru: list tracking the z3fold pages in LRU order by most recently
102 * added buddy.
103 * @pages_nr: number of z3fold pages in the pool.
104 * @ops: pointer to a structure of user defined operations specified at
105 * pool creation time.
106 *
107 * This structure is allocated at pool creation time and maintains metadata
108 * pertaining to a particular z3fold pool.
109 */
110 struct z3fold_pool {
111 spinlock_t lock;
112 struct list_head unbuddied[NCHUNKS];
113 struct list_head lru;
114 atomic64_t pages_nr;
115 const struct z3fold_ops *ops;
116 struct zpool *zpool;
117 const struct zpool_ops *zpool_ops;
118 };
119
120 /*
121 * Internal z3fold page flags
122 */
123 enum z3fold_page_flags {
124 PAGE_HEADLESS = 0,
125 MIDDLE_CHUNK_MAPPED,
126 };
127
128
129 /*****************
130 * Helpers
131 *****************/
132
133 /* Converts an allocation size in bytes to size in z3fold chunks */
134 static int size_to_chunks(size_t size)
135 {
136 return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
137 }
138
139 #define for_each_unbuddied_list(_iter, _begin) \
140 for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)
141
142 /* Initializes the z3fold header of a newly allocated z3fold page */
143 static struct z3fold_header *init_z3fold_page(struct page *page)
144 {
145 struct z3fold_header *zhdr = page_address(page);
146
147 INIT_LIST_HEAD(&page->lru);
148 clear_bit(PAGE_HEADLESS, &page->private);
149 clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
150
151 spin_lock_init(&zhdr->page_lock);
152 kref_init(&zhdr->refcount);
153 zhdr->first_chunks = 0;
154 zhdr->middle_chunks = 0;
155 zhdr->last_chunks = 0;
156 zhdr->first_num = 0;
157 zhdr->start_middle = 0;
158 INIT_LIST_HEAD(&zhdr->buddy);
159 return zhdr;
160 }
161
162 /* Resets the struct page fields and frees the page */
163 static void free_z3fold_page(struct page *page)
164 {
165 __free_page(page);
166 }
167
168 static void release_z3fold_page(struct kref *ref)
169 {
170 struct z3fold_header *zhdr;
171 struct page *page;
172
173 zhdr = container_of(ref, struct z3fold_header, refcount);
174 page = virt_to_page(zhdr);
175
176 if (!list_empty(&zhdr->buddy))
177 list_del(&zhdr->buddy);
178 if (!list_empty(&page->lru))
179 list_del(&page->lru);
180 free_z3fold_page(page);
181 }
182
183 /* Lock a z3fold page */
184 static inline void z3fold_page_lock(struct z3fold_header *zhdr)
185 {
186 spin_lock(&zhdr->page_lock);
187 }
188
189 /* Unlock a z3fold page */
190 static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
191 {
192 spin_unlock(&zhdr->page_lock);
193 }
194
195 /*
196 * Encodes the handle of a particular buddy within a z3fold page
197 * Pool lock should be held as this function accesses first_num
198 */
199 static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
200 {
201 unsigned long handle;
202
203 handle = (unsigned long)zhdr;
204 if (bud != HEADLESS)
205 handle += (bud + zhdr->first_num) & BUDDY_MASK;
206 return handle;
207 }
208
209 /* Returns the z3fold page where a given handle is stored */
210 static struct z3fold_header *handle_to_z3fold_header(unsigned long handle)
211 {
212 return (struct z3fold_header *)(handle & PAGE_MASK);
213 }
214
215 /*
216 * (handle & BUDDY_MASK) < zhdr->first_num is possible in encode_handle
217 * but that doesn't matter. because the masking will result in the
218 * correct buddy number.
219 */
220 static enum buddy handle_to_buddy(unsigned long handle)
221 {
222 struct z3fold_header *zhdr = handle_to_z3fold_header(handle);
223 return (handle - zhdr->first_num) & BUDDY_MASK;
224 }
225
226 /*
227 * Returns the number of free chunks in a z3fold page.
228 * NB: can't be used with HEADLESS pages.
229 */
230 static int num_free_chunks(struct z3fold_header *zhdr)
231 {
232 int nfree;
233 /*
234 * If there is a middle object, pick up the bigger free space
235 * either before or after it. Otherwise just subtract the number
236 * of chunks occupied by the first and the last objects.
237 */
238 if (zhdr->middle_chunks != 0) {
239 int nfree_before = zhdr->first_chunks ?
240 0 : zhdr->start_middle - ZHDR_CHUNKS;
241 int nfree_after = zhdr->last_chunks ?
242 0 : TOTAL_CHUNKS -
243 (zhdr->start_middle + zhdr->middle_chunks);
244 nfree = max(nfree_before, nfree_after);
245 } else
246 nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
247 return nfree;
248 }
249
250 /*****************
251 * API Functions
252 *****************/
253 /**
254 * z3fold_create_pool() - create a new z3fold pool
255 * @gfp: gfp flags when allocating the z3fold pool structure
256 * @ops: user-defined operations for the z3fold pool
257 *
258 * Return: pointer to the new z3fold pool or NULL if the metadata allocation
259 * failed.
260 */
261 static struct z3fold_pool *z3fold_create_pool(gfp_t gfp,
262 const struct z3fold_ops *ops)
263 {
264 struct z3fold_pool *pool;
265 int i;
266
267 pool = kzalloc(sizeof(struct z3fold_pool), gfp);
268 if (!pool)
269 return NULL;
270 spin_lock_init(&pool->lock);
271 for_each_unbuddied_list(i, 0)
272 INIT_LIST_HEAD(&pool->unbuddied[i]);
273 INIT_LIST_HEAD(&pool->lru);
274 atomic64_set(&pool->pages_nr, 0);
275 pool->ops = ops;
276 return pool;
277 }
278
279 /**
280 * z3fold_destroy_pool() - destroys an existing z3fold pool
281 * @pool: the z3fold pool to be destroyed
282 *
283 * The pool should be emptied before this function is called.
284 */
285 static void z3fold_destroy_pool(struct z3fold_pool *pool)
286 {
287 kfree(pool);
288 }
289
290 static inline void *mchunk_memmove(struct z3fold_header *zhdr,
291 unsigned short dst_chunk)
292 {
293 void *beg = zhdr;
294 return memmove(beg + (dst_chunk << CHUNK_SHIFT),
295 beg + (zhdr->start_middle << CHUNK_SHIFT),
296 zhdr->middle_chunks << CHUNK_SHIFT);
297 }
298
299 #define BIG_CHUNK_GAP 3
300 /* Has to be called with lock held */
301 static int z3fold_compact_page(struct z3fold_header *zhdr)
302 {
303 struct page *page = virt_to_page(zhdr);
304
305 if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private))
306 return 0; /* can't move middle chunk, it's used */
307
308 if (zhdr->middle_chunks == 0)
309 return 0; /* nothing to compact */
310
311 if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
312 /* move to the beginning */
313 mchunk_memmove(zhdr, ZHDR_CHUNKS);
314 zhdr->first_chunks = zhdr->middle_chunks;
315 zhdr->middle_chunks = 0;
316 zhdr->start_middle = 0;
317 zhdr->first_num++;
318 return 1;
319 }
320
321 /*
322 * moving data is expensive, so let's only do that if
323 * there's substantial gain (at least BIG_CHUNK_GAP chunks)
324 */
325 if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 &&
326 zhdr->start_middle - (zhdr->first_chunks + ZHDR_CHUNKS) >=
327 BIG_CHUNK_GAP) {
328 mchunk_memmove(zhdr, zhdr->first_chunks + ZHDR_CHUNKS);
329 zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
330 return 1;
331 } else if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 &&
332 TOTAL_CHUNKS - (zhdr->last_chunks + zhdr->start_middle
333 + zhdr->middle_chunks) >=
334 BIG_CHUNK_GAP) {
335 unsigned short new_start = TOTAL_CHUNKS - zhdr->last_chunks -
336 zhdr->middle_chunks;
337 mchunk_memmove(zhdr, new_start);
338 zhdr->start_middle = new_start;
339 return 1;
340 }
341
342 return 0;
343 }
344
345 /**
346 * z3fold_alloc() - allocates a region of a given size
347 * @pool: z3fold pool from which to allocate
348 * @size: size in bytes of the desired allocation
349 * @gfp: gfp flags used if the pool needs to grow
350 * @handle: handle of the new allocation
351 *
352 * This function will attempt to find a free region in the pool large enough to
353 * satisfy the allocation request. A search of the unbuddied lists is
354 * performed first. If no suitable free region is found, then a new page is
355 * allocated and added to the pool to satisfy the request.
356 *
357 * gfp should not set __GFP_HIGHMEM as highmem pages cannot be used
358 * as z3fold pool pages.
359 *
360 * Return: 0 if success and handle is set, otherwise -EINVAL if the size or
361 * gfp arguments are invalid or -ENOMEM if the pool was unable to allocate
362 * a new page.
363 */
364 static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
365 unsigned long *handle)
366 {
367 int chunks = 0, i, freechunks;
368 struct z3fold_header *zhdr = NULL;
369 enum buddy bud;
370 struct page *page;
371
372 if (!size || (gfp & __GFP_HIGHMEM))
373 return -EINVAL;
374
375 if (size > PAGE_SIZE)
376 return -ENOSPC;
377
378 if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
379 bud = HEADLESS;
380 else {
381 chunks = size_to_chunks(size);
382
383 /* First, try to find an unbuddied z3fold page. */
384 zhdr = NULL;
385 for_each_unbuddied_list(i, chunks) {
386 spin_lock(&pool->lock);
387 zhdr = list_first_entry_or_null(&pool->unbuddied[i],
388 struct z3fold_header, buddy);
389 if (!zhdr) {
390 spin_unlock(&pool->lock);
391 continue;
392 }
393 kref_get(&zhdr->refcount);
394 list_del_init(&zhdr->buddy);
395 spin_unlock(&pool->lock);
396
397 page = virt_to_page(zhdr);
398 z3fold_page_lock(zhdr);
399 if (zhdr->first_chunks == 0) {
400 if (zhdr->middle_chunks != 0 &&
401 chunks >= zhdr->start_middle)
402 bud = LAST;
403 else
404 bud = FIRST;
405 } else if (zhdr->last_chunks == 0)
406 bud = LAST;
407 else if (zhdr->middle_chunks == 0)
408 bud = MIDDLE;
409 else {
410 z3fold_page_unlock(zhdr);
411 spin_lock(&pool->lock);
412 if (kref_put(&zhdr->refcount,
413 release_z3fold_page))
414 atomic64_dec(&pool->pages_nr);
415 spin_unlock(&pool->lock);
416 pr_err("No free chunks in unbuddied\n");
417 WARN_ON(1);
418 continue;
419 }
420 goto found;
421 }
422 bud = FIRST;
423 }
424
425 /* Couldn't find unbuddied z3fold page, create new one */
426 page = alloc_page(gfp);
427 if (!page)
428 return -ENOMEM;
429
430 atomic64_inc(&pool->pages_nr);
431 zhdr = init_z3fold_page(page);
432
433 if (bud == HEADLESS) {
434 set_bit(PAGE_HEADLESS, &page->private);
435 spin_lock(&pool->lock);
436 goto headless;
437 }
438 z3fold_page_lock(zhdr);
439
440 found:
441 if (bud == FIRST)
442 zhdr->first_chunks = chunks;
443 else if (bud == LAST)
444 zhdr->last_chunks = chunks;
445 else {
446 zhdr->middle_chunks = chunks;
447 zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
448 }
449
450 spin_lock(&pool->lock);
451 if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
452 zhdr->middle_chunks == 0) {
453 /* Add to unbuddied list */
454 freechunks = num_free_chunks(zhdr);
455 list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
456 }
457
458 headless:
459 /* Add/move z3fold page to beginning of LRU */
460 if (!list_empty(&page->lru))
461 list_del(&page->lru);
462
463 list_add(&page->lru, &pool->lru);
464
465 *handle = encode_handle(zhdr, bud);
466 spin_unlock(&pool->lock);
467 if (bud != HEADLESS)
468 z3fold_page_unlock(zhdr);
469
470 return 0;
471 }
472
473 /**
474 * z3fold_free() - frees the allocation associated with the given handle
475 * @pool: pool in which the allocation resided
476 * @handle: handle associated with the allocation returned by z3fold_alloc()
477 *
478 * In the case that the z3fold page in which the allocation resides is under
479 * reclaim, as indicated by the PG_reclaim flag being set, this function
480 * only sets the first|last_chunks to 0. The page is actually freed
481 * once both buddies are evicted (see z3fold_reclaim_page() below).
482 */
483 static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
484 {
485 struct z3fold_header *zhdr;
486 int freechunks;
487 struct page *page;
488 enum buddy bud;
489
490 zhdr = handle_to_z3fold_header(handle);
491 page = virt_to_page(zhdr);
492
493 if (test_bit(PAGE_HEADLESS, &page->private)) {
494 /* HEADLESS page stored */
495 bud = HEADLESS;
496 } else {
497 z3fold_page_lock(zhdr);
498 bud = handle_to_buddy(handle);
499
500 switch (bud) {
501 case FIRST:
502 zhdr->first_chunks = 0;
503 break;
504 case MIDDLE:
505 zhdr->middle_chunks = 0;
506 zhdr->start_middle = 0;
507 break;
508 case LAST:
509 zhdr->last_chunks = 0;
510 break;
511 default:
512 pr_err("%s: unknown bud %d\n", __func__, bud);
513 WARN_ON(1);
514 z3fold_page_unlock(zhdr);
515 return;
516 }
517 }
518
519 if (bud == HEADLESS) {
520 spin_lock(&pool->lock);
521 list_del(&page->lru);
522 spin_unlock(&pool->lock);
523 free_z3fold_page(page);
524 atomic64_dec(&pool->pages_nr);
525 } else {
526 if (zhdr->first_chunks != 0 || zhdr->middle_chunks != 0 ||
527 zhdr->last_chunks != 0) {
528 z3fold_compact_page(zhdr);
529 /* Add to the unbuddied list */
530 spin_lock(&pool->lock);
531 if (!list_empty(&zhdr->buddy))
532 list_del(&zhdr->buddy);
533 freechunks = num_free_chunks(zhdr);
534 list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
535 spin_unlock(&pool->lock);
536 }
537 z3fold_page_unlock(zhdr);
538 spin_lock(&pool->lock);
539 if (kref_put(&zhdr->refcount, release_z3fold_page))
540 atomic64_dec(&pool->pages_nr);
541 spin_unlock(&pool->lock);
542 }
543
544 }
545
546 /**
547 * z3fold_reclaim_page() - evicts allocations from a pool page and frees it
548 * @pool: pool from which a page will attempt to be evicted
549 * @retires: number of pages on the LRU list for which eviction will
550 * be attempted before failing
551 *
552 * z3fold reclaim is different from normal system reclaim in that it is done
553 * from the bottom, up. This is because only the bottom layer, z3fold, has
554 * information on how the allocations are organized within each z3fold page.
555 * This has the potential to create interesting locking situations between
556 * z3fold and the user, however.
557 *
558 * To avoid these, this is how z3fold_reclaim_page() should be called:
559
560 * The user detects a page should be reclaimed and calls z3fold_reclaim_page().
561 * z3fold_reclaim_page() will remove a z3fold page from the pool LRU list and
562 * call the user-defined eviction handler with the pool and handle as
563 * arguments.
564 *
565 * If the handle can not be evicted, the eviction handler should return
566 * non-zero. z3fold_reclaim_page() will add the z3fold page back to the
567 * appropriate list and try the next z3fold page on the LRU up to
568 * a user defined number of retries.
569 *
570 * If the handle is successfully evicted, the eviction handler should
571 * return 0 _and_ should have called z3fold_free() on the handle. z3fold_free()
572 * contains logic to delay freeing the page if the page is under reclaim,
573 * as indicated by the setting of the PG_reclaim flag on the underlying page.
574 *
575 * If all buddies in the z3fold page are successfully evicted, then the
576 * z3fold page can be freed.
577 *
578 * Returns: 0 if page is successfully freed, otherwise -EINVAL if there are
579 * no pages to evict or an eviction handler is not registered, -EAGAIN if
580 * the retry limit was hit.
581 */
582 static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
583 {
584 int i, ret = 0, freechunks;
585 struct z3fold_header *zhdr;
586 struct page *page;
587 unsigned long first_handle = 0, middle_handle = 0, last_handle = 0;
588
589 spin_lock(&pool->lock);
590 if (!pool->ops || !pool->ops->evict || retries == 0) {
591 spin_unlock(&pool->lock);
592 return -EINVAL;
593 }
594 for (i = 0; i < retries; i++) {
595 if (list_empty(&pool->lru)) {
596 spin_unlock(&pool->lock);
597 return -EINVAL;
598 }
599 page = list_last_entry(&pool->lru, struct page, lru);
600 list_del_init(&page->lru);
601
602 zhdr = page_address(page);
603 if (!test_bit(PAGE_HEADLESS, &page->private)) {
604 if (!list_empty(&zhdr->buddy))
605 list_del_init(&zhdr->buddy);
606 kref_get(&zhdr->refcount);
607 spin_unlock(&pool->lock);
608 z3fold_page_lock(zhdr);
609 /*
610 * We need encode the handles before unlocking, since
611 * we can race with free that will set
612 * (first|last)_chunks to 0
613 */
614 first_handle = 0;
615 last_handle = 0;
616 middle_handle = 0;
617 if (zhdr->first_chunks)
618 first_handle = encode_handle(zhdr, FIRST);
619 if (zhdr->middle_chunks)
620 middle_handle = encode_handle(zhdr, MIDDLE);
621 if (zhdr->last_chunks)
622 last_handle = encode_handle(zhdr, LAST);
623 z3fold_page_unlock(zhdr);
624 } else {
625 first_handle = encode_handle(zhdr, HEADLESS);
626 last_handle = middle_handle = 0;
627 spin_unlock(&pool->lock);
628 }
629
630 /* Issue the eviction callback(s) */
631 if (middle_handle) {
632 ret = pool->ops->evict(pool, middle_handle);
633 if (ret)
634 goto next;
635 }
636 if (first_handle) {
637 ret = pool->ops->evict(pool, first_handle);
638 if (ret)
639 goto next;
640 }
641 if (last_handle) {
642 ret = pool->ops->evict(pool, last_handle);
643 if (ret)
644 goto next;
645 }
646 next:
647 if (test_bit(PAGE_HEADLESS, &page->private)) {
648 if (ret == 0) {
649 free_z3fold_page(page);
650 return 0;
651 } else {
652 spin_lock(&pool->lock);
653 }
654 } else {
655 z3fold_page_lock(zhdr);
656 if ((zhdr->first_chunks || zhdr->last_chunks ||
657 zhdr->middle_chunks) &&
658 !(zhdr->first_chunks && zhdr->last_chunks &&
659 zhdr->middle_chunks)) {
660 z3fold_compact_page(zhdr);
661 /* add to unbuddied list */
662 spin_lock(&pool->lock);
663 freechunks = num_free_chunks(zhdr);
664 list_add(&zhdr->buddy,
665 &pool->unbuddied[freechunks]);
666 spin_unlock(&pool->lock);
667 }
668 z3fold_page_unlock(zhdr);
669 spin_lock(&pool->lock);
670 if (kref_put(&zhdr->refcount, release_z3fold_page)) {
671 atomic64_dec(&pool->pages_nr);
672 return 0;
673 }
674 }
675
676 /*
677 * Add to the beginning of LRU.
678 * Pool lock has to be kept here to ensure the page has
679 * not already been released
680 */
681 list_add(&page->lru, &pool->lru);
682 }
683 spin_unlock(&pool->lock);
684 return -EAGAIN;
685 }
686
687 /**
688 * z3fold_map() - maps the allocation associated with the given handle
689 * @pool: pool in which the allocation resides
690 * @handle: handle associated with the allocation to be mapped
691 *
692 * Extracts the buddy number from handle and constructs the pointer to the
693 * correct starting chunk within the page.
694 *
695 * Returns: a pointer to the mapped allocation
696 */
697 static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
698 {
699 struct z3fold_header *zhdr;
700 struct page *page;
701 void *addr;
702 enum buddy buddy;
703
704 zhdr = handle_to_z3fold_header(handle);
705 addr = zhdr;
706 page = virt_to_page(zhdr);
707
708 if (test_bit(PAGE_HEADLESS, &page->private))
709 goto out;
710
711 z3fold_page_lock(zhdr);
712 buddy = handle_to_buddy(handle);
713 switch (buddy) {
714 case FIRST:
715 addr += ZHDR_SIZE_ALIGNED;
716 break;
717 case MIDDLE:
718 addr += zhdr->start_middle << CHUNK_SHIFT;
719 set_bit(MIDDLE_CHUNK_MAPPED, &page->private);
720 break;
721 case LAST:
722 addr += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT);
723 break;
724 default:
725 pr_err("unknown buddy id %d\n", buddy);
726 WARN_ON(1);
727 addr = NULL;
728 break;
729 }
730
731 z3fold_page_unlock(zhdr);
732 out:
733 return addr;
734 }
735
736 /**
737 * z3fold_unmap() - unmaps the allocation associated with the given handle
738 * @pool: pool in which the allocation resides
739 * @handle: handle associated with the allocation to be unmapped
740 */
741 static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle)
742 {
743 struct z3fold_header *zhdr;
744 struct page *page;
745 enum buddy buddy;
746
747 zhdr = handle_to_z3fold_header(handle);
748 page = virt_to_page(zhdr);
749
750 if (test_bit(PAGE_HEADLESS, &page->private))
751 return;
752
753 z3fold_page_lock(zhdr);
754 buddy = handle_to_buddy(handle);
755 if (buddy == MIDDLE)
756 clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
757 z3fold_page_unlock(zhdr);
758 }
759
760 /**
761 * z3fold_get_pool_size() - gets the z3fold pool size in pages
762 * @pool: pool whose size is being queried
763 *
764 * Returns: size in pages of the given pool.
765 */
766 static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
767 {
768 return atomic64_read(&pool->pages_nr);
769 }
770
771 /*****************
772 * zpool
773 ****************/
774
775 static int z3fold_zpool_evict(struct z3fold_pool *pool, unsigned long handle)
776 {
777 if (pool->zpool && pool->zpool_ops && pool->zpool_ops->evict)
778 return pool->zpool_ops->evict(pool->zpool, handle);
779 else
780 return -ENOENT;
781 }
782
783 static const struct z3fold_ops z3fold_zpool_ops = {
784 .evict = z3fold_zpool_evict
785 };
786
787 static void *z3fold_zpool_create(const char *name, gfp_t gfp,
788 const struct zpool_ops *zpool_ops,
789 struct zpool *zpool)
790 {
791 struct z3fold_pool *pool;
792
793 pool = z3fold_create_pool(gfp, zpool_ops ? &z3fold_zpool_ops : NULL);
794 if (pool) {
795 pool->zpool = zpool;
796 pool->zpool_ops = zpool_ops;
797 }
798 return pool;
799 }
800
801 static void z3fold_zpool_destroy(void *pool)
802 {
803 z3fold_destroy_pool(pool);
804 }
805
806 static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp,
807 unsigned long *handle)
808 {
809 return z3fold_alloc(pool, size, gfp, handle);
810 }
811 static void z3fold_zpool_free(void *pool, unsigned long handle)
812 {
813 z3fold_free(pool, handle);
814 }
815
816 static int z3fold_zpool_shrink(void *pool, unsigned int pages,
817 unsigned int *reclaimed)
818 {
819 unsigned int total = 0;
820 int ret = -EINVAL;
821
822 while (total < pages) {
823 ret = z3fold_reclaim_page(pool, 8);
824 if (ret < 0)
825 break;
826 total++;
827 }
828
829 if (reclaimed)
830 *reclaimed = total;
831
832 return ret;
833 }
834
835 static void *z3fold_zpool_map(void *pool, unsigned long handle,
836 enum zpool_mapmode mm)
837 {
838 return z3fold_map(pool, handle);
839 }
840 static void z3fold_zpool_unmap(void *pool, unsigned long handle)
841 {
842 z3fold_unmap(pool, handle);
843 }
844
845 static u64 z3fold_zpool_total_size(void *pool)
846 {
847 return z3fold_get_pool_size(pool) * PAGE_SIZE;
848 }
849
850 static struct zpool_driver z3fold_zpool_driver = {
851 .type = "z3fold",
852 .owner = THIS_MODULE,
853 .create = z3fold_zpool_create,
854 .destroy = z3fold_zpool_destroy,
855 .malloc = z3fold_zpool_malloc,
856 .free = z3fold_zpool_free,
857 .shrink = z3fold_zpool_shrink,
858 .map = z3fold_zpool_map,
859 .unmap = z3fold_zpool_unmap,
860 .total_size = z3fold_zpool_total_size,
861 };
862
863 MODULE_ALIAS("zpool-z3fold");
864
865 static int __init init_z3fold(void)
866 {
867 /* Make sure the z3fold header is not larger than the page size */
868 BUILD_BUG_ON(ZHDR_SIZE_ALIGNED > PAGE_SIZE);
869 zpool_register_driver(&z3fold_zpool_driver);
870
871 return 0;
872 }
873
874 static void __exit exit_z3fold(void)
875 {
876 zpool_unregister_driver(&z3fold_zpool_driver);
877 }
878
879 module_init(init_z3fold);
880 module_exit(exit_z3fold);
881
882 MODULE_LICENSE("GPL");
883 MODULE_AUTHOR("Vitaly Wool <vitalywool@gmail.com>");
884 MODULE_DESCRIPTION("3-Fold Allocator for Compressed Pages");
885
886
887
888
889
890 /* LDV_COMMENT_BEGIN_MAIN */
891 #ifdef LDV_MAIN0_sequence_infinite_withcheck_stateful
892
893 /*###########################################################################*/
894
895 /*############## Driver Environment Generator 0.2 output ####################*/
896
897 /*###########################################################################*/
898
899
900
901 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test if all kernel resources are correctly released by driver before driver will be unloaded. */
902 void ldv_check_final_state(void);
903
904 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result. */
905 void ldv_check_return_value(int res);
906
907 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result of probe() function. */
908 void ldv_check_return_value_probe(int res);
909
910 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Initializes the model. */
911 void ldv_initialize(void);
912
913 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Reinitializes the model between distinct model function calls. */
914 void ldv_handler_precall(void);
915
916 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Returns arbitrary interger value. */
917 int nondet_int(void);
918
919 /* LDV_COMMENT_VAR_DECLARE_LDV Special variable for LDV verifier. */
920 int LDV_IN_INTERRUPT;
921
922 /* LDV_COMMENT_FUNCTION_MAIN Main function for LDV verifier. */
923 void ldv_main0_sequence_infinite_withcheck_stateful(void) {
924
925
926
927 /* LDV_COMMENT_BEGIN_VARIABLE_DECLARATION_PART */
928 /*============================= VARIABLE DECLARATION PART =============================*/
929 /** STRUCT: struct type: z3fold_ops, struct name: z3fold_zpool_ops **/
930 /* content: static int z3fold_zpool_evict(struct z3fold_pool *pool, unsigned long handle)*/
931 /* LDV_COMMENT_BEGIN_PREP */
932 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
933 #define NCHUNKS_ORDER 6
934 #define CHUNK_SHIFT (PAGE_SHIFT - NCHUNKS_ORDER)
935 #define CHUNK_SIZE (1 << CHUNK_SHIFT)
936 #define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
937 #define ZHDR_CHUNKS (ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
938 #define TOTAL_CHUNKS (PAGE_SIZE >> CHUNK_SHIFT)
939 #define NCHUNKS ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
940 #define BUDDY_MASK (0x3)
941 #define for_each_unbuddied_list(_iter, _begin) \
942 for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)
943 #define BIG_CHUNK_GAP 3
944 /* LDV_COMMENT_END_PREP */
945 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "z3fold_zpool_evict" */
946 struct z3fold_pool * var_group1;
947 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "z3fold_zpool_evict" */
948 unsigned long var_z3fold_zpool_evict_20_p1;
949
950 /** STRUCT: struct type: zpool_driver, struct name: z3fold_zpool_driver **/
951 /* content: static void *z3fold_zpool_create(const char *name, gfp_t gfp, const struct zpool_ops *zpool_ops, struct zpool *zpool)*/
952 /* LDV_COMMENT_BEGIN_PREP */
953 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
954 #define NCHUNKS_ORDER 6
955 #define CHUNK_SHIFT (PAGE_SHIFT - NCHUNKS_ORDER)
956 #define CHUNK_SIZE (1 << CHUNK_SHIFT)
957 #define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
958 #define ZHDR_CHUNKS (ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
959 #define TOTAL_CHUNKS (PAGE_SIZE >> CHUNK_SHIFT)
960 #define NCHUNKS ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
961 #define BUDDY_MASK (0x3)
962 #define for_each_unbuddied_list(_iter, _begin) \
963 for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)
964 #define BIG_CHUNK_GAP 3
965 /* LDV_COMMENT_END_PREP */
966 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "z3fold_zpool_create" */
967 const char * var_z3fold_zpool_create_21_p0;
968 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "z3fold_zpool_create" */
969 gfp_t var_z3fold_zpool_create_21_p1;
970 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "z3fold_zpool_create" */
971 const struct zpool_ops * var_z3fold_zpool_create_21_p2;
972 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "z3fold_zpool_create" */
973 struct zpool * var_z3fold_zpool_create_21_p3;
974 /* content: static void z3fold_zpool_destroy(void *pool)*/
975 /* LDV_COMMENT_BEGIN_PREP */
976 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
977 #define NCHUNKS_ORDER 6
978 #define CHUNK_SHIFT (PAGE_SHIFT - NCHUNKS_ORDER)
979 #define CHUNK_SIZE (1 << CHUNK_SHIFT)
980 #define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
981 #define ZHDR_CHUNKS (ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
982 #define TOTAL_CHUNKS (PAGE_SIZE >> CHUNK_SHIFT)
983 #define NCHUNKS ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
984 #define BUDDY_MASK (0x3)
985 #define for_each_unbuddied_list(_iter, _begin) \
986 for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)
987 #define BIG_CHUNK_GAP 3
988 /* LDV_COMMENT_END_PREP */
989 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "z3fold_zpool_destroy" */
990 void * var_z3fold_zpool_destroy_22_p0;
991 /* content: static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp, unsigned long *handle)*/
992 /* LDV_COMMENT_BEGIN_PREP */
993 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
994 #define NCHUNKS_ORDER 6
995 #define CHUNK_SHIFT (PAGE_SHIFT - NCHUNKS_ORDER)
996 #define CHUNK_SIZE (1 << CHUNK_SHIFT)
997 #define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
998 #define ZHDR_CHUNKS (ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
999 #define TOTAL_CHUNKS (PAGE_SIZE >> CHUNK_SHIFT)
1000 #define NCHUNKS ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
1001 #define BUDDY_MASK (0x3)
1002 #define for_each_unbuddied_list(_iter, _begin) \
1003 for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)
1004 #define BIG_CHUNK_GAP 3
1005 /* LDV_COMMENT_END_PREP */
1006 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "z3fold_zpool_malloc" */
1007 void * var_z3fold_zpool_malloc_23_p0;
1008 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "z3fold_zpool_malloc" */
1009 size_t var_z3fold_zpool_malloc_23_p1;
1010 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "z3fold_zpool_malloc" */
1011 gfp_t var_z3fold_zpool_malloc_23_p2;
1012 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "z3fold_zpool_malloc" */
1013 unsigned long * var_z3fold_zpool_malloc_23_p3;
1014 /* content: static void z3fold_zpool_free(void *pool, unsigned long handle)*/
1015 /* LDV_COMMENT_BEGIN_PREP */
1016 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
1017 #define NCHUNKS_ORDER 6
1018 #define CHUNK_SHIFT (PAGE_SHIFT - NCHUNKS_ORDER)
1019 #define CHUNK_SIZE (1 << CHUNK_SHIFT)
1020 #define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
1021 #define ZHDR_CHUNKS (ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
1022 #define TOTAL_CHUNKS (PAGE_SIZE >> CHUNK_SHIFT)
1023 #define NCHUNKS ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
1024 #define BUDDY_MASK (0x3)
1025 #define for_each_unbuddied_list(_iter, _begin) \
1026 for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)
1027 #define BIG_CHUNK_GAP 3
1028 /* LDV_COMMENT_END_PREP */
1029 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "z3fold_zpool_free" */
1030 void * var_z3fold_zpool_free_24_p0;
1031 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "z3fold_zpool_free" */
1032 unsigned long var_z3fold_zpool_free_24_p1;
1033 /* content: static int z3fold_zpool_shrink(void *pool, unsigned int pages, unsigned int *reclaimed)*/
1034 /* LDV_COMMENT_BEGIN_PREP */
1035 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
1036 #define NCHUNKS_ORDER 6
1037 #define CHUNK_SHIFT (PAGE_SHIFT - NCHUNKS_ORDER)
1038 #define CHUNK_SIZE (1 << CHUNK_SHIFT)
1039 #define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
1040 #define ZHDR_CHUNKS (ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
1041 #define TOTAL_CHUNKS (PAGE_SIZE >> CHUNK_SHIFT)
1042 #define NCHUNKS ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
1043 #define BUDDY_MASK (0x3)
1044 #define for_each_unbuddied_list(_iter, _begin) \
1045 for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)
1046 #define BIG_CHUNK_GAP 3
1047 /* LDV_COMMENT_END_PREP */
1048 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "z3fold_zpool_shrink" */
1049 void * var_z3fold_zpool_shrink_25_p0;
1050 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "z3fold_zpool_shrink" */
1051 unsigned int var_z3fold_zpool_shrink_25_p1;
1052 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "z3fold_zpool_shrink" */
1053 unsigned int * var_z3fold_zpool_shrink_25_p2;
1054 /* content: static void *z3fold_zpool_map(void *pool, unsigned long handle, enum zpool_mapmode mm)*/
1055 /* LDV_COMMENT_BEGIN_PREP */
1056 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
1057 #define NCHUNKS_ORDER 6
1058 #define CHUNK_SHIFT (PAGE_SHIFT - NCHUNKS_ORDER)
1059 #define CHUNK_SIZE (1 << CHUNK_SHIFT)
1060 #define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
1061 #define ZHDR_CHUNKS (ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
1062 #define TOTAL_CHUNKS (PAGE_SIZE >> CHUNK_SHIFT)
1063 #define NCHUNKS ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
1064 #define BUDDY_MASK (0x3)
1065 #define for_each_unbuddied_list(_iter, _begin) \
1066 for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)
1067 #define BIG_CHUNK_GAP 3
1068 /* LDV_COMMENT_END_PREP */
1069 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "z3fold_zpool_map" */
1070 void * var_z3fold_zpool_map_26_p0;
1071 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "z3fold_zpool_map" */
1072 unsigned long var_z3fold_zpool_map_26_p1;
1073 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "z3fold_zpool_map" */
1074 enum zpool_mapmode var_z3fold_zpool_map_26_p2;
1075 /* content: static void z3fold_zpool_unmap(void *pool, unsigned long handle)*/
1076 /* LDV_COMMENT_BEGIN_PREP */
1077 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
1078 #define NCHUNKS_ORDER 6
1079 #define CHUNK_SHIFT (PAGE_SHIFT - NCHUNKS_ORDER)
1080 #define CHUNK_SIZE (1 << CHUNK_SHIFT)
1081 #define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
1082 #define ZHDR_CHUNKS (ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
1083 #define TOTAL_CHUNKS (PAGE_SIZE >> CHUNK_SHIFT)
1084 #define NCHUNKS ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
1085 #define BUDDY_MASK (0x3)
1086 #define for_each_unbuddied_list(_iter, _begin) \
1087 for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)
1088 #define BIG_CHUNK_GAP 3
1089 /* LDV_COMMENT_END_PREP */
1090 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "z3fold_zpool_unmap" */
1091 void * var_z3fold_zpool_unmap_27_p0;
1092 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "z3fold_zpool_unmap" */
1093 unsigned long var_z3fold_zpool_unmap_27_p1;
1094 /* content: static u64 z3fold_zpool_total_size(void *pool)*/
1095 /* LDV_COMMENT_BEGIN_PREP */
1096 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
1097 #define NCHUNKS_ORDER 6
1098 #define CHUNK_SHIFT (PAGE_SHIFT - NCHUNKS_ORDER)
1099 #define CHUNK_SIZE (1 << CHUNK_SHIFT)
1100 #define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
1101 #define ZHDR_CHUNKS (ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
1102 #define TOTAL_CHUNKS (PAGE_SIZE >> CHUNK_SHIFT)
1103 #define NCHUNKS ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
1104 #define BUDDY_MASK (0x3)
1105 #define for_each_unbuddied_list(_iter, _begin) \
1106 for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)
1107 #define BIG_CHUNK_GAP 3
1108 /* LDV_COMMENT_END_PREP */
1109 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "z3fold_zpool_total_size" */
1110 void * var_z3fold_zpool_total_size_28_p0;
1111
1112
1113
1114
1115 /* LDV_COMMENT_END_VARIABLE_DECLARATION_PART */
1116 /* LDV_COMMENT_BEGIN_VARIABLE_INITIALIZING_PART */
1117 /*============================= VARIABLE INITIALIZING PART =============================*/
1118 LDV_IN_INTERRUPT=1;
1119
1120
1121
1122
1123 /* LDV_COMMENT_END_VARIABLE_INITIALIZING_PART */
1124 /* LDV_COMMENT_BEGIN_FUNCTION_CALL_SECTION */
1125 /*============================= FUNCTION CALL SECTION =============================*/
1126 /* LDV_COMMENT_FUNCTION_CALL Initialize LDV model. */
1127 ldv_initialize();
1128
1129 /** INIT: init_type: ST_MODULE_INIT **/
1130 /* content: static int __init init_z3fold(void)*/
1131 /* LDV_COMMENT_BEGIN_PREP */
1132 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
1133 #define NCHUNKS_ORDER 6
1134 #define CHUNK_SHIFT (PAGE_SHIFT - NCHUNKS_ORDER)
1135 #define CHUNK_SIZE (1 << CHUNK_SHIFT)
1136 #define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
1137 #define ZHDR_CHUNKS (ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
1138 #define TOTAL_CHUNKS (PAGE_SIZE >> CHUNK_SHIFT)
1139 #define NCHUNKS ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
1140 #define BUDDY_MASK (0x3)
1141 #define for_each_unbuddied_list(_iter, _begin) \
1142 for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)
1143 #define BIG_CHUNK_GAP 3
1144 /* LDV_COMMENT_END_PREP */
1145 /* LDV_COMMENT_FUNCTION_CALL Kernel calls driver init function after driver loading to kernel. This function declared as "MODULE_INIT(function name)". */
1146 ldv_handler_precall();
1147 if(init_z3fold())
1148 goto ldv_final;
1149
1150
1151
1152
1153
1154 while( nondet_int()
1155 ) {
1156
1157 switch(nondet_int()) {
1158
1159 case 0: {
1160
1161 /** STRUCT: struct type: z3fold_ops, struct name: z3fold_zpool_ops **/
1162
1163
1164 /* content: static int z3fold_zpool_evict(struct z3fold_pool *pool, unsigned long handle)*/
1165 /* LDV_COMMENT_BEGIN_PREP */
1166 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
1167 #define NCHUNKS_ORDER 6
1168 #define CHUNK_SHIFT (PAGE_SHIFT - NCHUNKS_ORDER)
1169 #define CHUNK_SIZE (1 << CHUNK_SHIFT)
1170 #define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
1171 #define ZHDR_CHUNKS (ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
1172 #define TOTAL_CHUNKS (PAGE_SIZE >> CHUNK_SHIFT)
1173 #define NCHUNKS ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
1174 #define BUDDY_MASK (0x3)
1175 #define for_each_unbuddied_list(_iter, _begin) \
1176 for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)
1177 #define BIG_CHUNK_GAP 3
1178 /* LDV_COMMENT_END_PREP */
1179 /* LDV_COMMENT_FUNCTION_CALL Function from field "evict" from driver structure with callbacks "z3fold_zpool_ops" */
1180 ldv_handler_precall();
1181 z3fold_zpool_evict( var_group1, var_z3fold_zpool_evict_20_p1);
1182
1183
1184
1185
1186 }
1187
1188 break;
1189 case 1: {
1190
1191 /** STRUCT: struct type: zpool_driver, struct name: z3fold_zpool_driver **/
1192
1193
1194 /* content: static void *z3fold_zpool_create(const char *name, gfp_t gfp, const struct zpool_ops *zpool_ops, struct zpool *zpool)*/
1195 /* LDV_COMMENT_BEGIN_PREP */
1196 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
1197 #define NCHUNKS_ORDER 6
1198 #define CHUNK_SHIFT (PAGE_SHIFT - NCHUNKS_ORDER)
1199 #define CHUNK_SIZE (1 << CHUNK_SHIFT)
1200 #define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
1201 #define ZHDR_CHUNKS (ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
1202 #define TOTAL_CHUNKS (PAGE_SIZE >> CHUNK_SHIFT)
1203 #define NCHUNKS ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
1204 #define BUDDY_MASK (0x3)
1205 #define for_each_unbuddied_list(_iter, _begin) \
1206 for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)
1207 #define BIG_CHUNK_GAP 3
1208 /* LDV_COMMENT_END_PREP */
1209 /* LDV_COMMENT_FUNCTION_CALL Function from field "create" from driver structure with callbacks "z3fold_zpool_driver" */
1210 ldv_handler_precall();
1211 z3fold_zpool_create( var_z3fold_zpool_create_21_p0, var_z3fold_zpool_create_21_p1, var_z3fold_zpool_create_21_p2, var_z3fold_zpool_create_21_p3);
1212
1213
1214
1215
1216 }
1217
1218 break;
1219 case 2: {
1220
1221 /** STRUCT: struct type: zpool_driver, struct name: z3fold_zpool_driver **/
1222
1223
1224 /* content: static void z3fold_zpool_destroy(void *pool)*/
1225 /* LDV_COMMENT_BEGIN_PREP */
1226 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
1227 #define NCHUNKS_ORDER 6
1228 #define CHUNK_SHIFT (PAGE_SHIFT - NCHUNKS_ORDER)
1229 #define CHUNK_SIZE (1 << CHUNK_SHIFT)
1230 #define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
1231 #define ZHDR_CHUNKS (ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
1232 #define TOTAL_CHUNKS (PAGE_SIZE >> CHUNK_SHIFT)
1233 #define NCHUNKS ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
1234 #define BUDDY_MASK (0x3)
1235 #define for_each_unbuddied_list(_iter, _begin) \
1236 for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)
1237 #define BIG_CHUNK_GAP 3
1238 /* LDV_COMMENT_END_PREP */
1239 /* LDV_COMMENT_FUNCTION_CALL Function from field "destroy" from driver structure with callbacks "z3fold_zpool_driver" */
1240 ldv_handler_precall();
1241 z3fold_zpool_destroy( var_z3fold_zpool_destroy_22_p0);
1242
1243
1244
1245
1246 }
1247
1248 break;
1249 case 3: {
1250
1251 /** STRUCT: struct type: zpool_driver, struct name: z3fold_zpool_driver **/
1252
1253
1254 /* content: static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp, unsigned long *handle)*/
1255 /* LDV_COMMENT_BEGIN_PREP */
1256 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
1257 #define NCHUNKS_ORDER 6
1258 #define CHUNK_SHIFT (PAGE_SHIFT - NCHUNKS_ORDER)
1259 #define CHUNK_SIZE (1 << CHUNK_SHIFT)
1260 #define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
1261 #define ZHDR_CHUNKS (ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
1262 #define TOTAL_CHUNKS (PAGE_SIZE >> CHUNK_SHIFT)
1263 #define NCHUNKS ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
1264 #define BUDDY_MASK (0x3)
1265 #define for_each_unbuddied_list(_iter, _begin) \
1266 for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)
1267 #define BIG_CHUNK_GAP 3
1268 /* LDV_COMMENT_END_PREP */
1269 /* LDV_COMMENT_FUNCTION_CALL Function from field "malloc" from driver structure with callbacks "z3fold_zpool_driver" */
1270 ldv_handler_precall();
1271 z3fold_zpool_malloc( var_z3fold_zpool_malloc_23_p0, var_z3fold_zpool_malloc_23_p1, var_z3fold_zpool_malloc_23_p2, var_z3fold_zpool_malloc_23_p3);
1272
1273
1274
1275
1276 }
1277
1278 break;
1279 case 4: {
1280
1281 /** STRUCT: struct type: zpool_driver, struct name: z3fold_zpool_driver **/
1282
1283
1284 /* content: static void z3fold_zpool_free(void *pool, unsigned long handle)*/
1285 /* LDV_COMMENT_BEGIN_PREP */
1286 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
1287 #define NCHUNKS_ORDER 6
1288 #define CHUNK_SHIFT (PAGE_SHIFT - NCHUNKS_ORDER)
1289 #define CHUNK_SIZE (1 << CHUNK_SHIFT)
1290 #define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
1291 #define ZHDR_CHUNKS (ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
1292 #define TOTAL_CHUNKS (PAGE_SIZE >> CHUNK_SHIFT)
1293 #define NCHUNKS ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
1294 #define BUDDY_MASK (0x3)
1295 #define for_each_unbuddied_list(_iter, _begin) \
1296 for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)
1297 #define BIG_CHUNK_GAP 3
1298 /* LDV_COMMENT_END_PREP */
1299 /* LDV_COMMENT_FUNCTION_CALL Function from field "free" from driver structure with callbacks "z3fold_zpool_driver" */
1300 ldv_handler_precall();
1301 z3fold_zpool_free( var_z3fold_zpool_free_24_p0, var_z3fold_zpool_free_24_p1);
1302
1303
1304
1305
1306 }
1307
1308 break;
1309 case 5: {
1310
1311 /** STRUCT: struct type: zpool_driver, struct name: z3fold_zpool_driver **/
1312
1313
1314 /* content: static int z3fold_zpool_shrink(void *pool, unsigned int pages, unsigned int *reclaimed)*/
1315 /* LDV_COMMENT_BEGIN_PREP */
1316 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
1317 #define NCHUNKS_ORDER 6
1318 #define CHUNK_SHIFT (PAGE_SHIFT - NCHUNKS_ORDER)
1319 #define CHUNK_SIZE (1 << CHUNK_SHIFT)
1320 #define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
1321 #define ZHDR_CHUNKS (ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
1322 #define TOTAL_CHUNKS (PAGE_SIZE >> CHUNK_SHIFT)
1323 #define NCHUNKS ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
1324 #define BUDDY_MASK (0x3)
1325 #define for_each_unbuddied_list(_iter, _begin) \
1326 for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)
1327 #define BIG_CHUNK_GAP 3
1328 /* LDV_COMMENT_END_PREP */
1329 /* LDV_COMMENT_FUNCTION_CALL Function from field "shrink" from driver structure with callbacks "z3fold_zpool_driver" */
1330 ldv_handler_precall();
1331 z3fold_zpool_shrink( var_z3fold_zpool_shrink_25_p0, var_z3fold_zpool_shrink_25_p1, var_z3fold_zpool_shrink_25_p2);
1332
1333
1334
1335
1336 }
1337
1338 break;
1339 case 6: {
1340
1341 /** STRUCT: struct type: zpool_driver, struct name: z3fold_zpool_driver **/
1342
1343
1344 /* content: static void *z3fold_zpool_map(void *pool, unsigned long handle, enum zpool_mapmode mm)*/
1345 /* LDV_COMMENT_BEGIN_PREP */
1346 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
1347 #define NCHUNKS_ORDER 6
1348 #define CHUNK_SHIFT (PAGE_SHIFT - NCHUNKS_ORDER)
1349 #define CHUNK_SIZE (1 << CHUNK_SHIFT)
1350 #define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
1351 #define ZHDR_CHUNKS (ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
1352 #define TOTAL_CHUNKS (PAGE_SIZE >> CHUNK_SHIFT)
1353 #define NCHUNKS ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
1354 #define BUDDY_MASK (0x3)
1355 #define for_each_unbuddied_list(_iter, _begin) \
1356 for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)
1357 #define BIG_CHUNK_GAP 3
1358 /* LDV_COMMENT_END_PREP */
1359 /* LDV_COMMENT_FUNCTION_CALL Function from field "map" from driver structure with callbacks "z3fold_zpool_driver" */
1360 ldv_handler_precall();
1361 z3fold_zpool_map( var_z3fold_zpool_map_26_p0, var_z3fold_zpool_map_26_p1, var_z3fold_zpool_map_26_p2);
1362
1363
1364
1365
1366 }
1367
1368 break;
1369 case 7: {
1370
1371 /** STRUCT: struct type: zpool_driver, struct name: z3fold_zpool_driver **/
1372
1373
1374 /* content: static void z3fold_zpool_unmap(void *pool, unsigned long handle)*/
1375 /* LDV_COMMENT_BEGIN_PREP */
1376 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
1377 #define NCHUNKS_ORDER 6
1378 #define CHUNK_SHIFT (PAGE_SHIFT - NCHUNKS_ORDER)
1379 #define CHUNK_SIZE (1 << CHUNK_SHIFT)
1380 #define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
1381 #define ZHDR_CHUNKS (ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
1382 #define TOTAL_CHUNKS (PAGE_SIZE >> CHUNK_SHIFT)
1383 #define NCHUNKS ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
1384 #define BUDDY_MASK (0x3)
1385 #define for_each_unbuddied_list(_iter, _begin) \
1386 for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)
1387 #define BIG_CHUNK_GAP 3
1388 /* LDV_COMMENT_END_PREP */
1389 /* LDV_COMMENT_FUNCTION_CALL Function from field "unmap" from driver structure with callbacks "z3fold_zpool_driver" */
1390 ldv_handler_precall();
1391 z3fold_zpool_unmap( var_z3fold_zpool_unmap_27_p0, var_z3fold_zpool_unmap_27_p1);
1392
1393
1394
1395
1396 }
1397
1398 break;
1399 case 8: {
1400
1401 /** STRUCT: struct type: zpool_driver, struct name: z3fold_zpool_driver **/
1402
1403
1404 /* content: static u64 z3fold_zpool_total_size(void *pool)*/
1405 /* LDV_COMMENT_BEGIN_PREP */
1406 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
1407 #define NCHUNKS_ORDER 6
1408 #define CHUNK_SHIFT (PAGE_SHIFT - NCHUNKS_ORDER)
1409 #define CHUNK_SIZE (1 << CHUNK_SHIFT)
1410 #define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
1411 #define ZHDR_CHUNKS (ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
1412 #define TOTAL_CHUNKS (PAGE_SIZE >> CHUNK_SHIFT)
1413 #define NCHUNKS ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
1414 #define BUDDY_MASK (0x3)
1415 #define for_each_unbuddied_list(_iter, _begin) \
1416 for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)
1417 #define BIG_CHUNK_GAP 3
1418 /* LDV_COMMENT_END_PREP */
1419 /* LDV_COMMENT_FUNCTION_CALL Function from field "total_size" from driver structure with callbacks "z3fold_zpool_driver" */
1420 ldv_handler_precall();
1421 z3fold_zpool_total_size( var_z3fold_zpool_total_size_28_p0);
1422
1423
1424
1425
1426 }
1427
1428 break;
1429 default: break;
1430
1431 }
1432
1433 }
1434
1435 ldv_module_exit:
1436
1437 /** INIT: init_type: ST_MODULE_EXIT **/
1438 /* content: static void __exit exit_z3fold(void)*/
1439 /* LDV_COMMENT_BEGIN_PREP */
1440 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
1441 #define NCHUNKS_ORDER 6
1442 #define CHUNK_SHIFT (PAGE_SHIFT - NCHUNKS_ORDER)
1443 #define CHUNK_SIZE (1 << CHUNK_SHIFT)
1444 #define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
1445 #define ZHDR_CHUNKS (ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
1446 #define TOTAL_CHUNKS (PAGE_SIZE >> CHUNK_SHIFT)
1447 #define NCHUNKS ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
1448 #define BUDDY_MASK (0x3)
1449 #define for_each_unbuddied_list(_iter, _begin) \
1450 for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)
1451 #define BIG_CHUNK_GAP 3
1452 /* LDV_COMMENT_END_PREP */
1453 /* LDV_COMMENT_FUNCTION_CALL Kernel calls driver release function before driver will be uploaded from kernel. This function declared as "MODULE_EXIT(function name)". */
1454 ldv_handler_precall();
1455 exit_z3fold();
1456
1457 /* LDV_COMMENT_FUNCTION_CALL Checks that all resources and locks are correctly released before the driver will be unloaded. */
1458 ldv_final: ldv_check_final_state();
1459
1460 /* LDV_COMMENT_END_FUNCTION_CALL_SECTION */
1461 return;
1462
1463 }
1464 #endif
1465
1466 /* LDV_COMMENT_END_MAIN */ 1
2 #include <linux/kernel.h>
3 bool ldv_is_err(const void *ptr);
4 bool ldv_is_err_or_null(const void *ptr);
5 void* ldv_err_ptr(long error);
6 long ldv_ptr_err(const void *ptr);
7
8 #include <linux/kernel.h>
9 #include <linux/spinlock.h>
10
11 extern void __ldv_spin_lock(spinlock_t *lock);
12 extern void __ldv_spin_unlock(spinlock_t *lock);
13 extern int __ldv_spin_trylock(spinlock_t *lock);
14 extern void __ldv_spin_unlock_wait(spinlock_t *lock);
15 extern void __ldv_spin_can_lock(spinlock_t *lock);
16 extern int __ldv_atomic_dec_and_lock(spinlock_t *lock);
17
18 extern void ldv_spin_lock_lock_of_NOT_ARG_SIGN(void);
19 extern void ldv_spin_unlock_lock_of_NOT_ARG_SIGN(void);
20 extern int ldv_spin_trylock_lock_of_NOT_ARG_SIGN(void);
21 extern void ldv_spin_unlock_wait_lock_of_NOT_ARG_SIGN(void);
22 extern int ldv_spin_is_locked_lock_of_NOT_ARG_SIGN(void);
23 extern int ldv_spin_can_lock_lock_of_NOT_ARG_SIGN(void);
24 extern int ldv_spin_is_contended_lock_of_NOT_ARG_SIGN(void);
25 extern int ldv_atomic_dec_and_lock_lock_of_NOT_ARG_SIGN(void);
26 extern void ldv_spin_lock_lock_of_z3fold_pool(void);
27 extern void ldv_spin_unlock_lock_of_z3fold_pool(void);
28 extern int ldv_spin_trylock_lock_of_z3fold_pool(void);
29 extern void ldv_spin_unlock_wait_lock_of_z3fold_pool(void);
30 extern int ldv_spin_is_locked_lock_of_z3fold_pool(void);
31 extern int ldv_spin_can_lock_lock_of_z3fold_pool(void);
32 extern int ldv_spin_is_contended_lock_of_z3fold_pool(void);
33 extern int ldv_atomic_dec_and_lock_lock_of_z3fold_pool(void);
34 extern void ldv_spin_lock_node_size_lock_of_pglist_data(void);
35 extern void ldv_spin_unlock_node_size_lock_of_pglist_data(void);
36 extern int ldv_spin_trylock_node_size_lock_of_pglist_data(void);
37 extern void ldv_spin_unlock_wait_node_size_lock_of_pglist_data(void);
38 extern int ldv_spin_is_locked_node_size_lock_of_pglist_data(void);
39 extern int ldv_spin_can_lock_node_size_lock_of_pglist_data(void);
40 extern int ldv_spin_is_contended_node_size_lock_of_pglist_data(void);
41 extern int ldv_atomic_dec_and_lock_node_size_lock_of_pglist_data(void);
42 extern void ldv_spin_lock_page_lock_of_z3fold_header(void);
43 extern void ldv_spin_unlock_page_lock_of_z3fold_header(void);
44 extern int ldv_spin_trylock_page_lock_of_z3fold_header(void);
45 extern void ldv_spin_unlock_wait_page_lock_of_z3fold_header(void);
46 extern int ldv_spin_is_locked_page_lock_of_z3fold_header(void);
47 extern int ldv_spin_can_lock_page_lock_of_z3fold_header(void);
48 extern int ldv_spin_is_contended_page_lock_of_z3fold_header(void);
49 extern int ldv_atomic_dec_and_lock_page_lock_of_z3fold_header(void);
50 extern void ldv_spin_lock_ptl(void);
51 extern void ldv_spin_unlock_ptl(void);
52 extern int ldv_spin_trylock_ptl(void);
53 extern void ldv_spin_unlock_wait_ptl(void);
54 extern int ldv_spin_is_locked_ptl(void);
55 extern int ldv_spin_can_lock_ptl(void);
56 extern int ldv_spin_is_contended_ptl(void);
57 extern int ldv_atomic_dec_and_lock_ptl(void);
58 #line 1 "/work/ldvuser/ref_launch/work/current--X--mm--X--defaultlinux-4.11-rc1.tar.xz--X--39_7a--X--cpachecker/linux-4.11-rc1.tar.xz/csd_deg_dscv/19/dscv_tempdir/dscv/ri/39_7a/mm/z3fold.c"
59
60 /*
61 * z3fold.c
62 *
63 * Author: Vitaly Wool <vitaly.wool@konsulko.com>
64 * Copyright (C) 2016, Sony Mobile Communications Inc.
65 *
66 * This implementation is based on zbud written by Seth Jennings.
67 *
68 * z3fold is an special purpose allocator for storing compressed pages. It
69 * can store up to three compressed pages per page which improves the
70 * compression ratio of zbud while retaining its main concepts (e. g. always
71 * storing an integral number of objects per page) and simplicity.
72 * It still has simple and deterministic reclaim properties that make it
73 * preferable to a higher density approach (with no requirement on integral
74 * number of object per page) when reclaim is used.
75 *
76 * As in zbud, pages are divided into "chunks". The size of the chunks is
77 * fixed at compile time and is determined by NCHUNKS_ORDER below.
78 *
79 * z3fold doesn't export any API and is meant to be used via zpool API.
80 */
81
82 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
83
84 #include <linux/atomic.h>
85 #include <linux/list.h>
86 #include <linux/mm.h>
87 #include <linux/module.h>
88 #include <linux/preempt.h>
89 #include <linux/slab.h>
90 #include <linux/spinlock.h>
91 #include <linux/zpool.h>
92
93 /*****************
94 * Structures
95 *****************/
96 struct z3fold_pool;
97 struct z3fold_ops {
98 int (*evict)(struct z3fold_pool *pool, unsigned long handle);
99 };
100
101 enum buddy {
102 HEADLESS = 0,
103 FIRST,
104 MIDDLE,
105 LAST,
106 BUDDIES_MAX
107 };
108
109 /*
110 * struct z3fold_header - z3fold page metadata occupying the first chunk of each
111 * z3fold page, except for HEADLESS pages
112 * @buddy: links the z3fold page into the relevant list in the pool
113 * @page_lock: per-page lock
114 * @refcount: reference cound for the z3fold page
115 * @first_chunks: the size of the first buddy in chunks, 0 if free
116 * @middle_chunks: the size of the middle buddy in chunks, 0 if free
117 * @last_chunks: the size of the last buddy in chunks, 0 if free
118 * @first_num: the starting number (for the first handle)
119 */
120 struct z3fold_header {
121 struct list_head buddy;
122 spinlock_t page_lock;
123 struct kref refcount;
124 unsigned short first_chunks;
125 unsigned short middle_chunks;
126 unsigned short last_chunks;
127 unsigned short start_middle;
128 unsigned short first_num:2;
129 };
130
131 /*
132 * NCHUNKS_ORDER determines the internal allocation granularity, effectively
133 * adjusting internal fragmentation. It also determines the number of
134 * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
135 * allocation granularity will be in chunks of size PAGE_SIZE/64. Some chunks
136 * in the beginning of an allocated page are occupied by z3fold header, so
137 * NCHUNKS will be calculated to 63 (or 62 in case CONFIG_DEBUG_SPINLOCK=y),
138 * which shows the max number of free chunks in z3fold page, also there will
139 * be 63, or 62, respectively, freelists per pool.
140 */
141 #define NCHUNKS_ORDER 6
142
143 #define CHUNK_SHIFT (PAGE_SHIFT - NCHUNKS_ORDER)
144 #define CHUNK_SIZE (1 << CHUNK_SHIFT)
145 #define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
146 #define ZHDR_CHUNKS (ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
147 #define TOTAL_CHUNKS (PAGE_SIZE >> CHUNK_SHIFT)
148 #define NCHUNKS ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
149
150 #define BUDDY_MASK (0x3)
151
152 /**
153 * struct z3fold_pool - stores metadata for each z3fold pool
154 * @lock: protects all pool fields and first|last_chunk fields of any
155 * z3fold page in the pool
156 * @unbuddied: array of lists tracking z3fold pages that contain 2- buddies;
157 * the lists each z3fold page is added to depends on the size of
158 * its free region.
159 * @lru: list tracking the z3fold pages in LRU order by most recently
160 * added buddy.
161 * @pages_nr: number of z3fold pages in the pool.
162 * @ops: pointer to a structure of user defined operations specified at
163 * pool creation time.
164 *
165 * This structure is allocated at pool creation time and maintains metadata
166 * pertaining to a particular z3fold pool.
167 */
168 struct z3fold_pool {
169 spinlock_t lock;
170 struct list_head unbuddied[NCHUNKS];
171 struct list_head lru;
172 atomic64_t pages_nr;
173 const struct z3fold_ops *ops;
174 struct zpool *zpool;
175 const struct zpool_ops *zpool_ops;
176 };
177
178 /*
179 * Internal z3fold page flags
180 */
181 enum z3fold_page_flags {
182 PAGE_HEADLESS = 0,
183 MIDDLE_CHUNK_MAPPED,
184 };
185
186
187 /*****************
188 * Helpers
189 *****************/
190
191 /* Converts an allocation size in bytes to size in z3fold chunks */
192 static int size_to_chunks(size_t size)
193 {
194 return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
195 }
196
197 #define for_each_unbuddied_list(_iter, _begin) \
198 for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)
199
200 /* Initializes the z3fold header of a newly allocated z3fold page */
201 static struct z3fold_header *init_z3fold_page(struct page *page)
202 {
203 struct z3fold_header *zhdr = page_address(page);
204
205 INIT_LIST_HEAD(&page->lru);
206 clear_bit(PAGE_HEADLESS, &page->private);
207 clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
208
209 spin_lock_init(&zhdr->page_lock);
210 kref_init(&zhdr->refcount);
211 zhdr->first_chunks = 0;
212 zhdr->middle_chunks = 0;
213 zhdr->last_chunks = 0;
214 zhdr->first_num = 0;
215 zhdr->start_middle = 0;
216 INIT_LIST_HEAD(&zhdr->buddy);
217 return zhdr;
218 }
219
220 /* Resets the struct page fields and frees the page */
221 static void free_z3fold_page(struct page *page)
222 {
223 __free_page(page);
224 }
225
226 static void release_z3fold_page(struct kref *ref)
227 {
228 struct z3fold_header *zhdr;
229 struct page *page;
230
231 zhdr = container_of(ref, struct z3fold_header, refcount);
232 page = virt_to_page(zhdr);
233
234 if (!list_empty(&zhdr->buddy))
235 list_del(&zhdr->buddy);
236 if (!list_empty(&page->lru))
237 list_del(&page->lru);
238 free_z3fold_page(page);
239 }
240
241 /* Lock a z3fold page */
242 static inline void z3fold_page_lock(struct z3fold_header *zhdr)
243 {
244 spin_lock(&zhdr->page_lock);
245 }
246
247 /* Unlock a z3fold page */
248 static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
249 {
250 spin_unlock(&zhdr->page_lock);
251 }
252
253 /*
254 * Encodes the handle of a particular buddy within a z3fold page
255 * Pool lock should be held as this function accesses first_num
256 */
257 static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
258 {
259 unsigned long handle;
260
261 handle = (unsigned long)zhdr;
262 if (bud != HEADLESS)
263 handle += (bud + zhdr->first_num) & BUDDY_MASK;
264 return handle;
265 }
266
267 /* Returns the z3fold page where a given handle is stored */
268 static struct z3fold_header *handle_to_z3fold_header(unsigned long handle)
269 {
270 return (struct z3fold_header *)(handle & PAGE_MASK);
271 }
272
273 /*
274 * (handle & BUDDY_MASK) < zhdr->first_num is possible in encode_handle
275 * but that doesn't matter. because the masking will result in the
276 * correct buddy number.
277 */
278 static enum buddy handle_to_buddy(unsigned long handle)
279 {
280 struct z3fold_header *zhdr = handle_to_z3fold_header(handle);
281 return (handle - zhdr->first_num) & BUDDY_MASK;
282 }
283
284 /*
285 * Returns the number of free chunks in a z3fold page.
286 * NB: can't be used with HEADLESS pages.
287 */
288 static int num_free_chunks(struct z3fold_header *zhdr)
289 {
290 int nfree;
291 /*
292 * If there is a middle object, pick up the bigger free space
293 * either before or after it. Otherwise just subtract the number
294 * of chunks occupied by the first and the last objects.
295 */
296 if (zhdr->middle_chunks != 0) {
297 int nfree_before = zhdr->first_chunks ?
298 0 : zhdr->start_middle - ZHDR_CHUNKS;
299 int nfree_after = zhdr->last_chunks ?
300 0 : TOTAL_CHUNKS -
301 (zhdr->start_middle + zhdr->middle_chunks);
302 nfree = max(nfree_before, nfree_after);
303 } else
304 nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
305 return nfree;
306 }
307
308 /*****************
309 * API Functions
310 *****************/
311 /**
312 * z3fold_create_pool() - create a new z3fold pool
313 * @gfp: gfp flags when allocating the z3fold pool structure
314 * @ops: user-defined operations for the z3fold pool
315 *
316 * Return: pointer to the new z3fold pool or NULL if the metadata allocation
317 * failed.
318 */
319 static struct z3fold_pool *z3fold_create_pool(gfp_t gfp,
320 const struct z3fold_ops *ops)
321 {
322 struct z3fold_pool *pool;
323 int i;
324
325 pool = kzalloc(sizeof(struct z3fold_pool), gfp);
326 if (!pool)
327 return NULL;
328 spin_lock_init(&pool->lock);
329 for_each_unbuddied_list(i, 0)
330 INIT_LIST_HEAD(&pool->unbuddied[i]);
331 INIT_LIST_HEAD(&pool->lru);
332 atomic64_set(&pool->pages_nr, 0);
333 pool->ops = ops;
334 return pool;
335 }
336
337 /**
338 * z3fold_destroy_pool() - destroys an existing z3fold pool
339 * @pool: the z3fold pool to be destroyed
340 *
341 * The pool should be emptied before this function is called.
342 */
343 static void z3fold_destroy_pool(struct z3fold_pool *pool)
344 {
345 kfree(pool);
346 }
347
348 static inline void *mchunk_memmove(struct z3fold_header *zhdr,
349 unsigned short dst_chunk)
350 {
351 void *beg = zhdr;
352 return memmove(beg + (dst_chunk << CHUNK_SHIFT),
353 beg + (zhdr->start_middle << CHUNK_SHIFT),
354 zhdr->middle_chunks << CHUNK_SHIFT);
355 }
356
357 #define BIG_CHUNK_GAP 3
358 /* Has to be called with lock held */
359 static int z3fold_compact_page(struct z3fold_header *zhdr)
360 {
361 struct page *page = virt_to_page(zhdr);
362
363 if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private))
364 return 0; /* can't move middle chunk, it's used */
365
366 if (zhdr->middle_chunks == 0)
367 return 0; /* nothing to compact */
368
369 if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
370 /* move to the beginning */
371 mchunk_memmove(zhdr, ZHDR_CHUNKS);
372 zhdr->first_chunks = zhdr->middle_chunks;
373 zhdr->middle_chunks = 0;
374 zhdr->start_middle = 0;
375 zhdr->first_num++;
376 return 1;
377 }
378
379 /*
380 * moving data is expensive, so let's only do that if
381 * there's substantial gain (at least BIG_CHUNK_GAP chunks)
382 */
383 if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 &&
384 zhdr->start_middle - (zhdr->first_chunks + ZHDR_CHUNKS) >=
385 BIG_CHUNK_GAP) {
386 mchunk_memmove(zhdr, zhdr->first_chunks + ZHDR_CHUNKS);
387 zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
388 return 1;
389 } else if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 &&
390 TOTAL_CHUNKS - (zhdr->last_chunks + zhdr->start_middle
391 + zhdr->middle_chunks) >=
392 BIG_CHUNK_GAP) {
393 unsigned short new_start = TOTAL_CHUNKS - zhdr->last_chunks -
394 zhdr->middle_chunks;
395 mchunk_memmove(zhdr, new_start);
396 zhdr->start_middle = new_start;
397 return 1;
398 }
399
400 return 0;
401 }
402
403 /**
404 * z3fold_alloc() - allocates a region of a given size
405 * @pool: z3fold pool from which to allocate
406 * @size: size in bytes of the desired allocation
407 * @gfp: gfp flags used if the pool needs to grow
408 * @handle: handle of the new allocation
409 *
410 * This function will attempt to find a free region in the pool large enough to
411 * satisfy the allocation request. A search of the unbuddied lists is
412 * performed first. If no suitable free region is found, then a new page is
413 * allocated and added to the pool to satisfy the request.
414 *
415 * gfp should not set __GFP_HIGHMEM as highmem pages cannot be used
416 * as z3fold pool pages.
417 *
418 * Return: 0 if success and handle is set, otherwise -EINVAL if the size or
419 * gfp arguments are invalid or -ENOMEM if the pool was unable to allocate
420 * a new page.
421 */
422 static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
423 unsigned long *handle)
424 {
425 int chunks = 0, i, freechunks;
426 struct z3fold_header *zhdr = NULL;
427 enum buddy bud;
428 struct page *page;
429
430 if (!size || (gfp & __GFP_HIGHMEM))
431 return -EINVAL;
432
433 if (size > PAGE_SIZE)
434 return -ENOSPC;
435
436 if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
437 bud = HEADLESS;
438 else {
439 chunks = size_to_chunks(size);
440
441 /* First, try to find an unbuddied z3fold page. */
442 zhdr = NULL;
443 for_each_unbuddied_list(i, chunks) {
444 spin_lock(&pool->lock);
445 zhdr = list_first_entry_or_null(&pool->unbuddied[i],
446 struct z3fold_header, buddy);
447 if (!zhdr) {
448 spin_unlock(&pool->lock);
449 continue;
450 }
451 kref_get(&zhdr->refcount);
452 list_del_init(&zhdr->buddy);
453 spin_unlock(&pool->lock);
454
455 page = virt_to_page(zhdr);
456 z3fold_page_lock(zhdr);
457 if (zhdr->first_chunks == 0) {
458 if (zhdr->middle_chunks != 0 &&
459 chunks >= zhdr->start_middle)
460 bud = LAST;
461 else
462 bud = FIRST;
463 } else if (zhdr->last_chunks == 0)
464 bud = LAST;
465 else if (zhdr->middle_chunks == 0)
466 bud = MIDDLE;
467 else {
468 z3fold_page_unlock(zhdr);
469 spin_lock(&pool->lock);
470 if (kref_put(&zhdr->refcount,
471 release_z3fold_page))
472 atomic64_dec(&pool->pages_nr);
473 spin_unlock(&pool->lock);
474 pr_err("No free chunks in unbuddied\n");
475 WARN_ON(1);
476 continue;
477 }
478 goto found;
479 }
480 bud = FIRST;
481 }
482
483 /* Couldn't find unbuddied z3fold page, create new one */
484 page = alloc_page(gfp);
485 if (!page)
486 return -ENOMEM;
487
488 atomic64_inc(&pool->pages_nr);
489 zhdr = init_z3fold_page(page);
490
491 if (bud == HEADLESS) {
492 set_bit(PAGE_HEADLESS, &page->private);
493 spin_lock(&pool->lock);
494 goto headless;
495 }
496 z3fold_page_lock(zhdr);
497
498 found:
499 if (bud == FIRST)
500 zhdr->first_chunks = chunks;
501 else if (bud == LAST)
502 zhdr->last_chunks = chunks;
503 else {
504 zhdr->middle_chunks = chunks;
505 zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
506 }
507
508 spin_lock(&pool->lock);
509 if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
510 zhdr->middle_chunks == 0) {
511 /* Add to unbuddied list */
512 freechunks = num_free_chunks(zhdr);
513 list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
514 }
515
516 headless:
517 /* Add/move z3fold page to beginning of LRU */
518 if (!list_empty(&page->lru))
519 list_del(&page->lru);
520
521 list_add(&page->lru, &pool->lru);
522
523 *handle = encode_handle(zhdr, bud);
524 spin_unlock(&pool->lock);
525 if (bud != HEADLESS)
526 z3fold_page_unlock(zhdr);
527
528 return 0;
529 }
530
531 /**
532 * z3fold_free() - frees the allocation associated with the given handle
533 * @pool: pool in which the allocation resided
534 * @handle: handle associated with the allocation returned by z3fold_alloc()
535 *
536 * In the case that the z3fold page in which the allocation resides is under
537 * reclaim, as indicated by the PG_reclaim flag being set, this function
538 * only sets the first|last_chunks to 0. The page is actually freed
539 * once both buddies are evicted (see z3fold_reclaim_page() below).
540 */
541 static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
542 {
543 struct z3fold_header *zhdr;
544 int freechunks;
545 struct page *page;
546 enum buddy bud;
547
548 zhdr = handle_to_z3fold_header(handle);
549 page = virt_to_page(zhdr);
550
551 if (test_bit(PAGE_HEADLESS, &page->private)) {
552 /* HEADLESS page stored */
553 bud = HEADLESS;
554 } else {
555 z3fold_page_lock(zhdr);
556 bud = handle_to_buddy(handle);
557
558 switch (bud) {
559 case FIRST:
560 zhdr->first_chunks = 0;
561 break;
562 case MIDDLE:
563 zhdr->middle_chunks = 0;
564 zhdr->start_middle = 0;
565 break;
566 case LAST:
567 zhdr->last_chunks = 0;
568 break;
569 default:
570 pr_err("%s: unknown bud %d\n", __func__, bud);
571 WARN_ON(1);
572 z3fold_page_unlock(zhdr);
573 return;
574 }
575 }
576
577 if (bud == HEADLESS) {
578 spin_lock(&pool->lock);
579 list_del(&page->lru);
580 spin_unlock(&pool->lock);
581 free_z3fold_page(page);
582 atomic64_dec(&pool->pages_nr);
583 } else {
584 if (zhdr->first_chunks != 0 || zhdr->middle_chunks != 0 ||
585 zhdr->last_chunks != 0) {
586 z3fold_compact_page(zhdr);
587 /* Add to the unbuddied list */
588 spin_lock(&pool->lock);
589 if (!list_empty(&zhdr->buddy))
590 list_del(&zhdr->buddy);
591 freechunks = num_free_chunks(zhdr);
592 list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
593 spin_unlock(&pool->lock);
594 }
595 z3fold_page_unlock(zhdr);
596 spin_lock(&pool->lock);
597 if (kref_put(&zhdr->refcount, release_z3fold_page))
598 atomic64_dec(&pool->pages_nr);
599 spin_unlock(&pool->lock);
600 }
601
602 }
603
604 /**
605 * z3fold_reclaim_page() - evicts allocations from a pool page and frees it
606 * @pool: pool from which a page will attempt to be evicted
607 * @retires: number of pages on the LRU list for which eviction will
608 * be attempted before failing
609 *
610 * z3fold reclaim is different from normal system reclaim in that it is done
611 * from the bottom, up. This is because only the bottom layer, z3fold, has
612 * information on how the allocations are organized within each z3fold page.
613 * This has the potential to create interesting locking situations between
614 * z3fold and the user, however.
615 *
616 * To avoid these, this is how z3fold_reclaim_page() should be called:
617
618 * The user detects a page should be reclaimed and calls z3fold_reclaim_page().
619 * z3fold_reclaim_page() will remove a z3fold page from the pool LRU list and
620 * call the user-defined eviction handler with the pool and handle as
621 * arguments.
622 *
623 * If the handle can not be evicted, the eviction handler should return
624 * non-zero. z3fold_reclaim_page() will add the z3fold page back to the
625 * appropriate list and try the next z3fold page on the LRU up to
626 * a user defined number of retries.
627 *
628 * If the handle is successfully evicted, the eviction handler should
629 * return 0 _and_ should have called z3fold_free() on the handle. z3fold_free()
630 * contains logic to delay freeing the page if the page is under reclaim,
631 * as indicated by the setting of the PG_reclaim flag on the underlying page.
632 *
633 * If all buddies in the z3fold page are successfully evicted, then the
634 * z3fold page can be freed.
635 *
636 * Returns: 0 if page is successfully freed, otherwise -EINVAL if there are
637 * no pages to evict or an eviction handler is not registered, -EAGAIN if
638 * the retry limit was hit.
639 */
640 static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
641 {
642 int i, ret = 0, freechunks;
643 struct z3fold_header *zhdr;
644 struct page *page;
645 unsigned long first_handle = 0, middle_handle = 0, last_handle = 0;
646
647 spin_lock(&pool->lock);
648 if (!pool->ops || !pool->ops->evict || retries == 0) {
649 spin_unlock(&pool->lock);
650 return -EINVAL;
651 }
652 for (i = 0; i < retries; i++) {
653 if (list_empty(&pool->lru)) {
654 spin_unlock(&pool->lock);
655 return -EINVAL;
656 }
657 page = list_last_entry(&pool->lru, struct page, lru);
658 list_del_init(&page->lru);
659
660 zhdr = page_address(page);
661 if (!test_bit(PAGE_HEADLESS, &page->private)) {
662 if (!list_empty(&zhdr->buddy))
663 list_del_init(&zhdr->buddy);
664 kref_get(&zhdr->refcount);
665 spin_unlock(&pool->lock);
666 z3fold_page_lock(zhdr);
667 /*
668 * We need encode the handles before unlocking, since
669 * we can race with free that will set
670 * (first|last)_chunks to 0
671 */
672 first_handle = 0;
673 last_handle = 0;
674 middle_handle = 0;
675 if (zhdr->first_chunks)
676 first_handle = encode_handle(zhdr, FIRST);
677 if (zhdr->middle_chunks)
678 middle_handle = encode_handle(zhdr, MIDDLE);
679 if (zhdr->last_chunks)
680 last_handle = encode_handle(zhdr, LAST);
681 z3fold_page_unlock(zhdr);
682 } else {
683 first_handle = encode_handle(zhdr, HEADLESS);
684 last_handle = middle_handle = 0;
685 spin_unlock(&pool->lock);
686 }
687
688 /* Issue the eviction callback(s) */
689 if (middle_handle) {
690 ret = pool->ops->evict(pool, middle_handle);
691 if (ret)
692 goto next;
693 }
694 if (first_handle) {
695 ret = pool->ops->evict(pool, first_handle);
696 if (ret)
697 goto next;
698 }
699 if (last_handle) {
700 ret = pool->ops->evict(pool, last_handle);
701 if (ret)
702 goto next;
703 }
704 next:
705 if (test_bit(PAGE_HEADLESS, &page->private)) {
706 if (ret == 0) {
707 free_z3fold_page(page);
708 return 0;
709 } else {
710 spin_lock(&pool->lock);
711 }
712 } else {
713 z3fold_page_lock(zhdr);
714 if ((zhdr->first_chunks || zhdr->last_chunks ||
715 zhdr->middle_chunks) &&
716 !(zhdr->first_chunks && zhdr->last_chunks &&
717 zhdr->middle_chunks)) {
718 z3fold_compact_page(zhdr);
719 /* add to unbuddied list */
720 spin_lock(&pool->lock);
721 freechunks = num_free_chunks(zhdr);
722 list_add(&zhdr->buddy,
723 &pool->unbuddied[freechunks]);
724 spin_unlock(&pool->lock);
725 }
726 z3fold_page_unlock(zhdr);
727 spin_lock(&pool->lock);
728 if (kref_put(&zhdr->refcount, release_z3fold_page)) {
729 atomic64_dec(&pool->pages_nr);
730 return 0;
731 }
732 }
733
734 /*
735 * Add to the beginning of LRU.
736 * Pool lock has to be kept here to ensure the page has
737 * not already been released
738 */
739 list_add(&page->lru, &pool->lru);
740 }
741 spin_unlock(&pool->lock);
742 return -EAGAIN;
743 }
744
745 /**
746 * z3fold_map() - maps the allocation associated with the given handle
747 * @pool: pool in which the allocation resides
748 * @handle: handle associated with the allocation to be mapped
749 *
750 * Extracts the buddy number from handle and constructs the pointer to the
751 * correct starting chunk within the page.
752 *
753 * Returns: a pointer to the mapped allocation
754 */
755 static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
756 {
757 struct z3fold_header *zhdr;
758 struct page *page;
759 void *addr;
760 enum buddy buddy;
761
762 zhdr = handle_to_z3fold_header(handle);
763 addr = zhdr;
764 page = virt_to_page(zhdr);
765
766 if (test_bit(PAGE_HEADLESS, &page->private))
767 goto out;
768
769 z3fold_page_lock(zhdr);
770 buddy = handle_to_buddy(handle);
771 switch (buddy) {
772 case FIRST:
773 addr += ZHDR_SIZE_ALIGNED;
774 break;
775 case MIDDLE:
776 addr += zhdr->start_middle << CHUNK_SHIFT;
777 set_bit(MIDDLE_CHUNK_MAPPED, &page->private);
778 break;
779 case LAST:
780 addr += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT);
781 break;
782 default:
783 pr_err("unknown buddy id %d\n", buddy);
784 WARN_ON(1);
785 addr = NULL;
786 break;
787 }
788
789 z3fold_page_unlock(zhdr);
790 out:
791 return addr;
792 }
793
794 /**
795 * z3fold_unmap() - unmaps the allocation associated with the given handle
796 * @pool: pool in which the allocation resides
797 * @handle: handle associated with the allocation to be unmapped
798 */
799 static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle)
800 {
801 struct z3fold_header *zhdr;
802 struct page *page;
803 enum buddy buddy;
804
805 zhdr = handle_to_z3fold_header(handle);
806 page = virt_to_page(zhdr);
807
808 if (test_bit(PAGE_HEADLESS, &page->private))
809 return;
810
811 z3fold_page_lock(zhdr);
812 buddy = handle_to_buddy(handle);
813 if (buddy == MIDDLE)
814 clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
815 z3fold_page_unlock(zhdr);
816 }
817
818 /**
819 * z3fold_get_pool_size() - gets the z3fold pool size in pages
820 * @pool: pool whose size is being queried
821 *
822 * Returns: size in pages of the given pool.
823 */
824 static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
825 {
826 return atomic64_read(&pool->pages_nr);
827 }
828
829 /*****************
830 * zpool
831 ****************/
832
833 static int z3fold_zpool_evict(struct z3fold_pool *pool, unsigned long handle)
834 {
835 if (pool->zpool && pool->zpool_ops && pool->zpool_ops->evict)
836 return pool->zpool_ops->evict(pool->zpool, handle);
837 else
838 return -ENOENT;
839 }
840
841 static const struct z3fold_ops z3fold_zpool_ops = {
842 .evict = z3fold_zpool_evict
843 };
844
845 static void *z3fold_zpool_create(const char *name, gfp_t gfp,
846 const struct zpool_ops *zpool_ops,
847 struct zpool *zpool)
848 {
849 struct z3fold_pool *pool;
850
851 pool = z3fold_create_pool(gfp, zpool_ops ? &z3fold_zpool_ops : NULL);
852 if (pool) {
853 pool->zpool = zpool;
854 pool->zpool_ops = zpool_ops;
855 }
856 return pool;
857 }
858
859 static void z3fold_zpool_destroy(void *pool)
860 {
861 z3fold_destroy_pool(pool);
862 }
863
864 static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp,
865 unsigned long *handle)
866 {
867 return z3fold_alloc(pool, size, gfp, handle);
868 }
869 static void z3fold_zpool_free(void *pool, unsigned long handle)
870 {
871 z3fold_free(pool, handle);
872 }
873
874 static int z3fold_zpool_shrink(void *pool, unsigned int pages,
875 unsigned int *reclaimed)
876 {
877 unsigned int total = 0;
878 int ret = -EINVAL;
879
880 while (total < pages) {
881 ret = z3fold_reclaim_page(pool, 8);
882 if (ret < 0)
883 break;
884 total++;
885 }
886
887 if (reclaimed)
888 *reclaimed = total;
889
890 return ret;
891 }
892
893 static void *z3fold_zpool_map(void *pool, unsigned long handle,
894 enum zpool_mapmode mm)
895 {
896 return z3fold_map(pool, handle);
897 }
898 static void z3fold_zpool_unmap(void *pool, unsigned long handle)
899 {
900 z3fold_unmap(pool, handle);
901 }
902
903 static u64 z3fold_zpool_total_size(void *pool)
904 {
905 return z3fold_get_pool_size(pool) * PAGE_SIZE;
906 }
907
908 static struct zpool_driver z3fold_zpool_driver = {
909 .type = "z3fold",
910 .owner = THIS_MODULE,
911 .create = z3fold_zpool_create,
912 .destroy = z3fold_zpool_destroy,
913 .malloc = z3fold_zpool_malloc,
914 .free = z3fold_zpool_free,
915 .shrink = z3fold_zpool_shrink,
916 .map = z3fold_zpool_map,
917 .unmap = z3fold_zpool_unmap,
918 .total_size = z3fold_zpool_total_size,
919 };
920
921 MODULE_ALIAS("zpool-z3fold");
922
923 static int __init init_z3fold(void)
924 {
925 /* Make sure the z3fold header is not larger than the page size */
926 BUILD_BUG_ON(ZHDR_SIZE_ALIGNED > PAGE_SIZE);
927 zpool_register_driver(&z3fold_zpool_driver);
928
929 return 0;
930 }
931
932 static void __exit exit_z3fold(void)
933 {
934 zpool_unregister_driver(&z3fold_zpool_driver);
935 }
936
937 module_init(init_z3fold);
938 module_exit(exit_z3fold);
939
940 MODULE_LICENSE("GPL");
941 MODULE_AUTHOR("Vitaly Wool <vitalywool@gmail.com>");
942 MODULE_DESCRIPTION("3-Fold Allocator for Compressed Pages");
943
944
945
946
947
948 /* LDV_COMMENT_BEGIN_MAIN */
949 #ifdef LDV_MAIN0_sequence_infinite_withcheck_stateful
950
951 /*###########################################################################*/
952
953 /*############## Driver Environment Generator 0.2 output ####################*/
954
955 /*###########################################################################*/
956
957
958
959 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test if all kernel resources are correctly released by driver before driver will be unloaded. */
960 void ldv_check_final_state(void);
961
962 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result. */
963 void ldv_check_return_value(int res);
964
965 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result of probe() function. */
966 void ldv_check_return_value_probe(int res);
967
968 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Initializes the model. */
969 void ldv_initialize(void);
970
971 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Reinitializes the model between distinct model function calls. */
972 void ldv_handler_precall(void);
973
974 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Returns arbitrary interger value. */
975 int nondet_int(void);
976
977 /* LDV_COMMENT_VAR_DECLARE_LDV Special variable for LDV verifier. */
978 int LDV_IN_INTERRUPT;
979
980 /* LDV_COMMENT_FUNCTION_MAIN Main function for LDV verifier. */
981 void ldv_main0_sequence_infinite_withcheck_stateful(void) {
982
983
984
985 /* LDV_COMMENT_BEGIN_VARIABLE_DECLARATION_PART */
986 /*============================= VARIABLE DECLARATION PART =============================*/
987 /** STRUCT: struct type: z3fold_ops, struct name: z3fold_zpool_ops **/
988 /* content: static int z3fold_zpool_evict(struct z3fold_pool *pool, unsigned long handle)*/
989 /* LDV_COMMENT_BEGIN_PREP */
990 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
991 #define NCHUNKS_ORDER 6
992 #define CHUNK_SHIFT (PAGE_SHIFT - NCHUNKS_ORDER)
993 #define CHUNK_SIZE (1 << CHUNK_SHIFT)
994 #define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
995 #define ZHDR_CHUNKS (ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
996 #define TOTAL_CHUNKS (PAGE_SIZE >> CHUNK_SHIFT)
997 #define NCHUNKS ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
998 #define BUDDY_MASK (0x3)
999 #define for_each_unbuddied_list(_iter, _begin) \
1000 for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)
1001 #define BIG_CHUNK_GAP 3
1002 /* LDV_COMMENT_END_PREP */
1003 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "z3fold_zpool_evict" */
1004 struct z3fold_pool * var_group1;
1005 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "z3fold_zpool_evict" */
1006 unsigned long var_z3fold_zpool_evict_20_p1;
1007
1008 /** STRUCT: struct type: zpool_driver, struct name: z3fold_zpool_driver **/
1009 /* content: static void *z3fold_zpool_create(const char *name, gfp_t gfp, const struct zpool_ops *zpool_ops, struct zpool *zpool)*/
1010 /* LDV_COMMENT_BEGIN_PREP */
1011 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
1012 #define NCHUNKS_ORDER 6
1013 #define CHUNK_SHIFT (PAGE_SHIFT - NCHUNKS_ORDER)
1014 #define CHUNK_SIZE (1 << CHUNK_SHIFT)
1015 #define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
1016 #define ZHDR_CHUNKS (ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
1017 #define TOTAL_CHUNKS (PAGE_SIZE >> CHUNK_SHIFT)
1018 #define NCHUNKS ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
1019 #define BUDDY_MASK (0x3)
1020 #define for_each_unbuddied_list(_iter, _begin) \
1021 for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)
1022 #define BIG_CHUNK_GAP 3
1023 /* LDV_COMMENT_END_PREP */
1024 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "z3fold_zpool_create" */
1025 const char * var_z3fold_zpool_create_21_p0;
1026 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "z3fold_zpool_create" */
1027 gfp_t var_z3fold_zpool_create_21_p1;
1028 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "z3fold_zpool_create" */
1029 const struct zpool_ops * var_z3fold_zpool_create_21_p2;
1030 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "z3fold_zpool_create" */
1031 struct zpool * var_z3fold_zpool_create_21_p3;
1032 /* content: static void z3fold_zpool_destroy(void *pool)*/
1033 /* LDV_COMMENT_BEGIN_PREP */
1034 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
1035 #define NCHUNKS_ORDER 6
1036 #define CHUNK_SHIFT (PAGE_SHIFT - NCHUNKS_ORDER)
1037 #define CHUNK_SIZE (1 << CHUNK_SHIFT)
1038 #define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
1039 #define ZHDR_CHUNKS (ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
1040 #define TOTAL_CHUNKS (PAGE_SIZE >> CHUNK_SHIFT)
1041 #define NCHUNKS ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
1042 #define BUDDY_MASK (0x3)
1043 #define for_each_unbuddied_list(_iter, _begin) \
1044 for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)
1045 #define BIG_CHUNK_GAP 3
1046 /* LDV_COMMENT_END_PREP */
1047 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "z3fold_zpool_destroy" */
1048 void * var_z3fold_zpool_destroy_22_p0;
1049 /* content: static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp, unsigned long *handle)*/
1050 /* LDV_COMMENT_BEGIN_PREP */
1051 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
1052 #define NCHUNKS_ORDER 6
1053 #define CHUNK_SHIFT (PAGE_SHIFT - NCHUNKS_ORDER)
1054 #define CHUNK_SIZE (1 << CHUNK_SHIFT)
1055 #define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
1056 #define ZHDR_CHUNKS (ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
1057 #define TOTAL_CHUNKS (PAGE_SIZE >> CHUNK_SHIFT)
1058 #define NCHUNKS ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
1059 #define BUDDY_MASK (0x3)
1060 #define for_each_unbuddied_list(_iter, _begin) \
1061 for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)
1062 #define BIG_CHUNK_GAP 3
1063 /* LDV_COMMENT_END_PREP */
1064 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "z3fold_zpool_malloc" */
1065 void * var_z3fold_zpool_malloc_23_p0;
1066 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "z3fold_zpool_malloc" */
1067 size_t var_z3fold_zpool_malloc_23_p1;
1068 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "z3fold_zpool_malloc" */
1069 gfp_t var_z3fold_zpool_malloc_23_p2;
1070 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "z3fold_zpool_malloc" */
1071 unsigned long * var_z3fold_zpool_malloc_23_p3;
1072 /* content: static void z3fold_zpool_free(void *pool, unsigned long handle)*/
1073 /* LDV_COMMENT_BEGIN_PREP */
1074 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
1075 #define NCHUNKS_ORDER 6
1076 #define CHUNK_SHIFT (PAGE_SHIFT - NCHUNKS_ORDER)
1077 #define CHUNK_SIZE (1 << CHUNK_SHIFT)
1078 #define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
1079 #define ZHDR_CHUNKS (ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
1080 #define TOTAL_CHUNKS (PAGE_SIZE >> CHUNK_SHIFT)
1081 #define NCHUNKS ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
1082 #define BUDDY_MASK (0x3)
1083 #define for_each_unbuddied_list(_iter, _begin) \
1084 for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)
1085 #define BIG_CHUNK_GAP 3
1086 /* LDV_COMMENT_END_PREP */
1087 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "z3fold_zpool_free" */
1088 void * var_z3fold_zpool_free_24_p0;
1089 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "z3fold_zpool_free" */
1090 unsigned long var_z3fold_zpool_free_24_p1;
1091 /* content: static int z3fold_zpool_shrink(void *pool, unsigned int pages, unsigned int *reclaimed)*/
1092 /* LDV_COMMENT_BEGIN_PREP */
1093 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
1094 #define NCHUNKS_ORDER 6
1095 #define CHUNK_SHIFT (PAGE_SHIFT - NCHUNKS_ORDER)
1096 #define CHUNK_SIZE (1 << CHUNK_SHIFT)
1097 #define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
1098 #define ZHDR_CHUNKS (ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
1099 #define TOTAL_CHUNKS (PAGE_SIZE >> CHUNK_SHIFT)
1100 #define NCHUNKS ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
1101 #define BUDDY_MASK (0x3)
1102 #define for_each_unbuddied_list(_iter, _begin) \
1103 for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)
1104 #define BIG_CHUNK_GAP 3
1105 /* LDV_COMMENT_END_PREP */
1106 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "z3fold_zpool_shrink" */
1107 void * var_z3fold_zpool_shrink_25_p0;
1108 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "z3fold_zpool_shrink" */
1109 unsigned int var_z3fold_zpool_shrink_25_p1;
1110 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "z3fold_zpool_shrink" */
1111 unsigned int * var_z3fold_zpool_shrink_25_p2;
1112 /* content: static void *z3fold_zpool_map(void *pool, unsigned long handle, enum zpool_mapmode mm)*/
1113 /* LDV_COMMENT_BEGIN_PREP */
1114 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
1115 #define NCHUNKS_ORDER 6
1116 #define CHUNK_SHIFT (PAGE_SHIFT - NCHUNKS_ORDER)
1117 #define CHUNK_SIZE (1 << CHUNK_SHIFT)
1118 #define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
1119 #define ZHDR_CHUNKS (ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
1120 #define TOTAL_CHUNKS (PAGE_SIZE >> CHUNK_SHIFT)
1121 #define NCHUNKS ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
1122 #define BUDDY_MASK (0x3)
1123 #define for_each_unbuddied_list(_iter, _begin) \
1124 for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)
1125 #define BIG_CHUNK_GAP 3
1126 /* LDV_COMMENT_END_PREP */
1127 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "z3fold_zpool_map" */
1128 void * var_z3fold_zpool_map_26_p0;
1129 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "z3fold_zpool_map" */
1130 unsigned long var_z3fold_zpool_map_26_p1;
1131 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "z3fold_zpool_map" */
1132 enum zpool_mapmode var_z3fold_zpool_map_26_p2;
1133 /* content: static void z3fold_zpool_unmap(void *pool, unsigned long handle)*/
1134 /* LDV_COMMENT_BEGIN_PREP */
1135 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
1136 #define NCHUNKS_ORDER 6
1137 #define CHUNK_SHIFT (PAGE_SHIFT - NCHUNKS_ORDER)
1138 #define CHUNK_SIZE (1 << CHUNK_SHIFT)
1139 #define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
1140 #define ZHDR_CHUNKS (ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
1141 #define TOTAL_CHUNKS (PAGE_SIZE >> CHUNK_SHIFT)
1142 #define NCHUNKS ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
1143 #define BUDDY_MASK (0x3)
1144 #define for_each_unbuddied_list(_iter, _begin) \
1145 for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)
1146 #define BIG_CHUNK_GAP 3
1147 /* LDV_COMMENT_END_PREP */
1148 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "z3fold_zpool_unmap" */
1149 void * var_z3fold_zpool_unmap_27_p0;
1150 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "z3fold_zpool_unmap" */
1151 unsigned long var_z3fold_zpool_unmap_27_p1;
1152 /* content: static u64 z3fold_zpool_total_size(void *pool)*/
1153 /* LDV_COMMENT_BEGIN_PREP */
1154 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
1155 #define NCHUNKS_ORDER 6
1156 #define CHUNK_SHIFT (PAGE_SHIFT - NCHUNKS_ORDER)
1157 #define CHUNK_SIZE (1 << CHUNK_SHIFT)
1158 #define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
1159 #define ZHDR_CHUNKS (ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
1160 #define TOTAL_CHUNKS (PAGE_SIZE >> CHUNK_SHIFT)
1161 #define NCHUNKS ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
1162 #define BUDDY_MASK (0x3)
1163 #define for_each_unbuddied_list(_iter, _begin) \
1164 for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)
1165 #define BIG_CHUNK_GAP 3
1166 /* LDV_COMMENT_END_PREP */
1167 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "z3fold_zpool_total_size" */
1168 void * var_z3fold_zpool_total_size_28_p0;
1169
1170
1171
1172
1173 /* LDV_COMMENT_END_VARIABLE_DECLARATION_PART */
1174 /* LDV_COMMENT_BEGIN_VARIABLE_INITIALIZING_PART */
1175 /*============================= VARIABLE INITIALIZING PART =============================*/
1176 LDV_IN_INTERRUPT=1;
1177
1178
1179
1180
1181 /* LDV_COMMENT_END_VARIABLE_INITIALIZING_PART */
1182 /* LDV_COMMENT_BEGIN_FUNCTION_CALL_SECTION */
1183 /*============================= FUNCTION CALL SECTION =============================*/
1184 /* LDV_COMMENT_FUNCTION_CALL Initialize LDV model. */
1185 ldv_initialize();
1186
1187 /** INIT: init_type: ST_MODULE_INIT **/
1188 /* content: static int __init init_z3fold(void)*/
1189 /* LDV_COMMENT_BEGIN_PREP */
1190 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
1191 #define NCHUNKS_ORDER 6
1192 #define CHUNK_SHIFT (PAGE_SHIFT - NCHUNKS_ORDER)
1193 #define CHUNK_SIZE (1 << CHUNK_SHIFT)
1194 #define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
1195 #define ZHDR_CHUNKS (ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
1196 #define TOTAL_CHUNKS (PAGE_SIZE >> CHUNK_SHIFT)
1197 #define NCHUNKS ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
1198 #define BUDDY_MASK (0x3)
1199 #define for_each_unbuddied_list(_iter, _begin) \
1200 for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)
1201 #define BIG_CHUNK_GAP 3
1202 /* LDV_COMMENT_END_PREP */
1203 /* LDV_COMMENT_FUNCTION_CALL Kernel calls driver init function after driver loading to kernel. This function declared as "MODULE_INIT(function name)". */
1204 ldv_handler_precall();
1205 if(init_z3fold())
1206 goto ldv_final;
1207
1208
1209
1210
1211
1212 while( nondet_int()
1213 ) {
1214
1215 switch(nondet_int()) {
1216
1217 case 0: {
1218
1219 /** STRUCT: struct type: z3fold_ops, struct name: z3fold_zpool_ops **/
1220
1221
1222 /* content: static int z3fold_zpool_evict(struct z3fold_pool *pool, unsigned long handle)*/
1223 /* LDV_COMMENT_BEGIN_PREP */
1224 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
1225 #define NCHUNKS_ORDER 6
1226 #define CHUNK_SHIFT (PAGE_SHIFT - NCHUNKS_ORDER)
1227 #define CHUNK_SIZE (1 << CHUNK_SHIFT)
1228 #define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
1229 #define ZHDR_CHUNKS (ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
1230 #define TOTAL_CHUNKS (PAGE_SIZE >> CHUNK_SHIFT)
1231 #define NCHUNKS ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
1232 #define BUDDY_MASK (0x3)
1233 #define for_each_unbuddied_list(_iter, _begin) \
1234 for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)
1235 #define BIG_CHUNK_GAP 3
1236 /* LDV_COMMENT_END_PREP */
1237 /* LDV_COMMENT_FUNCTION_CALL Function from field "evict" from driver structure with callbacks "z3fold_zpool_ops" */
1238 ldv_handler_precall();
1239 z3fold_zpool_evict( var_group1, var_z3fold_zpool_evict_20_p1);
1240
1241
1242
1243
1244 }
1245
1246 break;
1247 case 1: {
1248
1249 /** STRUCT: struct type: zpool_driver, struct name: z3fold_zpool_driver **/
1250
1251
1252 /* content: static void *z3fold_zpool_create(const char *name, gfp_t gfp, const struct zpool_ops *zpool_ops, struct zpool *zpool)*/
1253 /* LDV_COMMENT_BEGIN_PREP */
1254 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
1255 #define NCHUNKS_ORDER 6
1256 #define CHUNK_SHIFT (PAGE_SHIFT - NCHUNKS_ORDER)
1257 #define CHUNK_SIZE (1 << CHUNK_SHIFT)
1258 #define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
1259 #define ZHDR_CHUNKS (ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
1260 #define TOTAL_CHUNKS (PAGE_SIZE >> CHUNK_SHIFT)
1261 #define NCHUNKS ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
1262 #define BUDDY_MASK (0x3)
1263 #define for_each_unbuddied_list(_iter, _begin) \
1264 for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)
1265 #define BIG_CHUNK_GAP 3
1266 /* LDV_COMMENT_END_PREP */
1267 /* LDV_COMMENT_FUNCTION_CALL Function from field "create" from driver structure with callbacks "z3fold_zpool_driver" */
1268 ldv_handler_precall();
1269 z3fold_zpool_create( var_z3fold_zpool_create_21_p0, var_z3fold_zpool_create_21_p1, var_z3fold_zpool_create_21_p2, var_z3fold_zpool_create_21_p3);
1270
1271
1272
1273
1274 }
1275
1276 break;
1277 case 2: {
1278
1279 /** STRUCT: struct type: zpool_driver, struct name: z3fold_zpool_driver **/
1280
1281
1282 /* content: static void z3fold_zpool_destroy(void *pool)*/
1283 /* LDV_COMMENT_BEGIN_PREP */
1284 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
1285 #define NCHUNKS_ORDER 6
1286 #define CHUNK_SHIFT (PAGE_SHIFT - NCHUNKS_ORDER)
1287 #define CHUNK_SIZE (1 << CHUNK_SHIFT)
1288 #define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
1289 #define ZHDR_CHUNKS (ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
1290 #define TOTAL_CHUNKS (PAGE_SIZE >> CHUNK_SHIFT)
1291 #define NCHUNKS ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
1292 #define BUDDY_MASK (0x3)
1293 #define for_each_unbuddied_list(_iter, _begin) \
1294 for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)
1295 #define BIG_CHUNK_GAP 3
1296 /* LDV_COMMENT_END_PREP */
1297 /* LDV_COMMENT_FUNCTION_CALL Function from field "destroy" from driver structure with callbacks "z3fold_zpool_driver" */
1298 ldv_handler_precall();
1299 z3fold_zpool_destroy( var_z3fold_zpool_destroy_22_p0);
1300
1301
1302
1303
1304 }
1305
1306 break;
1307 case 3: {
1308
1309 /** STRUCT: struct type: zpool_driver, struct name: z3fold_zpool_driver **/
1310
1311
1312 /* content: static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp, unsigned long *handle)*/
1313 /* LDV_COMMENT_BEGIN_PREP */
1314 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
1315 #define NCHUNKS_ORDER 6
1316 #define CHUNK_SHIFT (PAGE_SHIFT - NCHUNKS_ORDER)
1317 #define CHUNK_SIZE (1 << CHUNK_SHIFT)
1318 #define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
1319 #define ZHDR_CHUNKS (ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
1320 #define TOTAL_CHUNKS (PAGE_SIZE >> CHUNK_SHIFT)
1321 #define NCHUNKS ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
1322 #define BUDDY_MASK (0x3)
1323 #define for_each_unbuddied_list(_iter, _begin) \
1324 for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)
1325 #define BIG_CHUNK_GAP 3
1326 /* LDV_COMMENT_END_PREP */
1327 /* LDV_COMMENT_FUNCTION_CALL Function from field "malloc" from driver structure with callbacks "z3fold_zpool_driver" */
1328 ldv_handler_precall();
1329 z3fold_zpool_malloc( var_z3fold_zpool_malloc_23_p0, var_z3fold_zpool_malloc_23_p1, var_z3fold_zpool_malloc_23_p2, var_z3fold_zpool_malloc_23_p3);
1330
1331
1332
1333
1334 }
1335
1336 break;
1337 case 4: {
1338
1339 /** STRUCT: struct type: zpool_driver, struct name: z3fold_zpool_driver **/
1340
1341
1342 /* content: static void z3fold_zpool_free(void *pool, unsigned long handle)*/
1343 /* LDV_COMMENT_BEGIN_PREP */
1344 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
1345 #define NCHUNKS_ORDER 6
1346 #define CHUNK_SHIFT (PAGE_SHIFT - NCHUNKS_ORDER)
1347 #define CHUNK_SIZE (1 << CHUNK_SHIFT)
1348 #define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
1349 #define ZHDR_CHUNKS (ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
1350 #define TOTAL_CHUNKS (PAGE_SIZE >> CHUNK_SHIFT)
1351 #define NCHUNKS ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
1352 #define BUDDY_MASK (0x3)
1353 #define for_each_unbuddied_list(_iter, _begin) \
1354 for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)
1355 #define BIG_CHUNK_GAP 3
1356 /* LDV_COMMENT_END_PREP */
1357 /* LDV_COMMENT_FUNCTION_CALL Function from field "free" from driver structure with callbacks "z3fold_zpool_driver" */
1358 ldv_handler_precall();
1359 z3fold_zpool_free( var_z3fold_zpool_free_24_p0, var_z3fold_zpool_free_24_p1);
1360
1361
1362
1363
1364 }
1365
1366 break;
1367 case 5: {
1368
1369 /** STRUCT: struct type: zpool_driver, struct name: z3fold_zpool_driver **/
1370
1371
1372 /* content: static int z3fold_zpool_shrink(void *pool, unsigned int pages, unsigned int *reclaimed)*/
1373 /* LDV_COMMENT_BEGIN_PREP */
1374 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
1375 #define NCHUNKS_ORDER 6
1376 #define CHUNK_SHIFT (PAGE_SHIFT - NCHUNKS_ORDER)
1377 #define CHUNK_SIZE (1 << CHUNK_SHIFT)
1378 #define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
1379 #define ZHDR_CHUNKS (ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
1380 #define TOTAL_CHUNKS (PAGE_SIZE >> CHUNK_SHIFT)
1381 #define NCHUNKS ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
1382 #define BUDDY_MASK (0x3)
1383 #define for_each_unbuddied_list(_iter, _begin) \
1384 for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)
1385 #define BIG_CHUNK_GAP 3
1386 /* LDV_COMMENT_END_PREP */
1387 /* LDV_COMMENT_FUNCTION_CALL Function from field "shrink" from driver structure with callbacks "z3fold_zpool_driver" */
1388 ldv_handler_precall();
1389 z3fold_zpool_shrink( var_z3fold_zpool_shrink_25_p0, var_z3fold_zpool_shrink_25_p1, var_z3fold_zpool_shrink_25_p2);
1390
1391
1392
1393
1394 }
1395
1396 break;
1397 case 6: {
1398
1399 /** STRUCT: struct type: zpool_driver, struct name: z3fold_zpool_driver **/
1400
1401
1402 /* content: static void *z3fold_zpool_map(void *pool, unsigned long handle, enum zpool_mapmode mm)*/
1403 /* LDV_COMMENT_BEGIN_PREP */
1404 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
1405 #define NCHUNKS_ORDER 6
1406 #define CHUNK_SHIFT (PAGE_SHIFT - NCHUNKS_ORDER)
1407 #define CHUNK_SIZE (1 << CHUNK_SHIFT)
1408 #define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
1409 #define ZHDR_CHUNKS (ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
1410 #define TOTAL_CHUNKS (PAGE_SIZE >> CHUNK_SHIFT)
1411 #define NCHUNKS ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
1412 #define BUDDY_MASK (0x3)
1413 #define for_each_unbuddied_list(_iter, _begin) \
1414 for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)
1415 #define BIG_CHUNK_GAP 3
1416 /* LDV_COMMENT_END_PREP */
1417 /* LDV_COMMENT_FUNCTION_CALL Function from field "map" from driver structure with callbacks "z3fold_zpool_driver" */
1418 ldv_handler_precall();
1419 z3fold_zpool_map( var_z3fold_zpool_map_26_p0, var_z3fold_zpool_map_26_p1, var_z3fold_zpool_map_26_p2);
1420
1421
1422
1423
1424 }
1425
1426 break;
1427 case 7: {
1428
1429 /** STRUCT: struct type: zpool_driver, struct name: z3fold_zpool_driver **/
1430
1431
1432 /* content: static void z3fold_zpool_unmap(void *pool, unsigned long handle)*/
1433 /* LDV_COMMENT_BEGIN_PREP */
1434 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
1435 #define NCHUNKS_ORDER 6
1436 #define CHUNK_SHIFT (PAGE_SHIFT - NCHUNKS_ORDER)
1437 #define CHUNK_SIZE (1 << CHUNK_SHIFT)
1438 #define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
1439 #define ZHDR_CHUNKS (ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
1440 #define TOTAL_CHUNKS (PAGE_SIZE >> CHUNK_SHIFT)
1441 #define NCHUNKS ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
1442 #define BUDDY_MASK (0x3)
1443 #define for_each_unbuddied_list(_iter, _begin) \
1444 for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)
1445 #define BIG_CHUNK_GAP 3
1446 /* LDV_COMMENT_END_PREP */
1447 /* LDV_COMMENT_FUNCTION_CALL Function from field "unmap" from driver structure with callbacks "z3fold_zpool_driver" */
1448 ldv_handler_precall();
1449 z3fold_zpool_unmap( var_z3fold_zpool_unmap_27_p0, var_z3fold_zpool_unmap_27_p1);
1450
1451
1452
1453
1454 }
1455
1456 break;
1457 case 8: {
1458
1459 /** STRUCT: struct type: zpool_driver, struct name: z3fold_zpool_driver **/
1460
1461
1462 /* content: static u64 z3fold_zpool_total_size(void *pool)*/
1463 /* LDV_COMMENT_BEGIN_PREP */
1464 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
1465 #define NCHUNKS_ORDER 6
1466 #define CHUNK_SHIFT (PAGE_SHIFT - NCHUNKS_ORDER)
1467 #define CHUNK_SIZE (1 << CHUNK_SHIFT)
1468 #define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
1469 #define ZHDR_CHUNKS (ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
1470 #define TOTAL_CHUNKS (PAGE_SIZE >> CHUNK_SHIFT)
1471 #define NCHUNKS ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
1472 #define BUDDY_MASK (0x3)
1473 #define for_each_unbuddied_list(_iter, _begin) \
1474 for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)
1475 #define BIG_CHUNK_GAP 3
1476 /* LDV_COMMENT_END_PREP */
1477 /* LDV_COMMENT_FUNCTION_CALL Function from field "total_size" from driver structure with callbacks "z3fold_zpool_driver" */
1478 ldv_handler_precall();
1479 z3fold_zpool_total_size( var_z3fold_zpool_total_size_28_p0);
1480
1481
1482
1483
1484 }
1485
1486 break;
1487 default: break;
1488
1489 }
1490
1491 }
1492
1493 ldv_module_exit:
1494
1495 /** INIT: init_type: ST_MODULE_EXIT **/
1496 /* content: static void __exit exit_z3fold(void)*/
1497 /* LDV_COMMENT_BEGIN_PREP */
1498 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
1499 #define NCHUNKS_ORDER 6
1500 #define CHUNK_SHIFT (PAGE_SHIFT - NCHUNKS_ORDER)
1501 #define CHUNK_SIZE (1 << CHUNK_SHIFT)
1502 #define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
1503 #define ZHDR_CHUNKS (ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
1504 #define TOTAL_CHUNKS (PAGE_SIZE >> CHUNK_SHIFT)
1505 #define NCHUNKS ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
1506 #define BUDDY_MASK (0x3)
1507 #define for_each_unbuddied_list(_iter, _begin) \
1508 for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)
1509 #define BIG_CHUNK_GAP 3
1510 /* LDV_COMMENT_END_PREP */
1511 /* LDV_COMMENT_FUNCTION_CALL Kernel calls driver release function before driver will be uploaded from kernel. This function declared as "MODULE_EXIT(function name)". */
1512 ldv_handler_precall();
1513 exit_z3fold();
1514
1515 /* LDV_COMMENT_FUNCTION_CALL Checks that all resources and locks are correctly released before the driver will be unloaded. */
1516 ldv_final: ldv_check_final_state();
1517
1518 /* LDV_COMMENT_END_FUNCTION_CALL_SECTION */
1519 return;
1520
1521 }
1522 #endif
1523
1524 /* LDV_COMMENT_END_MAIN */
1525
1526 #line 58 "/work/ldvuser/ref_launch/work/current--X--mm--X--defaultlinux-4.11-rc1.tar.xz--X--39_7a--X--cpachecker/linux-4.11-rc1.tar.xz/csd_deg_dscv/19/dscv_tempdir/dscv/ri/39_7a/mm/z3fold.o.c.prepared" 1
2
3 #include <linux/kernel.h>
4 #include <linux/spinlock.h>
5
6 #include <verifier/rcv.h>
7 #include <kernel-model/ERR.inc>
8
9 static int ldv_spin_lock_of_NOT_ARG_SIGN = 1;
10
11 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_lock_lock_of_NOT_ARG_SIGN') Check that spin 'lock_of_NOT_ARG_SIGN' was not locked and lock it */
12 void ldv_spin_lock_lock_of_NOT_ARG_SIGN(void)
13 {
14 /* LDV_COMMENT_ASSERT Spin 'lock_of_NOT_ARG_SIGN' must be unlocked */
15 ldv_assert(ldv_spin_lock_of_NOT_ARG_SIGN == 1);
16 /* LDV_COMMENT_CHANGE_STATE Lock spin 'lock_of_NOT_ARG_SIGN' */
17 ldv_spin_lock_of_NOT_ARG_SIGN = 2;
18 }
19
20 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_lock_of_NOT_ARG_SIGN') Check that spin 'lock_of_NOT_ARG_SIGN' was locked and unlock it */
21 void ldv_spin_unlock_lock_of_NOT_ARG_SIGN(void)
22 {
23 /* LDV_COMMENT_ASSERT Spin 'lock_of_NOT_ARG_SIGN' must be locked */
24 ldv_assert(ldv_spin_lock_of_NOT_ARG_SIGN == 2);
25 /* LDV_COMMENT_CHANGE_STATE Unlock spin 'lock_of_NOT_ARG_SIGN' */
26 ldv_spin_lock_of_NOT_ARG_SIGN = 1;
27 }
28
29 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_trylock_lock_of_NOT_ARG_SIGN') Check that spin 'lock_of_NOT_ARG_SIGN' was not locked and nondeterministically lock it. Return 0 on fails */
30 int ldv_spin_trylock_lock_of_NOT_ARG_SIGN(void)
31 {
32 int is_spin_held_by_another_thread;
33
34 /* LDV_COMMENT_ASSERT It may be an error if spin 'lock_of_NOT_ARG_SIGN' is locked at this point */
35 ldv_assert(ldv_spin_lock_of_NOT_ARG_SIGN == 1);
36
37 /* LDV_COMMENT_OTHER Construct nondetermined result */
38 is_spin_held_by_another_thread = ldv_undef_int();
39
40 /* LDV_COMMENT_ASSERT Nondeterministically lock spin 'lock_of_NOT_ARG_SIGN' */
41 if (is_spin_held_by_another_thread)
42 {
43 /* LDV_COMMENT_RETURN Spin 'lock_of_NOT_ARG_SIGN' was not locked. Finish with fail */
44 return 0;
45 }
46 else
47 {
48 /* LDV_COMMENT_CHANGE_STATE Lock spin 'lock_of_NOT_ARG_SIGN' */
49 ldv_spin_lock_of_NOT_ARG_SIGN = 2;
50 /* LDV_COMMENT_RETURN Finish with success */
51 return 1;
52 }
53 }
54
55 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_wait_lock_of_NOT_ARG_SIGN') The same process can not both lock spin 'lock_of_NOT_ARG_SIGN' and wait until it will be unlocked */
56 void ldv_spin_unlock_wait_lock_of_NOT_ARG_SIGN(void)
57 {
58 /* LDV_COMMENT_ASSERT Spin 'lock_of_NOT_ARG_SIGN' must not be locked by a current process */
59 ldv_assert(ldv_spin_lock_of_NOT_ARG_SIGN == 1);
60 }
61
62 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_locked_lock_of_NOT_ARG_SIGN') Check whether spin 'lock_of_NOT_ARG_SIGN' was locked */
63 int ldv_spin_is_locked_lock_of_NOT_ARG_SIGN(void)
64 {
65 int is_spin_held_by_another_thread;
66
67 /* LDV_COMMENT_OTHER Construct nondetermined result */
68 is_spin_held_by_another_thread = ldv_undef_int();
69
70 /* LDV_COMMENT_ASSERT Nondeterministically understand whether spin 'lock_of_NOT_ARG_SIGN' was locked */
71 if(ldv_spin_lock_of_NOT_ARG_SIGN == 1 && !is_spin_held_by_another_thread)
72 {
73 /* LDV_COMMENT_RETURN Spin 'lock_of_NOT_ARG_SIGN' was unlocked */
74 return 0;
75 }
76 else
77 {
78 /* LDV_COMMENT_RETURN Spin 'lock_of_NOT_ARG_SIGN' was locked */
79 return 1;
80 }
81 }
82
83 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_can_lock_lock_of_NOT_ARG_SIGN') Check whether spin 'lock_of_NOT_ARG_SIGN' was locked */
84 int ldv_spin_can_lock_lock_of_NOT_ARG_SIGN(void)
85 {
86 /* LDV_COMMENT_RETURN Inverse function for spin_is_locked() */
87 return !ldv_spin_is_locked_lock_of_NOT_ARG_SIGN();
88 }
89
90 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_contended_lock_of_NOT_ARG_SIGN') Check whether spin 'lock_of_NOT_ARG_SIGN' is contended */
91 int ldv_spin_is_contended_lock_of_NOT_ARG_SIGN(void)
92 {
93 int is_spin_contended;
94
95 /* LDV_COMMENT_OTHER Construct nondetermined result */
96 is_spin_contended = ldv_undef_int();
97
98 /* LDV_COMMENT_ASSERT Nondeterministically understand whether spin 'lock_of_NOT_ARG_SIGN' is contended */
99 if(is_spin_contended)
100 {
101 /* LDV_COMMENT_RETURN Spin 'lock_of_NOT_ARG_SIGN' is contended */
102 return 0;
103 }
104 else
105 {
106 /* LDV_COMMENT_RETURN Spin 'lock_of_NOT_ARG_SIGN' isn't contended */
107 return 1;
108 }
109 }
110
111 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_atomic_dec_and_lock_lock_of_NOT_ARG_SIGN') Lock spin 'lock_of_NOT_ARG_SIGN' if atomic decrement result is zero */
112 int ldv_atomic_dec_and_lock_lock_of_NOT_ARG_SIGN(void)
113 {
114 int atomic_value_after_dec;
115
116 /* LDV_COMMENT_ASSERT Spin 'lock_of_NOT_ARG_SIGN' must be unlocked (since we may lock it in this function) */
117 ldv_assert(ldv_spin_lock_of_NOT_ARG_SIGN == 1);
118
119 /* LDV_COMMENT_OTHER Assign the result of atomic decrement */
120 atomic_value_after_dec = ldv_undef_int();
121
122 /* LDV_COMMENT_ASSERT Check if atomic decrement returns zero */
123 if (atomic_value_after_dec == 0)
124 {
125 /* LDV_COMMENT_CHANGE_STATE Lock spin 'lock_of_NOT_ARG_SIGN', as atomic has decremented to zero */
126 ldv_spin_lock_of_NOT_ARG_SIGN = 2;
127 /* LDV_COMMENT_RETURN Return 1 with locked spin 'lock_of_NOT_ARG_SIGN' */
128 return 1;
129 }
130
131 /* LDV_COMMENT_RETURN Atomic decrement is still not zero, return 0 without locking spin 'lock_of_NOT_ARG_SIGN' */
132 return 0;
133 }
134 static int ldv_spin_lock_of_z3fold_pool = 1;
135
136 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_lock_lock_of_z3fold_pool') Check that spin 'lock_of_z3fold_pool' was not locked and lock it */
137 void ldv_spin_lock_lock_of_z3fold_pool(void)
138 {
139 /* LDV_COMMENT_ASSERT Spin 'lock_of_z3fold_pool' must be unlocked */
140 ldv_assert(ldv_spin_lock_of_z3fold_pool == 1);
141 /* LDV_COMMENT_CHANGE_STATE Lock spin 'lock_of_z3fold_pool' */
142 ldv_spin_lock_of_z3fold_pool = 2;
143 }
144
145 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_lock_of_z3fold_pool') Check that spin 'lock_of_z3fold_pool' was locked and unlock it */
146 void ldv_spin_unlock_lock_of_z3fold_pool(void)
147 {
148 /* LDV_COMMENT_ASSERT Spin 'lock_of_z3fold_pool' must be locked */
149 ldv_assert(ldv_spin_lock_of_z3fold_pool == 2);
150 /* LDV_COMMENT_CHANGE_STATE Unlock spin 'lock_of_z3fold_pool' */
151 ldv_spin_lock_of_z3fold_pool = 1;
152 }
153
154 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_trylock_lock_of_z3fold_pool') Check that spin 'lock_of_z3fold_pool' was not locked and nondeterministically lock it. Return 0 on fails */
155 int ldv_spin_trylock_lock_of_z3fold_pool(void)
156 {
157 int is_spin_held_by_another_thread;
158
159 /* LDV_COMMENT_ASSERT It may be an error if spin 'lock_of_z3fold_pool' is locked at this point */
160 ldv_assert(ldv_spin_lock_of_z3fold_pool == 1);
161
162 /* LDV_COMMENT_OTHER Construct nondetermined result */
163 is_spin_held_by_another_thread = ldv_undef_int();
164
165 /* LDV_COMMENT_ASSERT Nondeterministically lock spin 'lock_of_z3fold_pool' */
166 if (is_spin_held_by_another_thread)
167 {
168 /* LDV_COMMENT_RETURN Spin 'lock_of_z3fold_pool' was not locked. Finish with fail */
169 return 0;
170 }
171 else
172 {
173 /* LDV_COMMENT_CHANGE_STATE Lock spin 'lock_of_z3fold_pool' */
174 ldv_spin_lock_of_z3fold_pool = 2;
175 /* LDV_COMMENT_RETURN Finish with success */
176 return 1;
177 }
178 }
179
180 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_wait_lock_of_z3fold_pool') The same process can not both lock spin 'lock_of_z3fold_pool' and wait until it will be unlocked */
181 void ldv_spin_unlock_wait_lock_of_z3fold_pool(void)
182 {
183 /* LDV_COMMENT_ASSERT Spin 'lock_of_z3fold_pool' must not be locked by a current process */
184 ldv_assert(ldv_spin_lock_of_z3fold_pool == 1);
185 }
186
187 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_locked_lock_of_z3fold_pool') Check whether spin 'lock_of_z3fold_pool' was locked */
188 int ldv_spin_is_locked_lock_of_z3fold_pool(void)
189 {
190 int is_spin_held_by_another_thread;
191
192 /* LDV_COMMENT_OTHER Construct nondetermined result */
193 is_spin_held_by_another_thread = ldv_undef_int();
194
195 /* LDV_COMMENT_ASSERT Nondeterministically understand whether spin 'lock_of_z3fold_pool' was locked */
196 if(ldv_spin_lock_of_z3fold_pool == 1 && !is_spin_held_by_another_thread)
197 {
198 /* LDV_COMMENT_RETURN Spin 'lock_of_z3fold_pool' was unlocked */
199 return 0;
200 }
201 else
202 {
203 /* LDV_COMMENT_RETURN Spin 'lock_of_z3fold_pool' was locked */
204 return 1;
205 }
206 }
207
208 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_can_lock_lock_of_z3fold_pool') Check whether spin 'lock_of_z3fold_pool' was locked */
209 int ldv_spin_can_lock_lock_of_z3fold_pool(void)
210 {
211 /* LDV_COMMENT_RETURN Inverse function for spin_is_locked() */
212 return !ldv_spin_is_locked_lock_of_z3fold_pool();
213 }
214
215 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_contended_lock_of_z3fold_pool') Check whether spin 'lock_of_z3fold_pool' is contended */
216 int ldv_spin_is_contended_lock_of_z3fold_pool(void)
217 {
218 int is_spin_contended;
219
220 /* LDV_COMMENT_OTHER Construct nondetermined result */
221 is_spin_contended = ldv_undef_int();
222
223 /* LDV_COMMENT_ASSERT Nondeterministically understand whether spin 'lock_of_z3fold_pool' is contended */
224 if(is_spin_contended)
225 {
226 /* LDV_COMMENT_RETURN Spin 'lock_of_z3fold_pool' is contended */
227 return 0;
228 }
229 else
230 {
231 /* LDV_COMMENT_RETURN Spin 'lock_of_z3fold_pool' isn't contended */
232 return 1;
233 }
234 }
235
236 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_atomic_dec_and_lock_lock_of_z3fold_pool') Lock spin 'lock_of_z3fold_pool' if atomic decrement result is zero */
237 int ldv_atomic_dec_and_lock_lock_of_z3fold_pool(void)
238 {
239 int atomic_value_after_dec;
240
241 /* LDV_COMMENT_ASSERT Spin 'lock_of_z3fold_pool' must be unlocked (since we may lock it in this function) */
242 ldv_assert(ldv_spin_lock_of_z3fold_pool == 1);
243
244 /* LDV_COMMENT_OTHER Assign the result of atomic decrement */
245 atomic_value_after_dec = ldv_undef_int();
246
247 /* LDV_COMMENT_ASSERT Check if atomic decrement returns zero */
248 if (atomic_value_after_dec == 0)
249 {
250 /* LDV_COMMENT_CHANGE_STATE Lock spin 'lock_of_z3fold_pool', as atomic has decremented to zero */
251 ldv_spin_lock_of_z3fold_pool = 2;
252 /* LDV_COMMENT_RETURN Return 1 with locked spin 'lock_of_z3fold_pool' */
253 return 1;
254 }
255
256 /* LDV_COMMENT_RETURN Atomic decrement is still not zero, return 0 without locking spin 'lock_of_z3fold_pool' */
257 return 0;
258 }
259 static int ldv_spin_node_size_lock_of_pglist_data = 1;
260
261 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_lock_node_size_lock_of_pglist_data') Check that spin 'node_size_lock_of_pglist_data' was not locked and lock it */
262 void ldv_spin_lock_node_size_lock_of_pglist_data(void)
263 {
264 /* LDV_COMMENT_ASSERT Spin 'node_size_lock_of_pglist_data' must be unlocked */
265 ldv_assert(ldv_spin_node_size_lock_of_pglist_data == 1);
266 /* LDV_COMMENT_CHANGE_STATE Lock spin 'node_size_lock_of_pglist_data' */
267 ldv_spin_node_size_lock_of_pglist_data = 2;
268 }
269
270 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_node_size_lock_of_pglist_data') Check that spin 'node_size_lock_of_pglist_data' was locked and unlock it */
271 void ldv_spin_unlock_node_size_lock_of_pglist_data(void)
272 {
273 /* LDV_COMMENT_ASSERT Spin 'node_size_lock_of_pglist_data' must be locked */
274 ldv_assert(ldv_spin_node_size_lock_of_pglist_data == 2);
275 /* LDV_COMMENT_CHANGE_STATE Unlock spin 'node_size_lock_of_pglist_data' */
276 ldv_spin_node_size_lock_of_pglist_data = 1;
277 }
278
279 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_trylock_node_size_lock_of_pglist_data') Check that spin 'node_size_lock_of_pglist_data' was not locked and nondeterministically lock it. Return 0 on fails */
280 int ldv_spin_trylock_node_size_lock_of_pglist_data(void)
281 {
282 int is_spin_held_by_another_thread;
283
284 /* LDV_COMMENT_ASSERT It may be an error if spin 'node_size_lock_of_pglist_data' is locked at this point */
285 ldv_assert(ldv_spin_node_size_lock_of_pglist_data == 1);
286
287 /* LDV_COMMENT_OTHER Construct nondetermined result */
288 is_spin_held_by_another_thread = ldv_undef_int();
289
290 /* LDV_COMMENT_ASSERT Nondeterministically lock spin 'node_size_lock_of_pglist_data' */
291 if (is_spin_held_by_another_thread)
292 {
293 /* LDV_COMMENT_RETURN Spin 'node_size_lock_of_pglist_data' was not locked. Finish with fail */
294 return 0;
295 }
296 else
297 {
298 /* LDV_COMMENT_CHANGE_STATE Lock spin 'node_size_lock_of_pglist_data' */
299 ldv_spin_node_size_lock_of_pglist_data = 2;
300 /* LDV_COMMENT_RETURN Finish with success */
301 return 1;
302 }
303 }
304
305 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_wait_node_size_lock_of_pglist_data') The same process can not both lock spin 'node_size_lock_of_pglist_data' and wait until it will be unlocked */
306 void ldv_spin_unlock_wait_node_size_lock_of_pglist_data(void)
307 {
308 /* LDV_COMMENT_ASSERT Spin 'node_size_lock_of_pglist_data' must not be locked by a current process */
309 ldv_assert(ldv_spin_node_size_lock_of_pglist_data == 1);
310 }
311
312 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_locked_node_size_lock_of_pglist_data') Check whether spin 'node_size_lock_of_pglist_data' was locked */
313 int ldv_spin_is_locked_node_size_lock_of_pglist_data(void)
314 {
315 int is_spin_held_by_another_thread;
316
317 /* LDV_COMMENT_OTHER Construct nondetermined result */
318 is_spin_held_by_another_thread = ldv_undef_int();
319
320 /* LDV_COMMENT_ASSERT Nondeterministically understand whether spin 'node_size_lock_of_pglist_data' was locked */
321 if(ldv_spin_node_size_lock_of_pglist_data == 1 && !is_spin_held_by_another_thread)
322 {
323 /* LDV_COMMENT_RETURN Spin 'node_size_lock_of_pglist_data' was unlocked */
324 return 0;
325 }
326 else
327 {
328 /* LDV_COMMENT_RETURN Spin 'node_size_lock_of_pglist_data' was locked */
329 return 1;
330 }
331 }
332
333 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_can_lock_node_size_lock_of_pglist_data') Check whether spin 'node_size_lock_of_pglist_data' was locked */
334 int ldv_spin_can_lock_node_size_lock_of_pglist_data(void)
335 {
336 /* LDV_COMMENT_RETURN Inverse function for spin_is_locked() */
337 return !ldv_spin_is_locked_node_size_lock_of_pglist_data();
338 }
339
340 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_contended_node_size_lock_of_pglist_data') Check whether spin 'node_size_lock_of_pglist_data' is contended */
341 int ldv_spin_is_contended_node_size_lock_of_pglist_data(void)
342 {
343 int is_spin_contended;
344
345 /* LDV_COMMENT_OTHER Construct nondetermined result */
346 is_spin_contended = ldv_undef_int();
347
348 /* LDV_COMMENT_ASSERT Nondeterministically understand whether spin 'node_size_lock_of_pglist_data' is contended */
349 if(is_spin_contended)
350 {
351 /* LDV_COMMENT_RETURN Spin 'node_size_lock_of_pglist_data' is contended */
352 return 0;
353 }
354 else
355 {
356 /* LDV_COMMENT_RETURN Spin 'node_size_lock_of_pglist_data' isn't contended */
357 return 1;
358 }
359 }
360
361 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_atomic_dec_and_lock_node_size_lock_of_pglist_data') Lock spin 'node_size_lock_of_pglist_data' if atomic decrement result is zero */
362 int ldv_atomic_dec_and_lock_node_size_lock_of_pglist_data(void)
363 {
364 int atomic_value_after_dec;
365
366 /* LDV_COMMENT_ASSERT Spin 'node_size_lock_of_pglist_data' must be unlocked (since we may lock it in this function) */
367 ldv_assert(ldv_spin_node_size_lock_of_pglist_data == 1);
368
369 /* LDV_COMMENT_OTHER Assign the result of atomic decrement */
370 atomic_value_after_dec = ldv_undef_int();
371
372 /* LDV_COMMENT_ASSERT Check if atomic decrement returns zero */
373 if (atomic_value_after_dec == 0)
374 {
375 /* LDV_COMMENT_CHANGE_STATE Lock spin 'node_size_lock_of_pglist_data', as atomic has decremented to zero */
376 ldv_spin_node_size_lock_of_pglist_data = 2;
377 /* LDV_COMMENT_RETURN Return 1 with locked spin 'node_size_lock_of_pglist_data' */
378 return 1;
379 }
380
381 /* LDV_COMMENT_RETURN Atomic decrement is still not zero, return 0 without locking spin 'node_size_lock_of_pglist_data' */
382 return 0;
383 }
384 static int ldv_spin_page_lock_of_z3fold_header = 1;
385
386 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_lock_page_lock_of_z3fold_header') Check that spin 'page_lock_of_z3fold_header' was not locked and lock it */
387 void ldv_spin_lock_page_lock_of_z3fold_header(void)
388 {
389 /* LDV_COMMENT_ASSERT Spin 'page_lock_of_z3fold_header' must be unlocked */
390 ldv_assert(ldv_spin_page_lock_of_z3fold_header == 1);
391 /* LDV_COMMENT_CHANGE_STATE Lock spin 'page_lock_of_z3fold_header' */
392 ldv_spin_page_lock_of_z3fold_header = 2;
393 }
394
395 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_page_lock_of_z3fold_header') Check that spin 'page_lock_of_z3fold_header' was locked and unlock it */
396 void ldv_spin_unlock_page_lock_of_z3fold_header(void)
397 {
398 /* LDV_COMMENT_ASSERT Spin 'page_lock_of_z3fold_header' must be locked */
399 ldv_assert(ldv_spin_page_lock_of_z3fold_header == 2);
400 /* LDV_COMMENT_CHANGE_STATE Unlock spin 'page_lock_of_z3fold_header' */
401 ldv_spin_page_lock_of_z3fold_header = 1;
402 }
403
404 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_trylock_page_lock_of_z3fold_header') Check that spin 'page_lock_of_z3fold_header' was not locked and nondeterministically lock it. Return 0 on fails */
405 int ldv_spin_trylock_page_lock_of_z3fold_header(void)
406 {
407 int is_spin_held_by_another_thread;
408
409 /* LDV_COMMENT_ASSERT It may be an error if spin 'page_lock_of_z3fold_header' is locked at this point */
410 ldv_assert(ldv_spin_page_lock_of_z3fold_header == 1);
411
412 /* LDV_COMMENT_OTHER Construct nondetermined result */
413 is_spin_held_by_another_thread = ldv_undef_int();
414
415 /* LDV_COMMENT_ASSERT Nondeterministically lock spin 'page_lock_of_z3fold_header' */
416 if (is_spin_held_by_another_thread)
417 {
418 /* LDV_COMMENT_RETURN Spin 'page_lock_of_z3fold_header' was not locked. Finish with fail */
419 return 0;
420 }
421 else
422 {
423 /* LDV_COMMENT_CHANGE_STATE Lock spin 'page_lock_of_z3fold_header' */
424 ldv_spin_page_lock_of_z3fold_header = 2;
425 /* LDV_COMMENT_RETURN Finish with success */
426 return 1;
427 }
428 }
429
430 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_wait_page_lock_of_z3fold_header') The same process can not both lock spin 'page_lock_of_z3fold_header' and wait until it will be unlocked */
431 void ldv_spin_unlock_wait_page_lock_of_z3fold_header(void)
432 {
433 /* LDV_COMMENT_ASSERT Spin 'page_lock_of_z3fold_header' must not be locked by a current process */
434 ldv_assert(ldv_spin_page_lock_of_z3fold_header == 1);
435 }
436
437 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_locked_page_lock_of_z3fold_header') Check whether spin 'page_lock_of_z3fold_header' was locked */
438 int ldv_spin_is_locked_page_lock_of_z3fold_header(void)
439 {
440 int is_spin_held_by_another_thread;
441
442 /* LDV_COMMENT_OTHER Construct nondetermined result */
443 is_spin_held_by_another_thread = ldv_undef_int();
444
445 /* LDV_COMMENT_ASSERT Nondeterministically understand whether spin 'page_lock_of_z3fold_header' was locked */
446 if(ldv_spin_page_lock_of_z3fold_header == 1 && !is_spin_held_by_another_thread)
447 {
448 /* LDV_COMMENT_RETURN Spin 'page_lock_of_z3fold_header' was unlocked */
449 return 0;
450 }
451 else
452 {
453 /* LDV_COMMENT_RETURN Spin 'page_lock_of_z3fold_header' was locked */
454 return 1;
455 }
456 }
457
458 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_can_lock_page_lock_of_z3fold_header') Check whether spin 'page_lock_of_z3fold_header' was locked */
459 int ldv_spin_can_lock_page_lock_of_z3fold_header(void)
460 {
461 /* LDV_COMMENT_RETURN Inverse function for spin_is_locked() */
462 return !ldv_spin_is_locked_page_lock_of_z3fold_header();
463 }
464
465 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_contended_page_lock_of_z3fold_header') Check whether spin 'page_lock_of_z3fold_header' is contended */
466 int ldv_spin_is_contended_page_lock_of_z3fold_header(void)
467 {
468 int is_spin_contended;
469
470 /* LDV_COMMENT_OTHER Construct nondetermined result */
471 is_spin_contended = ldv_undef_int();
472
473 /* LDV_COMMENT_ASSERT Nondeterministically understand whether spin 'page_lock_of_z3fold_header' is contended */
474 if(is_spin_contended)
475 {
476 /* LDV_COMMENT_RETURN Spin 'page_lock_of_z3fold_header' is contended */
477 return 0;
478 }
479 else
480 {
481 /* LDV_COMMENT_RETURN Spin 'page_lock_of_z3fold_header' isn't contended */
482 return 1;
483 }
484 }
485
486 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_atomic_dec_and_lock_page_lock_of_z3fold_header') Lock spin 'page_lock_of_z3fold_header' if atomic decrement result is zero */
487 int ldv_atomic_dec_and_lock_page_lock_of_z3fold_header(void)
488 {
489 int atomic_value_after_dec;
490
491 /* LDV_COMMENT_ASSERT Spin 'page_lock_of_z3fold_header' must be unlocked (since we may lock it in this function) */
492 ldv_assert(ldv_spin_page_lock_of_z3fold_header == 1);
493
494 /* LDV_COMMENT_OTHER Assign the result of atomic decrement */
495 atomic_value_after_dec = ldv_undef_int();
496
497 /* LDV_COMMENT_ASSERT Check if atomic decrement returns zero */
498 if (atomic_value_after_dec == 0)
499 {
500 /* LDV_COMMENT_CHANGE_STATE Lock spin 'page_lock_of_z3fold_header', as atomic has decremented to zero */
501 ldv_spin_page_lock_of_z3fold_header = 2;
502 /* LDV_COMMENT_RETURN Return 1 with locked spin 'page_lock_of_z3fold_header' */
503 return 1;
504 }
505
506 /* LDV_COMMENT_RETURN Atomic decrement is still not zero, return 0 without locking spin 'page_lock_of_z3fold_header' */
507 return 0;
508 }
509 static int ldv_spin_ptl = 1;
510
511 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_lock_ptl') Check that spin 'ptl' was not locked and lock it */
512 void ldv_spin_lock_ptl(void)
513 {
514 /* LDV_COMMENT_ASSERT Spin 'ptl' must be unlocked */
515 ldv_assert(ldv_spin_ptl == 1);
516 /* LDV_COMMENT_CHANGE_STATE Lock spin 'ptl' */
517 ldv_spin_ptl = 2;
518 }
519
520 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_ptl') Check that spin 'ptl' was locked and unlock it */
521 void ldv_spin_unlock_ptl(void)
522 {
523 /* LDV_COMMENT_ASSERT Spin 'ptl' must be locked */
524 ldv_assert(ldv_spin_ptl == 2);
525 /* LDV_COMMENT_CHANGE_STATE Unlock spin 'ptl' */
526 ldv_spin_ptl = 1;
527 }
528
529 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_trylock_ptl') Check that spin 'ptl' was not locked and nondeterministically lock it. Return 0 on fails */
530 int ldv_spin_trylock_ptl(void)
531 {
532 int is_spin_held_by_another_thread;
533
534 /* LDV_COMMENT_ASSERT It may be an error if spin 'ptl' is locked at this point */
535 ldv_assert(ldv_spin_ptl == 1);
536
537 /* LDV_COMMENT_OTHER Construct nondetermined result */
538 is_spin_held_by_another_thread = ldv_undef_int();
539
540 /* LDV_COMMENT_ASSERT Nondeterministically lock spin 'ptl' */
541 if (is_spin_held_by_another_thread)
542 {
543 /* LDV_COMMENT_RETURN Spin 'ptl' was not locked. Finish with fail */
544 return 0;
545 }
546 else
547 {
548 /* LDV_COMMENT_CHANGE_STATE Lock spin 'ptl' */
549 ldv_spin_ptl = 2;
550 /* LDV_COMMENT_RETURN Finish with success */
551 return 1;
552 }
553 }
554
555 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_wait_ptl') The same process can not both lock spin 'ptl' and wait until it will be unlocked */
556 void ldv_spin_unlock_wait_ptl(void)
557 {
558 /* LDV_COMMENT_ASSERT Spin 'ptl' must not be locked by a current process */
559 ldv_assert(ldv_spin_ptl == 1);
560 }
561
562 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_locked_ptl') Check whether spin 'ptl' was locked */
563 int ldv_spin_is_locked_ptl(void)
564 {
565 int is_spin_held_by_another_thread;
566
567 /* LDV_COMMENT_OTHER Construct nondetermined result */
568 is_spin_held_by_another_thread = ldv_undef_int();
569
570 /* LDV_COMMENT_ASSERT Nondeterministically understand whether spin 'ptl' was locked */
571 if(ldv_spin_ptl == 1 && !is_spin_held_by_another_thread)
572 {
573 /* LDV_COMMENT_RETURN Spin 'ptl' was unlocked */
574 return 0;
575 }
576 else
577 {
578 /* LDV_COMMENT_RETURN Spin 'ptl' was locked */
579 return 1;
580 }
581 }
582
583 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_can_lock_ptl') Check whether spin 'ptl' was locked */
584 int ldv_spin_can_lock_ptl(void)
585 {
586 /* LDV_COMMENT_RETURN Inverse function for spin_is_locked() */
587 return !ldv_spin_is_locked_ptl();
588 }
589
590 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_contended_ptl') Check whether spin 'ptl' is contended */
591 int ldv_spin_is_contended_ptl(void)
592 {
593 int is_spin_contended;
594
595 /* LDV_COMMENT_OTHER Construct nondetermined result */
596 is_spin_contended = ldv_undef_int();
597
598 /* LDV_COMMENT_ASSERT Nondeterministically understand whether spin 'ptl' is contended */
599 if(is_spin_contended)
600 {
601 /* LDV_COMMENT_RETURN Spin 'ptl' is contended */
602 return 0;
603 }
604 else
605 {
606 /* LDV_COMMENT_RETURN Spin 'ptl' isn't contended */
607 return 1;
608 }
609 }
610
611 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_atomic_dec_and_lock_ptl') Lock spin 'ptl' if atomic decrement result is zero */
612 int ldv_atomic_dec_and_lock_ptl(void)
613 {
614 int atomic_value_after_dec;
615
616 /* LDV_COMMENT_ASSERT Spin 'ptl' must be unlocked (since we may lock it in this function) */
617 ldv_assert(ldv_spin_ptl == 1);
618
619 /* LDV_COMMENT_OTHER Assign the result of atomic decrement */
620 atomic_value_after_dec = ldv_undef_int();
621
622 /* LDV_COMMENT_ASSERT Check if atomic decrement returns zero */
623 if (atomic_value_after_dec == 0)
624 {
625 /* LDV_COMMENT_CHANGE_STATE Lock spin 'ptl', as atomic has decremented to zero */
626 ldv_spin_ptl = 2;
627 /* LDV_COMMENT_RETURN Return 1 with locked spin 'ptl' */
628 return 1;
629 }
630
631 /* LDV_COMMENT_RETURN Atomic decrement is still not zero, return 0 without locking spin 'ptl' */
632 return 0;
633 }
634
635 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_check_final_state') Check that all spins are unlocked at the end */
636 void ldv_check_final_state(void)
637 {
638 /* LDV_COMMENT_ASSERT Spin 'lock_of_NOT_ARG_SIGN' must be unlocked at the end */
639 ldv_assert(ldv_spin_lock_of_NOT_ARG_SIGN == 1);
640 /* LDV_COMMENT_ASSERT Spin 'lock_of_z3fold_pool' must be unlocked at the end */
641 ldv_assert(ldv_spin_lock_of_z3fold_pool == 1);
642 /* LDV_COMMENT_ASSERT Spin 'node_size_lock_of_pglist_data' must be unlocked at the end */
643 ldv_assert(ldv_spin_node_size_lock_of_pglist_data == 1);
644 /* LDV_COMMENT_ASSERT Spin 'page_lock_of_z3fold_header' must be unlocked at the end */
645 ldv_assert(ldv_spin_page_lock_of_z3fold_header == 1);
646 /* LDV_COMMENT_ASSERT Spin 'ptl' must be unlocked at the end */
647 ldv_assert(ldv_spin_ptl == 1);
648 } 1 #ifndef _LDV_RCV_H_
2 #define _LDV_RCV_H_
3
4 /* If expr evaluates to zero, ldv_assert() causes a program to reach the error
5 label like the standard assert(). */
6 #define ldv_assert(expr) ((expr) ? 0 : ldv_error())
7
8 /* The error label wrapper. It is used because of some static verifiers (like
9 BLAST) don't accept multiple error labels through a program. */
10 static inline void ldv_error(void)
11 {
12 LDV_ERROR: goto LDV_ERROR;
13 }
14
15 /* If expr evaluates to zero, ldv_assume() causes an infinite loop that is
16 avoided by verifiers. */
17 #define ldv_assume(expr) ((expr) ? 0 : ldv_stop())
18
19 /* Infinite loop, that causes verifiers to skip such paths. */
20 static inline void ldv_stop(void) {
21 LDV_STOP: goto LDV_STOP;
22 }
23
24 /* Special nondeterministic functions. */
25 int ldv_undef_int(void);
26 void *ldv_undef_ptr(void);
27 unsigned long ldv_undef_ulong(void);
28 long ldv_undef_long(void);
29 /* Return nondeterministic negative integer number. */
30 static inline int ldv_undef_int_negative(void)
31 {
32 int ret = ldv_undef_int();
33
34 ldv_assume(ret < 0);
35
36 return ret;
37 }
38 /* Return nondeterministic nonpositive integer number. */
39 static inline int ldv_undef_int_nonpositive(void)
40 {
41 int ret = ldv_undef_int();
42
43 ldv_assume(ret <= 0);
44
45 return ret;
46 }
47
48 /* Add explicit model for __builin_expect GCC function. Without the model a
49 return value will be treated as nondetermined by verifiers. */
50 static inline long __builtin_expect(long exp, long c)
51 {
52 return exp;
53 }
54
55 /* This function causes the program to exit abnormally. GCC implements this
56 function by using a target-dependent mechanism (such as intentionally executing
57 an illegal instruction) or by calling abort. The mechanism used may vary from
58 release to release so you should not rely on any particular implementation.
59 http://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html */
60 static inline void __builtin_trap(void)
61 {
62 ldv_assert(0);
63 }
64
65 /* The constant is for simulating an error of ldv_undef_ptr() function. */
66 #define LDV_PTR_MAX 2012
67
68 #endif /* _LDV_RCV_H_ */ 1 #ifndef __LINUX_COMPILER_H
2 #define __LINUX_COMPILER_H
3
4 #ifndef __ASSEMBLY__
5
6 #ifdef __CHECKER__
7 # define __user __attribute__((noderef, address_space(1)))
8 # define __kernel __attribute__((address_space(0)))
9 # define __safe __attribute__((safe))
10 # define __force __attribute__((force))
11 # define __nocast __attribute__((nocast))
12 # define __iomem __attribute__((noderef, address_space(2)))
13 # define __must_hold(x) __attribute__((context(x,1,1)))
14 # define __acquires(x) __attribute__((context(x,0,1)))
15 # define __releases(x) __attribute__((context(x,1,0)))
16 # define __acquire(x) __context__(x,1)
17 # define __release(x) __context__(x,-1)
18 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
19 # define __percpu __attribute__((noderef, address_space(3)))
20 #ifdef CONFIG_SPARSE_RCU_POINTER
21 # define __rcu __attribute__((noderef, address_space(4)))
22 #else /* CONFIG_SPARSE_RCU_POINTER */
23 # define __rcu
24 #endif /* CONFIG_SPARSE_RCU_POINTER */
25 # define __private __attribute__((noderef))
26 extern void __chk_user_ptr(const volatile void __user *);
27 extern void __chk_io_ptr(const volatile void __iomem *);
28 # define ACCESS_PRIVATE(p, member) (*((typeof((p)->member) __force *) &(p)->member))
29 #else /* __CHECKER__ */
30 # ifdef STRUCTLEAK_PLUGIN
31 # define __user __attribute__((user))
32 # else
33 # define __user
34 # endif
35 # define __kernel
36 # define __safe
37 # define __force
38 # define __nocast
39 # define __iomem
40 # define __chk_user_ptr(x) (void)0
41 # define __chk_io_ptr(x) (void)0
42 # define __builtin_warning(x, y...) (1)
43 # define __must_hold(x)
44 # define __acquires(x)
45 # define __releases(x)
46 # define __acquire(x) (void)0
47 # define __release(x) (void)0
48 # define __cond_lock(x,c) (c)
49 # define __percpu
50 # define __rcu
51 # define __private
52 # define ACCESS_PRIVATE(p, member) ((p)->member)
53 #endif /* __CHECKER__ */
54
55 /* Indirect macros required for expanded argument pasting, eg. __LINE__. */
56 #define ___PASTE(a,b) a##b
57 #define __PASTE(a,b) ___PASTE(a,b)
58
59 #ifdef __KERNEL__
60
61 #ifdef __GNUC__
62 #include <linux/compiler-gcc.h>
63 #endif
64
65 #if defined(CC_USING_HOTPATCH) && !defined(__CHECKER__)
66 #define notrace __attribute__((hotpatch(0,0)))
67 #else
68 #define notrace __attribute__((no_instrument_function))
69 #endif
70
71 /* Intel compiler defines __GNUC__. So we will overwrite implementations
72 * coming from above header files here
73 */
74 #ifdef __INTEL_COMPILER
75 # include <linux/compiler-intel.h>
76 #endif
77
78 /* Clang compiler defines __GNUC__. So we will overwrite implementations
79 * coming from above header files here
80 */
81 #ifdef __clang__
82 #include <linux/compiler-clang.h>
83 #endif
84
85 /*
86 * Generic compiler-dependent macros required for kernel
87 * build go below this comment. Actual compiler/compiler version
88 * specific implementations come from the above header files
89 */
90
91 struct ftrace_branch_data {
92 const char *func;
93 const char *file;
94 unsigned line;
95 union {
96 struct {
97 unsigned long correct;
98 unsigned long incorrect;
99 };
100 struct {
101 unsigned long miss;
102 unsigned long hit;
103 };
104 unsigned long miss_hit[2];
105 };
106 };
107
108 struct ftrace_likely_data {
109 struct ftrace_branch_data data;
110 unsigned long constant;
111 };
112
113 /*
114 * Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code
115 * to disable branch tracing on a per file basis.
116 */
117 #if defined(CONFIG_TRACE_BRANCH_PROFILING) \
118 && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
119 void ftrace_likely_update(struct ftrace_likely_data *f, int val,
120 int expect, int is_constant);
121
122 #define likely_notrace(x) __builtin_expect(!!(x), 1)
123 #define unlikely_notrace(x) __builtin_expect(!!(x), 0)
124
125 #define __branch_check__(x, expect, is_constant) ({ \
126 int ______r; \
127 static struct ftrace_likely_data \
128 __attribute__((__aligned__(4))) \
129 __attribute__((section("_ftrace_annotated_branch"))) \
130 ______f = { \
131 .data.func = __func__, \
132 .data.file = __FILE__, \
133 .data.line = __LINE__, \
134 }; \
135 ______r = __builtin_expect(!!(x), expect); \
136 ftrace_likely_update(&______f, ______r, \
137 expect, is_constant); \
138 ______r; \
139 })
140
141 /*
142 * Using __builtin_constant_p(x) to ignore cases where the return
143 * value is always the same. This idea is taken from a similar patch
144 * written by Daniel Walker.
145 */
146 # ifndef likely
147 # define likely(x) (__branch_check__(x, 1, __builtin_constant_p(x)))
148 # endif
149 # ifndef unlikely
150 # define unlikely(x) (__branch_check__(x, 0, __builtin_constant_p(x)))
151 # endif
152
153 #ifdef CONFIG_PROFILE_ALL_BRANCHES
154 /*
155 * "Define 'is'", Bill Clinton
156 * "Define 'if'", Steven Rostedt
157 */
158 #define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
159 #define __trace_if(cond) \
160 if (__builtin_constant_p(!!(cond)) ? !!(cond) : \
161 ({ \
162 int ______r; \
163 static struct ftrace_branch_data \
164 __attribute__((__aligned__(4))) \
165 __attribute__((section("_ftrace_branch"))) \
166 ______f = { \
167 .func = __func__, \
168 .file = __FILE__, \
169 .line = __LINE__, \
170 }; \
171 ______r = !!(cond); \
172 ______f.miss_hit[______r]++; \
173 ______r; \
174 }))
175 #endif /* CONFIG_PROFILE_ALL_BRANCHES */
176
177 #else
178 # define likely(x) __builtin_expect(!!(x), 1)
179 # define unlikely(x) __builtin_expect(!!(x), 0)
180 #endif
181
182 /* Optimization barrier */
183 #ifndef barrier
184 # define barrier() __memory_barrier()
185 #endif
186
187 #ifndef barrier_data
188 # define barrier_data(ptr) barrier()
189 #endif
190
191 /* Unreachable code */
192 #ifndef unreachable
193 # define unreachable() do { } while (1)
194 #endif
195
196 /*
197 * KENTRY - kernel entry point
198 * This can be used to annotate symbols (functions or data) that are used
199 * without their linker symbol being referenced explicitly. For example,
200 * interrupt vector handlers, or functions in the kernel image that are found
201 * programatically.
202 *
203 * Not required for symbols exported with EXPORT_SYMBOL, or initcalls. Those
204 * are handled in their own way (with KEEP() in linker scripts).
205 *
206 * KENTRY can be avoided if the symbols in question are marked as KEEP() in the
207 * linker script. For example an architecture could KEEP() its entire
208 * boot/exception vector code rather than annotate each function and data.
209 */
210 #ifndef KENTRY
211 # define KENTRY(sym) \
212 extern typeof(sym) sym; \
213 static const unsigned long __kentry_##sym \
214 __used \
215 __attribute__((section("___kentry" "+" #sym ), used)) \
216 = (unsigned long)&sym;
217 #endif
218
219 #ifndef RELOC_HIDE
220 # define RELOC_HIDE(ptr, off) \
221 ({ unsigned long __ptr; \
222 __ptr = (unsigned long) (ptr); \
223 (typeof(ptr)) (__ptr + (off)); })
224 #endif
225
226 #ifndef OPTIMIZER_HIDE_VAR
227 #define OPTIMIZER_HIDE_VAR(var) barrier()
228 #endif
229
230 /* Not-quite-unique ID. */
231 #ifndef __UNIQUE_ID
232 # define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
233 #endif
234
235 #include <uapi/linux/types.h>
236
237 #define __READ_ONCE_SIZE \
238 ({ \
239 switch (size) { \
240 case 1: *(__u8 *)res = *(volatile __u8 *)p; break; \
241 case 2: *(__u16 *)res = *(volatile __u16 *)p; break; \
242 case 4: *(__u32 *)res = *(volatile __u32 *)p; break; \
243 case 8: *(__u64 *)res = *(volatile __u64 *)p; break; \
244 default: \
245 barrier(); \
246 __builtin_memcpy((void *)res, (const void *)p, size); \
247 barrier(); \
248 } \
249 })
250
251 static __always_inline
252 void __read_once_size(const volatile void *p, void *res, int size)
253 {
254 __READ_ONCE_SIZE;
255 }
256
257 #ifdef CONFIG_KASAN
258 /*
259 * This function is not 'inline' because __no_sanitize_address confilcts
260 * with inlining. Attempt to inline it may cause a build failure.
261 * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
262 * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
263 */
264 static __no_sanitize_address __maybe_unused
265 void __read_once_size_nocheck(const volatile void *p, void *res, int size)
266 {
267 __READ_ONCE_SIZE;
268 }
269 #else
270 static __always_inline
271 void __read_once_size_nocheck(const volatile void *p, void *res, int size)
272 {
273 __READ_ONCE_SIZE;
274 }
275 #endif
276
277 static __always_inline void __write_once_size(volatile void *p, void *res, int size)
278 {
279 switch (size) {
280 case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
281 case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
282 case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
283 case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
284 default:
285 barrier();
286 __builtin_memcpy((void *)p, (const void *)res, size);
287 barrier();
288 }
289 }
290
291 /*
292 * Prevent the compiler from merging or refetching reads or writes. The
293 * compiler is also forbidden from reordering successive instances of
294 * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the
295 * compiler is aware of some particular ordering. One way to make the
296 * compiler aware of ordering is to put the two invocations of READ_ONCE,
297 * WRITE_ONCE or ACCESS_ONCE() in different C statements.
298 *
299 * In contrast to ACCESS_ONCE these two macros will also work on aggregate
300 * data types like structs or unions. If the size of the accessed data
301 * type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
302 * READ_ONCE() and WRITE_ONCE() will fall back to memcpy(). There's at
303 * least two memcpy()s: one for the __builtin_memcpy() and then one for
304 * the macro doing the copy of variable - '__u' allocated on the stack.
305 *
306 * Their two major use cases are: (1) Mediating communication between
307 * process-level code and irq/NMI handlers, all running on the same CPU,
308 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
309 * mutilate accesses that either do not require ordering or that interact
310 * with an explicit memory barrier or atomic instruction that provides the
311 * required ordering.
312 */
313
314 #define __READ_ONCE(x, check) \
315 ({ \
316 union { typeof(x) __val; char __c[1]; } __u; \
317 if (check) \
318 __read_once_size(&(x), __u.__c, sizeof(x)); \
319 else \
320 __read_once_size_nocheck(&(x), __u.__c, sizeof(x)); \
321 __u.__val; \
322 })
323 #define READ_ONCE(x) __READ_ONCE(x, 1)
324
325 /*
326 * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need
327 * to hide memory access from KASAN.
328 */
329 #define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)
330
331 #define WRITE_ONCE(x, val) \
332 ({ \
333 union { typeof(x) __val; char __c[1]; } __u = \
334 { .__val = (__force typeof(x)) (val) }; \
335 __write_once_size(&(x), __u.__c, sizeof(x)); \
336 __u.__val; \
337 })
338
339 #endif /* __KERNEL__ */
340
341 #endif /* __ASSEMBLY__ */
342
343 #ifdef __KERNEL__
344 /*
345 * Allow us to mark functions as 'deprecated' and have gcc emit a nice
346 * warning for each use, in hopes of speeding the functions removal.
347 * Usage is:
348 * int __deprecated foo(void)
349 */
350 #ifndef __deprecated
351 # define __deprecated /* unimplemented */
352 #endif
353
354 #ifdef MODULE
355 #define __deprecated_for_modules __deprecated
356 #else
357 #define __deprecated_for_modules
358 #endif
359
360 #ifndef __must_check
361 #define __must_check
362 #endif
363
364 #ifndef CONFIG_ENABLE_MUST_CHECK
365 #undef __must_check
366 #define __must_check
367 #endif
368 #ifndef CONFIG_ENABLE_WARN_DEPRECATED
369 #undef __deprecated
370 #undef __deprecated_for_modules
371 #define __deprecated
372 #define __deprecated_for_modules
373 #endif
374
375 #ifndef __malloc
376 #define __malloc
377 #endif
378
379 /*
380 * Allow us to avoid 'defined but not used' warnings on functions and data,
381 * as well as force them to be emitted to the assembly file.
382 *
383 * As of gcc 3.4, static functions that are not marked with attribute((used))
384 * may be elided from the assembly file. As of gcc 3.4, static data not so
385 * marked will not be elided, but this may change in a future gcc version.
386 *
387 * NOTE: Because distributions shipped with a backported unit-at-a-time
388 * compiler in gcc 3.3, we must define __used to be __attribute__((used))
389 * for gcc >=3.3 instead of 3.4.
390 *
391 * In prior versions of gcc, such functions and data would be emitted, but
392 * would be warned about except with attribute((unused)).
393 *
394 * Mark functions that are referenced only in inline assembly as __used so
395 * the code is emitted even though it appears to be unreferenced.
396 */
397 #ifndef __used
398 # define __used /* unimplemented */
399 #endif
400
401 #ifndef __maybe_unused
402 # define __maybe_unused /* unimplemented */
403 #endif
404
405 #ifndef __always_unused
406 # define __always_unused /* unimplemented */
407 #endif
408
409 #ifndef noinline
410 #define noinline
411 #endif
412
413 /*
414 * Rather then using noinline to prevent stack consumption, use
415 * noinline_for_stack instead. For documentation reasons.
416 */
417 #define noinline_for_stack noinline
418
419 #ifndef __always_inline
420 #define __always_inline inline
421 #endif
422
423 #endif /* __KERNEL__ */
424
425 /*
426 * From the GCC manual:
427 *
428 * Many functions do not examine any values except their arguments,
429 * and have no effects except the return value. Basically this is
430 * just slightly more strict class than the `pure' attribute above,
431 * since function is not allowed to read global memory.
432 *
433 * Note that a function that has pointer arguments and examines the
434 * data pointed to must _not_ be declared `const'. Likewise, a
435 * function that calls a non-`const' function usually must not be
436 * `const'. It does not make sense for a `const' function to return
437 * `void'.
438 */
439 #ifndef __attribute_const__
440 # define __attribute_const__ /* unimplemented */
441 #endif
442
443 #ifndef __latent_entropy
444 # define __latent_entropy
445 #endif
446
447 /*
448 * Tell gcc if a function is cold. The compiler will assume any path
449 * directly leading to the call is unlikely.
450 */
451
452 #ifndef __cold
453 #define __cold
454 #endif
455
456 /* Simple shorthand for a section definition */
457 #ifndef __section
458 # define __section(S) __attribute__ ((__section__(#S)))
459 #endif
460
461 #ifndef __visible
462 #define __visible
463 #endif
464
465 /*
466 * Assume alignment of return value.
467 */
468 #ifndef __assume_aligned
469 #define __assume_aligned(a, ...)
470 #endif
471
472
473 /* Are two types/vars the same type (ignoring qualifiers)? */
474 #ifndef __same_type
475 # define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
476 #endif
477
478 /* Is this type a native word size -- useful for atomic operations */
479 #ifndef __native_word
480 # define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
481 #endif
482
483 /* Compile time object size, -1 for unknown */
484 #ifndef __compiletime_object_size
485 # define __compiletime_object_size(obj) -1
486 #endif
487 #ifndef __compiletime_warning
488 # define __compiletime_warning(message)
489 #endif
490 #ifndef __compiletime_error
491 # define __compiletime_error(message)
492 /*
493 * Sparse complains of variable sized arrays due to the temporary variable in
494 * __compiletime_assert. Unfortunately we can't just expand it out to make
495 * sparse see a constant array size without breaking compiletime_assert on old
496 * versions of GCC (e.g. 4.2.4), so hide the array from sparse altogether.
497 */
498 # ifndef __CHECKER__
499 # define __compiletime_error_fallback(condition) \
500 do { } while (0)
501 # endif
502 #endif
503 #ifndef __compiletime_error_fallback
504 # define __compiletime_error_fallback(condition) do { } while (0)
505 #endif
506
507 #define __compiletime_assert(condition, msg, prefix, suffix) \
508 do { \
509 bool __cond = !(condition); \
510 extern void prefix ## suffix(void) __compiletime_error(msg); \
511 if (__cond) \
512 prefix ## suffix(); \
513 __compiletime_error_fallback(__cond); \
514 } while (0)
515
516 #define _compiletime_assert(condition, msg, prefix, suffix) \
517 __compiletime_assert(condition, msg, prefix, suffix)
518
519 /**
520 * compiletime_assert - break build and emit msg if condition is false
521 * @condition: a compile-time constant condition to check
522 * @msg: a message to emit if condition is false
523 *
524 * In tradition of POSIX assert, this macro will break the build if the
525 * supplied condition is *false*, emitting the supplied error message if the
526 * compiler has support to do so.
527 */
528 #define compiletime_assert(condition, msg) \
529 _compiletime_assert(condition, msg, __compiletime_assert_, __LINE__)
530
531 #define compiletime_assert_atomic_type(t) \
532 compiletime_assert(__native_word(t), \
533 "Need native word sized stores/loads for atomicity.")
534
535 /*
536 * Prevent the compiler from merging or refetching accesses. The compiler
537 * is also forbidden from reordering successive instances of ACCESS_ONCE(),
538 * but only when the compiler is aware of some particular ordering. One way
539 * to make the compiler aware of ordering is to put the two invocations of
540 * ACCESS_ONCE() in different C statements.
541 *
542 * ACCESS_ONCE will only work on scalar types. For union types, ACCESS_ONCE
543 * on a union member will work as long as the size of the member matches the
544 * size of the union and the size is smaller than word size.
545 *
546 * The major use cases of ACCESS_ONCE used to be (1) Mediating communication
547 * between process-level code and irq/NMI handlers, all running on the same CPU,
548 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
549 * mutilate accesses that either do not require ordering or that interact
550 * with an explicit memory barrier or atomic instruction that provides the
551 * required ordering.
552 *
553 * If possible use READ_ONCE()/WRITE_ONCE() instead.
554 */
555 #define __ACCESS_ONCE(x) ({ \
556 __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
557 (volatile typeof(x) *)&(x); })
558 #define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))
559
560 /**
561 * lockless_dereference() - safely load a pointer for later dereference
562 * @p: The pointer to load
563 *
564 * Similar to rcu_dereference(), but for situations where the pointed-to
565 * object's lifetime is managed by something other than RCU. That
566 * "something other" might be reference counting or simple immortality.
567 *
568 * The seemingly unused variable ___typecheck_p validates that @p is
569 * indeed a pointer type by using a pointer to typeof(*p) as the type.
570 * Taking a pointer to typeof(*p) again is needed in case p is void *.
571 */
572 #define lockless_dereference(p) \
573 ({ \
574 typeof(p) _________p1 = READ_ONCE(p); \
575 typeof(*(p)) *___typecheck_p __maybe_unused; \
576 smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
577 (_________p1); \
578 })
579
580 #endif /* __LINUX_COMPILER_H */ 1 /*
2 * kref.h - library routines for handling generic reference counted objects
3 *
4 * Copyright (C) 2004 Greg Kroah-Hartman <greg@kroah.com>
5 * Copyright (C) 2004 IBM Corp.
6 *
7 * based on kobject.h which was:
8 * Copyright (C) 2002-2003 Patrick Mochel <mochel@osdl.org>
9 * Copyright (C) 2002-2003 Open Source Development Labs
10 *
11 * This file is released under the GPLv2.
12 *
13 */
14
15 #ifndef _KREF_H_
16 #define _KREF_H_
17
18 #include <linux/spinlock.h>
19 #include <linux/refcount.h>
20
21 struct kref {
22 refcount_t refcount;
23 };
24
25 #define KREF_INIT(n) { .refcount = REFCOUNT_INIT(n), }
26
27 /**
28 * kref_init - initialize object.
29 * @kref: object in question.
30 */
31 static inline void kref_init(struct kref *kref)
32 {
33 refcount_set(&kref->refcount, 1);
34 }
35
36 static inline unsigned int kref_read(const struct kref *kref)
37 {
38 return refcount_read(&kref->refcount);
39 }
40
41 /**
42 * kref_get - increment refcount for object.
43 * @kref: object.
44 */
45 static inline void kref_get(struct kref *kref)
46 {
47 refcount_inc(&kref->refcount);
48 }
49
50 /**
51 * kref_put - decrement refcount for object.
52 * @kref: object.
53 * @release: pointer to the function that will clean up the object when the
54 * last reference to the object is released.
55 * This pointer is required, and it is not acceptable to pass kfree
56 * in as this function. If the caller does pass kfree to this
57 * function, you will be publicly mocked mercilessly by the kref
58 * maintainer, and anyone else who happens to notice it. You have
59 * been warned.
60 *
61 * Decrement the refcount, and if 0, call release().
62 * Return 1 if the object was removed, otherwise return 0. Beware, if this
63 * function returns 0, you still can not count on the kref from remaining in
64 * memory. Only use the return value if you want to see if the kref is now
65 * gone, not present.
66 */
67 static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref))
68 {
69 WARN_ON(release == NULL);
70
71 if (refcount_dec_and_test(&kref->refcount)) {
72 release(kref);
73 return 1;
74 }
75 return 0;
76 }
77
78 static inline int kref_put_mutex(struct kref *kref,
79 void (*release)(struct kref *kref),
80 struct mutex *lock)
81 {
82 WARN_ON(release == NULL);
83
84 if (refcount_dec_and_mutex_lock(&kref->refcount, lock)) {
85 release(kref);
86 return 1;
87 }
88 return 0;
89 }
90
91 static inline int kref_put_lock(struct kref *kref,
92 void (*release)(struct kref *kref),
93 spinlock_t *lock)
94 {
95 WARN_ON(release == NULL);
96
97 if (refcount_dec_and_lock(&kref->refcount, lock)) {
98 release(kref);
99 return 1;
100 }
101 return 0;
102 }
103
104 /**
105 * kref_get_unless_zero - Increment refcount for object unless it is zero.
106 * @kref: object.
107 *
108 * Return non-zero if the increment succeeded. Otherwise return 0.
109 *
110 * This function is intended to simplify locking around refcounting for
111 * objects that can be looked up from a lookup structure, and which are
112 * removed from that lookup structure in the object destructor.
113 * Operations on such objects require at least a read lock around
114 * lookup + kref_get, and a write lock around kref_put + remove from lookup
115 * structure. Furthermore, RCU implementations become extremely tricky.
116 * With a lookup followed by a kref_get_unless_zero *with return value check*
117 * locking in the kref_put path can be deferred to the actual removal from
118 * the lookup structure and RCU lookups become trivial.
119 */
120 static inline int __must_check kref_get_unless_zero(struct kref *kref)
121 {
122 return refcount_inc_not_zero(&kref->refcount);
123 }
124 #endif /* _KREF_H_ */ 1 #ifndef _LINUX_LIST_H
2 #define _LINUX_LIST_H
3
4 #include <linux/types.h>
5 #include <linux/stddef.h>
6 #include <linux/poison.h>
7 #include <linux/const.h>
8 #include <linux/kernel.h>
9
10 /*
11 * Simple doubly linked list implementation.
12 *
13 * Some of the internal functions ("__xxx") are useful when
14 * manipulating whole lists rather than single entries, as
15 * sometimes we already know the next/prev entries and we can
16 * generate better code by using them directly rather than
17 * using the generic single-entry routines.
18 */
19
20 #define LIST_HEAD_INIT(name) { &(name), &(name) }
21
22 #define LIST_HEAD(name) \
23 struct list_head name = LIST_HEAD_INIT(name)
24
25 static inline void INIT_LIST_HEAD(struct list_head *list)
26 {
27 WRITE_ONCE(list->next, list);
28 list->prev = list;
29 }
30
31 #ifdef CONFIG_DEBUG_LIST
32 extern bool __list_add_valid(struct list_head *new,
33 struct list_head *prev,
34 struct list_head *next);
35 extern bool __list_del_entry_valid(struct list_head *entry);
36 #else
37 static inline bool __list_add_valid(struct list_head *new,
38 struct list_head *prev,
39 struct list_head *next)
40 {
41 return true;
42 }
43 static inline bool __list_del_entry_valid(struct list_head *entry)
44 {
45 return true;
46 }
47 #endif
48
49 /*
50 * Insert a new entry between two known consecutive entries.
51 *
52 * This is only for internal list manipulation where we know
53 * the prev/next entries already!
54 */
55 static inline void __list_add(struct list_head *new,
56 struct list_head *prev,
57 struct list_head *next)
58 {
59 if (!__list_add_valid(new, prev, next))
60 return;
61
62 next->prev = new;
63 new->next = next;
64 new->prev = prev;
65 WRITE_ONCE(prev->next, new);
66 }
67
68 /**
69 * list_add - add a new entry
70 * @new: new entry to be added
71 * @head: list head to add it after
72 *
73 * Insert a new entry after the specified head.
74 * This is good for implementing stacks.
75 */
76 static inline void list_add(struct list_head *new, struct list_head *head)
77 {
78 __list_add(new, head, head->next);
79 }
80
81
82 /**
83 * list_add_tail - add a new entry
84 * @new: new entry to be added
85 * @head: list head to add it before
86 *
87 * Insert a new entry before the specified head.
88 * This is useful for implementing queues.
89 */
90 static inline void list_add_tail(struct list_head *new, struct list_head *head)
91 {
92 __list_add(new, head->prev, head);
93 }
94
95 /*
96 * Delete a list entry by making the prev/next entries
97 * point to each other.
98 *
99 * This is only for internal list manipulation where we know
100 * the prev/next entries already!
101 */
102 static inline void __list_del(struct list_head * prev, struct list_head * next)
103 {
104 next->prev = prev;
105 WRITE_ONCE(prev->next, next);
106 }
107
108 /**
109 * list_del - deletes entry from list.
110 * @entry: the element to delete from the list.
111 * Note: list_empty() on entry does not return true after this, the entry is
112 * in an undefined state.
113 */
114 static inline void __list_del_entry(struct list_head *entry)
115 {
116 if (!__list_del_entry_valid(entry))
117 return;
118
119 __list_del(entry->prev, entry->next);
120 }
121
122 static inline void list_del(struct list_head *entry)
123 {
124 __list_del_entry(entry);
125 entry->next = LIST_POISON1;
126 entry->prev = LIST_POISON2;
127 }
128
129 /**
130 * list_replace - replace old entry by new one
131 * @old : the element to be replaced
132 * @new : the new element to insert
133 *
134 * If @old was empty, it will be overwritten.
135 */
136 static inline void list_replace(struct list_head *old,
137 struct list_head *new)
138 {
139 new->next = old->next;
140 new->next->prev = new;
141 new->prev = old->prev;
142 new->prev->next = new;
143 }
144
145 static inline void list_replace_init(struct list_head *old,
146 struct list_head *new)
147 {
148 list_replace(old, new);
149 INIT_LIST_HEAD(old);
150 }
151
152 /**
153 * list_del_init - deletes entry from list and reinitialize it.
154 * @entry: the element to delete from the list.
155 */
156 static inline void list_del_init(struct list_head *entry)
157 {
158 __list_del_entry(entry);
159 INIT_LIST_HEAD(entry);
160 }
161
162 /**
163 * list_move - delete from one list and add as another's head
164 * @list: the entry to move
165 * @head: the head that will precede our entry
166 */
167 static inline void list_move(struct list_head *list, struct list_head *head)
168 {
169 __list_del_entry(list);
170 list_add(list, head);
171 }
172
173 /**
174 * list_move_tail - delete from one list and add as another's tail
175 * @list: the entry to move
176 * @head: the head that will follow our entry
177 */
178 static inline void list_move_tail(struct list_head *list,
179 struct list_head *head)
180 {
181 __list_del_entry(list);
182 list_add_tail(list, head);
183 }
184
185 /**
186 * list_is_last - tests whether @list is the last entry in list @head
187 * @list: the entry to test
188 * @head: the head of the list
189 */
190 static inline int list_is_last(const struct list_head *list,
191 const struct list_head *head)
192 {
193 return list->next == head;
194 }
195
196 /**
197 * list_empty - tests whether a list is empty
198 * @head: the list to test.
199 */
200 static inline int list_empty(const struct list_head *head)
201 {
202 return READ_ONCE(head->next) == head;
203 }
204
205 /**
206 * list_empty_careful - tests whether a list is empty and not being modified
207 * @head: the list to test
208 *
209 * Description:
210 * tests whether a list is empty _and_ checks that no other CPU might be
211 * in the process of modifying either member (next or prev)
212 *
213 * NOTE: using list_empty_careful() without synchronization
214 * can only be safe if the only activity that can happen
215 * to the list entry is list_del_init(). Eg. it cannot be used
216 * if another CPU could re-list_add() it.
217 */
218 static inline int list_empty_careful(const struct list_head *head)
219 {
220 struct list_head *next = head->next;
221 return (next == head) && (next == head->prev);
222 }
223
224 /**
225 * list_rotate_left - rotate the list to the left
226 * @head: the head of the list
227 */
228 static inline void list_rotate_left(struct list_head *head)
229 {
230 struct list_head *first;
231
232 if (!list_empty(head)) {
233 first = head->next;
234 list_move_tail(first, head);
235 }
236 }
237
238 /**
239 * list_is_singular - tests whether a list has just one entry.
240 * @head: the list to test.
241 */
242 static inline int list_is_singular(const struct list_head *head)
243 {
244 return !list_empty(head) && (head->next == head->prev);
245 }
246
247 static inline void __list_cut_position(struct list_head *list,
248 struct list_head *head, struct list_head *entry)
249 {
250 struct list_head *new_first = entry->next;
251 list->next = head->next;
252 list->next->prev = list;
253 list->prev = entry;
254 entry->next = list;
255 head->next = new_first;
256 new_first->prev = head;
257 }
258
259 /**
260 * list_cut_position - cut a list into two
261 * @list: a new list to add all removed entries
262 * @head: a list with entries
263 * @entry: an entry within head, could be the head itself
264 * and if so we won't cut the list
265 *
266 * This helper moves the initial part of @head, up to and
267 * including @entry, from @head to @list. You should
268 * pass on @entry an element you know is on @head. @list
269 * should be an empty list or a list you do not care about
270 * losing its data.
271 *
272 */
273 static inline void list_cut_position(struct list_head *list,
274 struct list_head *head, struct list_head *entry)
275 {
276 if (list_empty(head))
277 return;
278 if (list_is_singular(head) &&
279 (head->next != entry && head != entry))
280 return;
281 if (entry == head)
282 INIT_LIST_HEAD(list);
283 else
284 __list_cut_position(list, head, entry);
285 }
286
287 static inline void __list_splice(const struct list_head *list,
288 struct list_head *prev,
289 struct list_head *next)
290 {
291 struct list_head *first = list->next;
292 struct list_head *last = list->prev;
293
294 first->prev = prev;
295 prev->next = first;
296
297 last->next = next;
298 next->prev = last;
299 }
300
301 /**
302 * list_splice - join two lists, this is designed for stacks
303 * @list: the new list to add.
304 * @head: the place to add it in the first list.
305 */
306 static inline void list_splice(const struct list_head *list,
307 struct list_head *head)
308 {
309 if (!list_empty(list))
310 __list_splice(list, head, head->next);
311 }
312
313 /**
314 * list_splice_tail - join two lists, each list being a queue
315 * @list: the new list to add.
316 * @head: the place to add it in the first list.
317 */
318 static inline void list_splice_tail(struct list_head *list,
319 struct list_head *head)
320 {
321 if (!list_empty(list))
322 __list_splice(list, head->prev, head);
323 }
324
325 /**
326 * list_splice_init - join two lists and reinitialise the emptied list.
327 * @list: the new list to add.
328 * @head: the place to add it in the first list.
329 *
330 * The list at @list is reinitialised
331 */
332 static inline void list_splice_init(struct list_head *list,
333 struct list_head *head)
334 {
335 if (!list_empty(list)) {
336 __list_splice(list, head, head->next);
337 INIT_LIST_HEAD(list);
338 }
339 }
340
341 /**
342 * list_splice_tail_init - join two lists and reinitialise the emptied list
343 * @list: the new list to add.
344 * @head: the place to add it in the first list.
345 *
346 * Each of the lists is a queue.
347 * The list at @list is reinitialised
348 */
349 static inline void list_splice_tail_init(struct list_head *list,
350 struct list_head *head)
351 {
352 if (!list_empty(list)) {
353 __list_splice(list, head->prev, head);
354 INIT_LIST_HEAD(list);
355 }
356 }
357
358 /**
359 * list_entry - get the struct for this entry
360 * @ptr: the &struct list_head pointer.
361 * @type: the type of the struct this is embedded in.
362 * @member: the name of the list_head within the struct.
363 */
364 #define list_entry(ptr, type, member) \
365 container_of(ptr, type, member)
366
367 /**
368 * list_first_entry - get the first element from a list
369 * @ptr: the list head to take the element from.
370 * @type: the type of the struct this is embedded in.
371 * @member: the name of the list_head within the struct.
372 *
373 * Note, that list is expected to be not empty.
374 */
375 #define list_first_entry(ptr, type, member) \
376 list_entry((ptr)->next, type, member)
377
378 /**
379 * list_last_entry - get the last element from a list
380 * @ptr: the list head to take the element from.
381 * @type: the type of the struct this is embedded in.
382 * @member: the name of the list_head within the struct.
383 *
384 * Note, that list is expected to be not empty.
385 */
386 #define list_last_entry(ptr, type, member) \
387 list_entry((ptr)->prev, type, member)
388
389 /**
390 * list_first_entry_or_null - get the first element from a list
391 * @ptr: the list head to take the element from.
392 * @type: the type of the struct this is embedded in.
393 * @member: the name of the list_head within the struct.
394 *
395 * Note that if the list is empty, it returns NULL.
396 */
397 #define list_first_entry_or_null(ptr, type, member) ({ \
398 struct list_head *head__ = (ptr); \
399 struct list_head *pos__ = READ_ONCE(head__->next); \
400 pos__ != head__ ? list_entry(pos__, type, member) : NULL; \
401 })
402
403 /**
404 * list_next_entry - get the next element in list
405 * @pos: the type * to cursor
406 * @member: the name of the list_head within the struct.
407 */
408 #define list_next_entry(pos, member) \
409 list_entry((pos)->member.next, typeof(*(pos)), member)
410
411 /**
412 * list_prev_entry - get the prev element in list
413 * @pos: the type * to cursor
414 * @member: the name of the list_head within the struct.
415 */
416 #define list_prev_entry(pos, member) \
417 list_entry((pos)->member.prev, typeof(*(pos)), member)
418
419 /**
420 * list_for_each - iterate over a list
421 * @pos: the &struct list_head to use as a loop cursor.
422 * @head: the head for your list.
423 */
424 #define list_for_each(pos, head) \
425 for (pos = (head)->next; pos != (head); pos = pos->next)
426
427 /**
428 * list_for_each_prev - iterate over a list backwards
429 * @pos: the &struct list_head to use as a loop cursor.
430 * @head: the head for your list.
431 */
432 #define list_for_each_prev(pos, head) \
433 for (pos = (head)->prev; pos != (head); pos = pos->prev)
434
435 /**
436 * list_for_each_safe - iterate over a list safe against removal of list entry
437 * @pos: the &struct list_head to use as a loop cursor.
438 * @n: another &struct list_head to use as temporary storage
439 * @head: the head for your list.
440 */
441 #define list_for_each_safe(pos, n, head) \
442 for (pos = (head)->next, n = pos->next; pos != (head); \
443 pos = n, n = pos->next)
444
445 /**
446 * list_for_each_prev_safe - iterate over a list backwards safe against removal of list entry
447 * @pos: the &struct list_head to use as a loop cursor.
448 * @n: another &struct list_head to use as temporary storage
449 * @head: the head for your list.
450 */
451 #define list_for_each_prev_safe(pos, n, head) \
452 for (pos = (head)->prev, n = pos->prev; \
453 pos != (head); \
454 pos = n, n = pos->prev)
455
456 /**
457 * list_for_each_entry - iterate over list of given type
458 * @pos: the type * to use as a loop cursor.
459 * @head: the head for your list.
460 * @member: the name of the list_head within the struct.
461 */
462 #define list_for_each_entry(pos, head, member) \
463 for (pos = list_first_entry(head, typeof(*pos), member); \
464 &pos->member != (head); \
465 pos = list_next_entry(pos, member))
466
467 /**
468 * list_for_each_entry_reverse - iterate backwards over list of given type.
469 * @pos: the type * to use as a loop cursor.
470 * @head: the head for your list.
471 * @member: the name of the list_head within the struct.
472 */
473 #define list_for_each_entry_reverse(pos, head, member) \
474 for (pos = list_last_entry(head, typeof(*pos), member); \
475 &pos->member != (head); \
476 pos = list_prev_entry(pos, member))
477
478 /**
479 * list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue()
480 * @pos: the type * to use as a start point
481 * @head: the head of the list
482 * @member: the name of the list_head within the struct.
483 *
484 * Prepares a pos entry for use as a start point in list_for_each_entry_continue().
485 */
486 #define list_prepare_entry(pos, head, member) \
487 ((pos) ? : list_entry(head, typeof(*pos), member))
488
489 /**
490 * list_for_each_entry_continue - continue iteration over list of given type
491 * @pos: the type * to use as a loop cursor.
492 * @head: the head for your list.
493 * @member: the name of the list_head within the struct.
494 *
495 * Continue to iterate over list of given type, continuing after
496 * the current position.
497 */
498 #define list_for_each_entry_continue(pos, head, member) \
499 for (pos = list_next_entry(pos, member); \
500 &pos->member != (head); \
501 pos = list_next_entry(pos, member))
502
503 /**
504 * list_for_each_entry_continue_reverse - iterate backwards from the given point
505 * @pos: the type * to use as a loop cursor.
506 * @head: the head for your list.
507 * @member: the name of the list_head within the struct.
508 *
509 * Start to iterate over list of given type backwards, continuing after
510 * the current position.
511 */
512 #define list_for_each_entry_continue_reverse(pos, head, member) \
513 for (pos = list_prev_entry(pos, member); \
514 &pos->member != (head); \
515 pos = list_prev_entry(pos, member))
516
517 /**
518 * list_for_each_entry_from - iterate over list of given type from the current point
519 * @pos: the type * to use as a loop cursor.
520 * @head: the head for your list.
521 * @member: the name of the list_head within the struct.
522 *
523 * Iterate over list of given type, continuing from current position.
524 */
525 #define list_for_each_entry_from(pos, head, member) \
526 for (; &pos->member != (head); \
527 pos = list_next_entry(pos, member))
528
529 /**
530 * list_for_each_entry_from_reverse - iterate backwards over list of given type
531 * from the current point
532 * @pos: the type * to use as a loop cursor.
533 * @head: the head for your list.
534 * @member: the name of the list_head within the struct.
535 *
536 * Iterate backwards over list of given type, continuing from current position.
537 */
538 #define list_for_each_entry_from_reverse(pos, head, member) \
539 for (; &pos->member != (head); \
540 pos = list_prev_entry(pos, member))
541
542 /**
543 * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
544 * @pos: the type * to use as a loop cursor.
545 * @n: another type * to use as temporary storage
546 * @head: the head for your list.
547 * @member: the name of the list_head within the struct.
548 */
549 #define list_for_each_entry_safe(pos, n, head, member) \
550 for (pos = list_first_entry(head, typeof(*pos), member), \
551 n = list_next_entry(pos, member); \
552 &pos->member != (head); \
553 pos = n, n = list_next_entry(n, member))
554
555 /**
556 * list_for_each_entry_safe_continue - continue list iteration safe against removal
557 * @pos: the type * to use as a loop cursor.
558 * @n: another type * to use as temporary storage
559 * @head: the head for your list.
560 * @member: the name of the list_head within the struct.
561 *
562 * Iterate over list of given type, continuing after current point,
563 * safe against removal of list entry.
564 */
565 #define list_for_each_entry_safe_continue(pos, n, head, member) \
566 for (pos = list_next_entry(pos, member), \
567 n = list_next_entry(pos, member); \
568 &pos->member != (head); \
569 pos = n, n = list_next_entry(n, member))
570
571 /**
572 * list_for_each_entry_safe_from - iterate over list from current point safe against removal
573 * @pos: the type * to use as a loop cursor.
574 * @n: another type * to use as temporary storage
575 * @head: the head for your list.
576 * @member: the name of the list_head within the struct.
577 *
578 * Iterate over list of given type from current point, safe against
579 * removal of list entry.
580 */
581 #define list_for_each_entry_safe_from(pos, n, head, member) \
582 for (n = list_next_entry(pos, member); \
583 &pos->member != (head); \
584 pos = n, n = list_next_entry(n, member))
585
586 /**
587 * list_for_each_entry_safe_reverse - iterate backwards over list safe against removal
588 * @pos: the type * to use as a loop cursor.
589 * @n: another type * to use as temporary storage
590 * @head: the head for your list.
591 * @member: the name of the list_head within the struct.
592 *
593 * Iterate backwards over list of given type, safe against removal
594 * of list entry.
595 */
596 #define list_for_each_entry_safe_reverse(pos, n, head, member) \
597 for (pos = list_last_entry(head, typeof(*pos), member), \
598 n = list_prev_entry(pos, member); \
599 &pos->member != (head); \
600 pos = n, n = list_prev_entry(n, member))
601
602 /**
603 * list_safe_reset_next - reset a stale list_for_each_entry_safe loop
604 * @pos: the loop cursor used in the list_for_each_entry_safe loop
605 * @n: temporary storage used in list_for_each_entry_safe
606 * @member: the name of the list_head within the struct.
607 *
608 * list_safe_reset_next is not safe to use in general if the list may be
609 * modified concurrently (eg. the lock is dropped in the loop body). An
610 * exception to this is if the cursor element (pos) is pinned in the list,
611 * and list_safe_reset_next is called after re-taking the lock and before
612 * completing the current iteration of the loop body.
613 */
614 #define list_safe_reset_next(pos, n, member) \
615 n = list_next_entry(pos, member)
616
617 /*
618 * Double linked lists with a single pointer list head.
619 * Mostly useful for hash tables where the two pointer list head is
620 * too wasteful.
621 * You lose the ability to access the tail in O(1).
622 */
623
624 #define HLIST_HEAD_INIT { .first = NULL }
625 #define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
626 #define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
627 static inline void INIT_HLIST_NODE(struct hlist_node *h)
628 {
629 h->next = NULL;
630 h->pprev = NULL;
631 }
632
633 static inline int hlist_unhashed(const struct hlist_node *h)
634 {
635 return !h->pprev;
636 }
637
638 static inline int hlist_empty(const struct hlist_head *h)
639 {
640 return !READ_ONCE(h->first);
641 }
642
643 static inline void __hlist_del(struct hlist_node *n)
644 {
645 struct hlist_node *next = n->next;
646 struct hlist_node **pprev = n->pprev;
647
648 WRITE_ONCE(*pprev, next);
649 if (next)
650 next->pprev = pprev;
651 }
652
653 static inline void hlist_del(struct hlist_node *n)
654 {
655 __hlist_del(n);
656 n->next = LIST_POISON1;
657 n->pprev = LIST_POISON2;
658 }
659
660 static inline void hlist_del_init(struct hlist_node *n)
661 {
662 if (!hlist_unhashed(n)) {
663 __hlist_del(n);
664 INIT_HLIST_NODE(n);
665 }
666 }
667
668 static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
669 {
670 struct hlist_node *first = h->first;
671 n->next = first;
672 if (first)
673 first->pprev = &n->next;
674 WRITE_ONCE(h->first, n);
675 n->pprev = &h->first;
676 }
677
678 /* next must be != NULL */
679 static inline void hlist_add_before(struct hlist_node *n,
680 struct hlist_node *next)
681 {
682 n->pprev = next->pprev;
683 n->next = next;
684 next->pprev = &n->next;
685 WRITE_ONCE(*(n->pprev), n);
686 }
687
688 static inline void hlist_add_behind(struct hlist_node *n,
689 struct hlist_node *prev)
690 {
691 n->next = prev->next;
692 WRITE_ONCE(prev->next, n);
693 n->pprev = &prev->next;
694
695 if (n->next)
696 n->next->pprev = &n->next;
697 }
698
699 /* after that we'll appear to be on some hlist and hlist_del will work */
700 static inline void hlist_add_fake(struct hlist_node *n)
701 {
702 n->pprev = &n->next;
703 }
704
705 static inline bool hlist_fake(struct hlist_node *h)
706 {
707 return h->pprev == &h->next;
708 }
709
710 /*
711 * Check whether the node is the only node of the head without
712 * accessing head:
713 */
714 static inline bool
715 hlist_is_singular_node(struct hlist_node *n, struct hlist_head *h)
716 {
717 return !n->next && n->pprev == &h->first;
718 }
719
720 /*
721 * Move a list from one list head to another. Fixup the pprev
722 * reference of the first entry if it exists.
723 */
724 static inline void hlist_move_list(struct hlist_head *old,
725 struct hlist_head *new)
726 {
727 new->first = old->first;
728 if (new->first)
729 new->first->pprev = &new->first;
730 old->first = NULL;
731 }
732
733 #define hlist_entry(ptr, type, member) container_of(ptr,type,member)
734
735 #define hlist_for_each(pos, head) \
736 for (pos = (head)->first; pos ; pos = pos->next)
737
738 #define hlist_for_each_safe(pos, n, head) \
739 for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
740 pos = n)
741
742 #define hlist_entry_safe(ptr, type, member) \
743 ({ typeof(ptr) ____ptr = (ptr); \
744 ____ptr ? hlist_entry(____ptr, type, member) : NULL; \
745 })
746
747 /**
748 * hlist_for_each_entry - iterate over list of given type
749 * @pos: the type * to use as a loop cursor.
750 * @head: the head for your list.
751 * @member: the name of the hlist_node within the struct.
752 */
753 #define hlist_for_each_entry(pos, head, member) \
754 for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member);\
755 pos; \
756 pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
757
758 /**
759 * hlist_for_each_entry_continue - iterate over a hlist continuing after current point
760 * @pos: the type * to use as a loop cursor.
761 * @member: the name of the hlist_node within the struct.
762 */
763 #define hlist_for_each_entry_continue(pos, member) \
764 for (pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member);\
765 pos; \
766 pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
767
768 /**
769 * hlist_for_each_entry_from - iterate over a hlist continuing from current point
770 * @pos: the type * to use as a loop cursor.
771 * @member: the name of the hlist_node within the struct.
772 */
773 #define hlist_for_each_entry_from(pos, member) \
774 for (; pos; \
775 pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
776
777 /**
778 * hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry
779 * @pos: the type * to use as a loop cursor.
780 * @n: another &struct hlist_node to use as temporary storage
781 * @head: the head for your list.
782 * @member: the name of the hlist_node within the struct.
783 */
784 #define hlist_for_each_entry_safe(pos, n, head, member) \
785 for (pos = hlist_entry_safe((head)->first, typeof(*pos), member);\
786 pos && ({ n = pos->member.next; 1; }); \
787 pos = hlist_entry_safe(n, typeof(*pos), member))
788
789 #endif 1 #ifndef _LINUX_MM_H
2 #define _LINUX_MM_H
3
4 #include <linux/errno.h>
5
6 #ifdef __KERNEL__
7
8 #include <linux/mmdebug.h>
9 #include <linux/gfp.h>
10 #include <linux/bug.h>
11 #include <linux/list.h>
12 #include <linux/mmzone.h>
13 #include <linux/rbtree.h>
14 #include <linux/atomic.h>
15 #include <linux/debug_locks.h>
16 #include <linux/mm_types.h>
17 #include <linux/range.h>
18 #include <linux/pfn.h>
19 #include <linux/percpu-refcount.h>
20 #include <linux/bit_spinlock.h>
21 #include <linux/shrinker.h>
22 #include <linux/resource.h>
23 #include <linux/page_ext.h>
24 #include <linux/err.h>
25 #include <linux/page_ref.h>
26
27 struct mempolicy;
28 struct anon_vma;
29 struct anon_vma_chain;
30 struct file_ra_state;
31 struct user_struct;
32 struct writeback_control;
33 struct bdi_writeback;
34
35 #ifndef CONFIG_NEED_MULTIPLE_NODES /* Don't use mapnrs, do it properly */
36 extern unsigned long max_mapnr;
37
38 static inline void set_max_mapnr(unsigned long limit)
39 {
40 max_mapnr = limit;
41 }
42 #else
43 static inline void set_max_mapnr(unsigned long limit) { }
44 #endif
45
46 extern unsigned long totalram_pages;
47 extern void * high_memory;
48 extern int page_cluster;
49
50 #ifdef CONFIG_SYSCTL
51 extern int sysctl_legacy_va_layout;
52 #else
53 #define sysctl_legacy_va_layout 0
54 #endif
55
56 #ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
57 extern const int mmap_rnd_bits_min;
58 extern const int mmap_rnd_bits_max;
59 extern int mmap_rnd_bits __read_mostly;
60 #endif
61 #ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
62 extern const int mmap_rnd_compat_bits_min;
63 extern const int mmap_rnd_compat_bits_max;
64 extern int mmap_rnd_compat_bits __read_mostly;
65 #endif
66
67 #include <asm/page.h>
68 #include <asm/pgtable.h>
69 #include <asm/processor.h>
70
71 #ifndef __pa_symbol
72 #define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0))
73 #endif
74
75 #ifndef page_to_virt
76 #define page_to_virt(x) __va(PFN_PHYS(page_to_pfn(x)))
77 #endif
78
79 #ifndef lm_alias
80 #define lm_alias(x) __va(__pa_symbol(x))
81 #endif
82
83 /*
84 * To prevent common memory management code establishing
85 * a zero page mapping on a read fault.
86 * This macro should be defined within <asm/pgtable.h>.
87 * s390 does this to prevent multiplexing of hardware bits
88 * related to the physical page in case of virtualization.
89 */
90 #ifndef mm_forbids_zeropage
91 #define mm_forbids_zeropage(X) (0)
92 #endif
93
94 /*
95 * Default maximum number of active map areas, this limits the number of vmas
96 * per mm struct. Users can overwrite this number by sysctl but there is a
97 * problem.
98 *
99 * When a program's coredump is generated as ELF format, a section is created
100 * per a vma. In ELF, the number of sections is represented in unsigned short.
101 * This means the number of sections should be smaller than 65535 at coredump.
102 * Because the kernel adds some informative sections to a image of program at
103 * generating coredump, we need some margin. The number of extra sections is
104 * 1-3 now and depends on arch. We use "5" as safe margin, here.
105 *
106 * ELF extended numbering allows more than 65535 sections, so 16-bit bound is
107 * not a hard limit any more. Although some userspace tools can be surprised by
108 * that.
109 */
110 #define MAPCOUNT_ELF_CORE_MARGIN (5)
111 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
112
113 extern int sysctl_max_map_count;
114
115 extern unsigned long sysctl_user_reserve_kbytes;
116 extern unsigned long sysctl_admin_reserve_kbytes;
117
118 extern int sysctl_overcommit_memory;
119 extern int sysctl_overcommit_ratio;
120 extern unsigned long sysctl_overcommit_kbytes;
121
122 extern int overcommit_ratio_handler(struct ctl_table *, int, void __user *,
123 size_t *, loff_t *);
124 extern int overcommit_kbytes_handler(struct ctl_table *, int, void __user *,
125 size_t *, loff_t *);
126
127 #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
128
129 /* to align the pointer to the (next) page boundary */
130 #define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
131
132 /* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */
133 #define PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)(addr), PAGE_SIZE)
134
135 /*
136 * Linux kernel virtual memory manager primitives.
137 * The idea being to have a "virtual" mm in the same way
138 * we have a virtual fs - giving a cleaner interface to the
139 * mm details, and allowing different kinds of memory mappings
140 * (from shared memory to executable loading to arbitrary
141 * mmap() functions).
142 */
143
144 extern struct kmem_cache *vm_area_cachep;
145
146 #ifndef CONFIG_MMU
147 extern struct rb_root nommu_region_tree;
148 extern struct rw_semaphore nommu_region_sem;
149
150 extern unsigned int kobjsize(const void *objp);
151 #endif
152
153 /*
154 * vm_flags in vm_area_struct, see mm_types.h.
155 * When changing, update also include/trace/events/mmflags.h
156 */
157 #define VM_NONE 0x00000000
158
159 #define VM_READ 0x00000001 /* currently active flags */
160 #define VM_WRITE 0x00000002
161 #define VM_EXEC 0x00000004
162 #define VM_SHARED 0x00000008
163
164 /* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. */
165 #define VM_MAYREAD 0x00000010 /* limits for mprotect() etc */
166 #define VM_MAYWRITE 0x00000020
167 #define VM_MAYEXEC 0x00000040
168 #define VM_MAYSHARE 0x00000080
169
170 #define VM_GROWSDOWN 0x00000100 /* general info on the segment */
171 #define VM_UFFD_MISSING 0x00000200 /* missing pages tracking */
172 #define VM_PFNMAP 0x00000400 /* Page-ranges managed without "struct page", just pure PFN */
173 #define VM_DENYWRITE 0x00000800 /* ETXTBSY on write attempts.. */
174 #define VM_UFFD_WP 0x00001000 /* wrprotect pages tracking */
175
176 #define VM_LOCKED 0x00002000
177 #define VM_IO 0x00004000 /* Memory mapped I/O or similar */
178
179 /* Used by sys_madvise() */
180 #define VM_SEQ_READ 0x00008000 /* App will access data sequentially */
181 #define VM_RAND_READ 0x00010000 /* App will not benefit from clustered reads */
182
183 #define VM_DONTCOPY 0x00020000 /* Do not copy this vma on fork */
184 #define VM_DONTEXPAND 0x00040000 /* Cannot expand with mremap() */
185 #define VM_LOCKONFAULT 0x00080000 /* Lock the pages covered when they are faulted in */
186 #define VM_ACCOUNT 0x00100000 /* Is a VM accounted object */
187 #define VM_NORESERVE 0x00200000 /* should the VM suppress accounting */
188 #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
189 #define VM_ARCH_1 0x01000000 /* Architecture-specific flag */
190 #define VM_ARCH_2 0x02000000
191 #define VM_DONTDUMP 0x04000000 /* Do not include in the core dump */
192
193 #ifdef CONFIG_MEM_SOFT_DIRTY
194 # define VM_SOFTDIRTY 0x08000000 /* Not soft dirty clean area */
195 #else
196 # define VM_SOFTDIRTY 0
197 #endif
198
199 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
200 #define VM_HUGEPAGE 0x20000000 /* MADV_HUGEPAGE marked this vma */
201 #define VM_NOHUGEPAGE 0x40000000 /* MADV_NOHUGEPAGE marked this vma */
202 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
203
204 #ifdef CONFIG_ARCH_USES_HIGH_VMA_FLAGS
205 #define VM_HIGH_ARCH_BIT_0 32 /* bit only usable on 64-bit architectures */
206 #define VM_HIGH_ARCH_BIT_1 33 /* bit only usable on 64-bit architectures */
207 #define VM_HIGH_ARCH_BIT_2 34 /* bit only usable on 64-bit architectures */
208 #define VM_HIGH_ARCH_BIT_3 35 /* bit only usable on 64-bit architectures */
209 #define VM_HIGH_ARCH_0 BIT(VM_HIGH_ARCH_BIT_0)
210 #define VM_HIGH_ARCH_1 BIT(VM_HIGH_ARCH_BIT_1)
211 #define VM_HIGH_ARCH_2 BIT(VM_HIGH_ARCH_BIT_2)
212 #define VM_HIGH_ARCH_3 BIT(VM_HIGH_ARCH_BIT_3)
213 #endif /* CONFIG_ARCH_USES_HIGH_VMA_FLAGS */
214
215 #if defined(CONFIG_X86)
216 # define VM_PAT VM_ARCH_1 /* PAT reserves whole VMA at once (x86) */
217 #if defined (CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS)
218 # define VM_PKEY_SHIFT VM_HIGH_ARCH_BIT_0
219 # define VM_PKEY_BIT0 VM_HIGH_ARCH_0 /* A protection key is a 4-bit value */
220 # define VM_PKEY_BIT1 VM_HIGH_ARCH_1
221 # define VM_PKEY_BIT2 VM_HIGH_ARCH_2
222 # define VM_PKEY_BIT3 VM_HIGH_ARCH_3
223 #endif
224 #elif defined(CONFIG_PPC)
225 # define VM_SAO VM_ARCH_1 /* Strong Access Ordering (powerpc) */
226 #elif defined(CONFIG_PARISC)
227 # define VM_GROWSUP VM_ARCH_1
228 #elif defined(CONFIG_METAG)
229 # define VM_GROWSUP VM_ARCH_1
230 #elif defined(CONFIG_IA64)
231 # define VM_GROWSUP VM_ARCH_1
232 #elif !defined(CONFIG_MMU)
233 # define VM_MAPPED_COPY VM_ARCH_1 /* T if mapped copy of data (nommu mmap) */
234 #endif
235
236 #if defined(CONFIG_X86)
237 /* MPX specific bounds table or bounds directory */
238 # define VM_MPX VM_ARCH_2
239 #endif
240
241 #ifndef VM_GROWSUP
242 # define VM_GROWSUP VM_NONE
243 #endif
244
245 /* Bits set in the VMA until the stack is in its final location */
246 #define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ)
247
248 #ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */
249 #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
250 #endif
251
252 #ifdef CONFIG_STACK_GROWSUP
253 #define VM_STACK VM_GROWSUP
254 #else
255 #define VM_STACK VM_GROWSDOWN
256 #endif
257
258 #define VM_STACK_FLAGS (VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
259
260 /*
261 * Special vmas that are non-mergable, non-mlock()able.
262 * Note: mm/huge_memory.c VM_NO_THP depends on this definition.
263 */
264 #define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)
265
266 /* This mask defines which mm->def_flags a process can inherit its parent */
267 #define VM_INIT_DEF_MASK VM_NOHUGEPAGE
268
269 /* This mask is used to clear all the VMA flags used by mlock */
270 #define VM_LOCKED_CLEAR_MASK (~(VM_LOCKED | VM_LOCKONFAULT))
271
272 /*
273 * mapping from the currently active vm_flags protection bits (the
274 * low four bits) to a page protection mask..
275 */
276 extern pgprot_t protection_map[16];
277
278 #define FAULT_FLAG_WRITE 0x01 /* Fault was a write access */
279 #define FAULT_FLAG_MKWRITE 0x02 /* Fault was mkwrite of existing pte */
280 #define FAULT_FLAG_ALLOW_RETRY 0x04 /* Retry fault if blocking */
281 #define FAULT_FLAG_RETRY_NOWAIT 0x08 /* Don't drop mmap_sem and wait when retrying */
282 #define FAULT_FLAG_KILLABLE 0x10 /* The fault task is in SIGKILL killable region */
283 #define FAULT_FLAG_TRIED 0x20 /* Second try */
284 #define FAULT_FLAG_USER 0x40 /* The fault originated in userspace */
285 #define FAULT_FLAG_REMOTE 0x80 /* faulting for non current tsk/mm */
286 #define FAULT_FLAG_INSTRUCTION 0x100 /* The fault was during an instruction fetch */
287
288 #define FAULT_FLAG_TRACE \
289 { FAULT_FLAG_WRITE, "WRITE" }, \
290 { FAULT_FLAG_MKWRITE, "MKWRITE" }, \
291 { FAULT_FLAG_ALLOW_RETRY, "ALLOW_RETRY" }, \
292 { FAULT_FLAG_RETRY_NOWAIT, "RETRY_NOWAIT" }, \
293 { FAULT_FLAG_KILLABLE, "KILLABLE" }, \
294 { FAULT_FLAG_TRIED, "TRIED" }, \
295 { FAULT_FLAG_USER, "USER" }, \
296 { FAULT_FLAG_REMOTE, "REMOTE" }, \
297 { FAULT_FLAG_INSTRUCTION, "INSTRUCTION" }
298
299 /*
300 * vm_fault is filled by the the pagefault handler and passed to the vma's
301 * ->fault function. The vma's ->fault is responsible for returning a bitmask
302 * of VM_FAULT_xxx flags that give details about how the fault was handled.
303 *
304 * MM layer fills up gfp_mask for page allocations but fault handler might
305 * alter it if its implementation requires a different allocation context.
306 *
307 * pgoff should be used in favour of virtual_address, if possible.
308 */
309 struct vm_fault {
310 struct vm_area_struct *vma; /* Target VMA */
311 unsigned int flags; /* FAULT_FLAG_xxx flags */
312 gfp_t gfp_mask; /* gfp mask to be used for allocations */
313 pgoff_t pgoff; /* Logical page offset based on vma */
314 unsigned long address; /* Faulting virtual address */
315 pmd_t *pmd; /* Pointer to pmd entry matching
316 * the 'address' */
317 pud_t *pud; /* Pointer to pud entry matching
318 * the 'address'
319 */
320 pte_t orig_pte; /* Value of PTE at the time of fault */
321
322 struct page *cow_page; /* Page handler may use for COW fault */
323 struct mem_cgroup *memcg; /* Cgroup cow_page belongs to */
324 struct page *page; /* ->fault handlers should return a
325 * page here, unless VM_FAULT_NOPAGE
326 * is set (which is also implied by
327 * VM_FAULT_ERROR).
328 */
329 /* These three entries are valid only while holding ptl lock */
330 pte_t *pte; /* Pointer to pte entry matching
331 * the 'address'. NULL if the page
332 * table hasn't been allocated.
333 */
334 spinlock_t *ptl; /* Page table lock.
335 * Protects pte page table if 'pte'
336 * is not NULL, otherwise pmd.
337 */
338 pgtable_t prealloc_pte; /* Pre-allocated pte page table.
339 * vm_ops->map_pages() calls
340 * alloc_set_pte() from atomic context.
341 * do_fault_around() pre-allocates
342 * page table to avoid allocation from
343 * atomic context.
344 */
345 };
346
347 /* page entry size for vm->huge_fault() */
348 enum page_entry_size {
349 PE_SIZE_PTE = 0,
350 PE_SIZE_PMD,
351 PE_SIZE_PUD,
352 };
353
354 /*
355 * These are the virtual MM functions - opening of an area, closing and
356 * unmapping it (needed to keep files on disk up-to-date etc), pointer
357 * to the functions called when a no-page or a wp-page exception occurs.
358 */
359 struct vm_operations_struct {
360 void (*open)(struct vm_area_struct * area);
361 void (*close)(struct vm_area_struct * area);
362 int (*mremap)(struct vm_area_struct * area);
363 int (*fault)(struct vm_fault *vmf);
364 int (*huge_fault)(struct vm_fault *vmf, enum page_entry_size pe_size);
365 void (*map_pages)(struct vm_fault *vmf,
366 pgoff_t start_pgoff, pgoff_t end_pgoff);
367
368 /* notification that a previously read-only page is about to become
369 * writable, if an error is returned it will cause a SIGBUS */
370 int (*page_mkwrite)(struct vm_fault *vmf);
371
372 /* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */
373 int (*pfn_mkwrite)(struct vm_fault *vmf);
374
375 /* called by access_process_vm when get_user_pages() fails, typically
376 * for use by special VMAs that can switch between memory and hardware
377 */
378 int (*access)(struct vm_area_struct *vma, unsigned long addr,
379 void *buf, int len, int write);
380
381 /* Called by the /proc/PID/maps code to ask the vma whether it
382 * has a special name. Returning non-NULL will also cause this
383 * vma to be dumped unconditionally. */
384 const char *(*name)(struct vm_area_struct *vma);
385
386 #ifdef CONFIG_NUMA
387 /*
388 * set_policy() op must add a reference to any non-NULL @new mempolicy
389 * to hold the policy upon return. Caller should pass NULL @new to
390 * remove a policy and fall back to surrounding context--i.e. do not
391 * install a MPOL_DEFAULT policy, nor the task or system default
392 * mempolicy.
393 */
394 int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
395
396 /*
397 * get_policy() op must add reference [mpol_get()] to any policy at
398 * (vma,addr) marked as MPOL_SHARED. The shared policy infrastructure
399 * in mm/mempolicy.c will do this automatically.
400 * get_policy() must NOT add a ref if the policy at (vma,addr) is not
401 * marked as MPOL_SHARED. vma policies are protected by the mmap_sem.
402 * If no [shared/vma] mempolicy exists at the addr, get_policy() op
403 * must return NULL--i.e., do not "fallback" to task or system default
404 * policy.
405 */
406 struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
407 unsigned long addr);
408 #endif
409 /*
410 * Called by vm_normal_page() for special PTEs to find the
411 * page for @addr. This is useful if the default behavior
412 * (using pte_page()) would not find the correct page.
413 */
414 struct page *(*find_special_page)(struct vm_area_struct *vma,
415 unsigned long addr);
416 };
417
418 struct mmu_gather;
419 struct inode;
420
421 #define page_private(page) ((page)->private)
422 #define set_page_private(page, v) ((page)->private = (v))
423
424 #if !defined(__HAVE_ARCH_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE)
425 static inline int pmd_devmap(pmd_t pmd)
426 {
427 return 0;
428 }
429 static inline int pud_devmap(pud_t pud)
430 {
431 return 0;
432 }
433 #endif
434
435 /*
436 * FIXME: take this include out, include page-flags.h in
437 * files which need it (119 of them)
438 */
439 #include <linux/page-flags.h>
440 #include <linux/huge_mm.h>
441
442 /*
443 * Methods to modify the page usage count.
444 *
445 * What counts for a page usage:
446 * - cache mapping (page->mapping)
447 * - private data (page->private)
448 * - page mapped in a task's page tables, each mapping
449 * is counted separately
450 *
451 * Also, many kernel routines increase the page count before a critical
452 * routine so they can be sure the page doesn't go away from under them.
453 */
454
455 /*
456 * Drop a ref, return true if the refcount fell to zero (the page has no users)
457 */
458 static inline int put_page_testzero(struct page *page)
459 {
460 VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
461 return page_ref_dec_and_test(page);
462 }
463
464 /*
465 * Try to grab a ref unless the page has a refcount of zero, return false if
466 * that is the case.
467 * This can be called when MMU is off so it must not access
468 * any of the virtual mappings.
469 */
470 static inline int get_page_unless_zero(struct page *page)
471 {
472 return page_ref_add_unless(page, 1, 0);
473 }
474
475 extern int page_is_ram(unsigned long pfn);
476
477 enum {
478 REGION_INTERSECTS,
479 REGION_DISJOINT,
480 REGION_MIXED,
481 };
482
483 int region_intersects(resource_size_t offset, size_t size, unsigned long flags,
484 unsigned long desc);
485
486 /* Support for virtually mapped pages */
487 struct page *vmalloc_to_page(const void *addr);
488 unsigned long vmalloc_to_pfn(const void *addr);
489
490 /*
491 * Determine if an address is within the vmalloc range
492 *
493 * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
494 * is no special casing required.
495 */
496 static inline bool is_vmalloc_addr(const void *x)
497 {
498 #ifdef CONFIG_MMU
499 unsigned long addr = (unsigned long)x;
500
501 return addr >= VMALLOC_START && addr < VMALLOC_END;
502 #else
503 return false;
504 #endif
505 }
506 #ifdef CONFIG_MMU
507 extern int is_vmalloc_or_module_addr(const void *x);
508 #else
509 static inline int is_vmalloc_or_module_addr(const void *x)
510 {
511 return 0;
512 }
513 #endif
514
515 extern void kvfree(const void *addr);
516
517 static inline atomic_t *compound_mapcount_ptr(struct page *page)
518 {
519 return &page[1].compound_mapcount;
520 }
521
522 static inline int compound_mapcount(struct page *page)
523 {
524 VM_BUG_ON_PAGE(!PageCompound(page), page);
525 page = compound_head(page);
526 return atomic_read(compound_mapcount_ptr(page)) + 1;
527 }
528
529 /*
530 * The atomic page->_mapcount, starts from -1: so that transitions
531 * both from it and to it can be tracked, using atomic_inc_and_test
532 * and atomic_add_negative(-1).
533 */
534 static inline void page_mapcount_reset(struct page *page)
535 {
536 atomic_set(&(page)->_mapcount, -1);
537 }
538
539 int __page_mapcount(struct page *page);
540
541 static inline int page_mapcount(struct page *page)
542 {
543 VM_BUG_ON_PAGE(PageSlab(page), page);
544
545 if (unlikely(PageCompound(page)))
546 return __page_mapcount(page);
547 return atomic_read(&page->_mapcount) + 1;
548 }
549
550 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
551 int total_mapcount(struct page *page);
552 int page_trans_huge_mapcount(struct page *page, int *total_mapcount);
553 #else
554 static inline int total_mapcount(struct page *page)
555 {
556 return page_mapcount(page);
557 }
558 static inline int page_trans_huge_mapcount(struct page *page,
559 int *total_mapcount)
560 {
561 int mapcount = page_mapcount(page);
562 if (total_mapcount)
563 *total_mapcount = mapcount;
564 return mapcount;
565 }
566 #endif
567
568 static inline struct page *virt_to_head_page(const void *x)
569 {
570 struct page *page = virt_to_page(x);
571
572 return compound_head(page);
573 }
574
575 void __put_page(struct page *page);
576
577 void put_pages_list(struct list_head *pages);
578
579 void split_page(struct page *page, unsigned int order);
580
581 /*
582 * Compound pages have a destructor function. Provide a
583 * prototype for that function and accessor functions.
584 * These are _only_ valid on the head of a compound page.
585 */
586 typedef void compound_page_dtor(struct page *);
587
588 /* Keep the enum in sync with compound_page_dtors array in mm/page_alloc.c */
589 enum compound_dtor_id {
590 NULL_COMPOUND_DTOR,
591 COMPOUND_PAGE_DTOR,
592 #ifdef CONFIG_HUGETLB_PAGE
593 HUGETLB_PAGE_DTOR,
594 #endif
595 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
596 TRANSHUGE_PAGE_DTOR,
597 #endif
598 NR_COMPOUND_DTORS,
599 };
600 extern compound_page_dtor * const compound_page_dtors[];
601
602 static inline void set_compound_page_dtor(struct page *page,
603 enum compound_dtor_id compound_dtor)
604 {
605 VM_BUG_ON_PAGE(compound_dtor >= NR_COMPOUND_DTORS, page);
606 page[1].compound_dtor = compound_dtor;
607 }
608
609 static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
610 {
611 VM_BUG_ON_PAGE(page[1].compound_dtor >= NR_COMPOUND_DTORS, page);
612 return compound_page_dtors[page[1].compound_dtor];
613 }
614
615 static inline unsigned int compound_order(struct page *page)
616 {
617 if (!PageHead(page))
618 return 0;
619 return page[1].compound_order;
620 }
621
622 static inline void set_compound_order(struct page *page, unsigned int order)
623 {
624 page[1].compound_order = order;
625 }
626
627 void free_compound_page(struct page *page);
628
629 #ifdef CONFIG_MMU
630 /*
631 * Do pte_mkwrite, but only if the vma says VM_WRITE. We do this when
632 * servicing faults for write access. In the normal case, do always want
633 * pte_mkwrite. But get_user_pages can cause write faults for mappings
634 * that do not have writing enabled, when used by access_process_vm.
635 */
636 static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
637 {
638 if (likely(vma->vm_flags & VM_WRITE))
639 pte = pte_mkwrite(pte);
640 return pte;
641 }
642
643 int alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
644 struct page *page);
645 int finish_fault(struct vm_fault *vmf);
646 int finish_mkwrite_fault(struct vm_fault *vmf);
647 #endif
648
649 /*
650 * Multiple processes may "see" the same page. E.g. for untouched
651 * mappings of /dev/null, all processes see the same page full of
652 * zeroes, and text pages of executables and shared libraries have
653 * only one copy in memory, at most, normally.
654 *
655 * For the non-reserved pages, page_count(page) denotes a reference count.
656 * page_count() == 0 means the page is free. page->lru is then used for
657 * freelist management in the buddy allocator.
658 * page_count() > 0 means the page has been allocated.
659 *
660 * Pages are allocated by the slab allocator in order to provide memory
661 * to kmalloc and kmem_cache_alloc. In this case, the management of the
662 * page, and the fields in 'struct page' are the responsibility of mm/slab.c
663 * unless a particular usage is carefully commented. (the responsibility of
664 * freeing the kmalloc memory is the caller's, of course).
665 *
666 * A page may be used by anyone else who does a __get_free_page().
667 * In this case, page_count still tracks the references, and should only
668 * be used through the normal accessor functions. The top bits of page->flags
669 * and page->virtual store page management information, but all other fields
670 * are unused and could be used privately, carefully. The management of this
671 * page is the responsibility of the one who allocated it, and those who have
672 * subsequently been given references to it.
673 *
674 * The other pages (we may call them "pagecache pages") are completely
675 * managed by the Linux memory manager: I/O, buffers, swapping etc.
676 * The following discussion applies only to them.
677 *
678 * A pagecache page contains an opaque `private' member, which belongs to the
679 * page's address_space. Usually, this is the address of a circular list of
680 * the page's disk buffers. PG_private must be set to tell the VM to call
681 * into the filesystem to release these pages.
682 *
683 * A page may belong to an inode's memory mapping. In this case, page->mapping
684 * is the pointer to the inode, and page->index is the file offset of the page,
685 * in units of PAGE_SIZE.
686 *
687 * If pagecache pages are not associated with an inode, they are said to be
688 * anonymous pages. These may become associated with the swapcache, and in that
689 * case PG_swapcache is set, and page->private is an offset into the swapcache.
690 *
691 * In either case (swapcache or inode backed), the pagecache itself holds one
692 * reference to the page. Setting PG_private should also increment the
693 * refcount. The each user mapping also has a reference to the page.
694 *
695 * The pagecache pages are stored in a per-mapping radix tree, which is
696 * rooted at mapping->page_tree, and indexed by offset.
697 * Where 2.4 and early 2.6 kernels kept dirty/clean pages in per-address_space
698 * lists, we instead now tag pages as dirty/writeback in the radix tree.
699 *
700 * All pagecache pages may be subject to I/O:
701 * - inode pages may need to be read from disk,
702 * - inode pages which have been modified and are MAP_SHARED may need
703 * to be written back to the inode on disk,
704 * - anonymous pages (including MAP_PRIVATE file mappings) which have been
705 * modified may need to be swapped out to swap space and (later) to be read
706 * back into memory.
707 */
708
709 /*
710 * The zone field is never updated after free_area_init_core()
711 * sets it, so none of the operations on it need to be atomic.
712 */
713
714 /* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_CPUPID] | ... | FLAGS | */
715 #define SECTIONS_PGOFF ((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
716 #define NODES_PGOFF (SECTIONS_PGOFF - NODES_WIDTH)
717 #define ZONES_PGOFF (NODES_PGOFF - ZONES_WIDTH)
718 #define LAST_CPUPID_PGOFF (ZONES_PGOFF - LAST_CPUPID_WIDTH)
719
720 /*
721 * Define the bit shifts to access each section. For non-existent
722 * sections we define the shift as 0; that plus a 0 mask ensures
723 * the compiler will optimise away reference to them.
724 */
725 #define SECTIONS_PGSHIFT (SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
726 #define NODES_PGSHIFT (NODES_PGOFF * (NODES_WIDTH != 0))
727 #define ZONES_PGSHIFT (ZONES_PGOFF * (ZONES_WIDTH != 0))
728 #define LAST_CPUPID_PGSHIFT (LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0))
729
730 /* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
731 #ifdef NODE_NOT_IN_PAGE_FLAGS
732 #define ZONEID_SHIFT (SECTIONS_SHIFT + ZONES_SHIFT)
733 #define ZONEID_PGOFF ((SECTIONS_PGOFF < ZONES_PGOFF)? \
734 SECTIONS_PGOFF : ZONES_PGOFF)
735 #else
736 #define ZONEID_SHIFT (NODES_SHIFT + ZONES_SHIFT)
737 #define ZONEID_PGOFF ((NODES_PGOFF < ZONES_PGOFF)? \
738 NODES_PGOFF : ZONES_PGOFF)
739 #endif
740
741 #define ZONEID_PGSHIFT (ZONEID_PGOFF * (ZONEID_SHIFT != 0))
742
743 #if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
744 #error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
745 #endif
746
747 #define ZONES_MASK ((1UL << ZONES_WIDTH) - 1)
748 #define NODES_MASK ((1UL << NODES_WIDTH) - 1)
749 #define SECTIONS_MASK ((1UL << SECTIONS_WIDTH) - 1)
750 #define LAST_CPUPID_MASK ((1UL << LAST_CPUPID_SHIFT) - 1)
751 #define ZONEID_MASK ((1UL << ZONEID_SHIFT) - 1)
752
753 static inline enum zone_type page_zonenum(const struct page *page)
754 {
755 return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
756 }
757
758 #ifdef CONFIG_ZONE_DEVICE
759 void get_zone_device_page(struct page *page);
760 void put_zone_device_page(struct page *page);
761 static inline bool is_zone_device_page(const struct page *page)
762 {
763 return page_zonenum(page) == ZONE_DEVICE;
764 }
765 #else
766 static inline void get_zone_device_page(struct page *page)
767 {
768 }
769 static inline void put_zone_device_page(struct page *page)
770 {
771 }
772 static inline bool is_zone_device_page(const struct page *page)
773 {
774 return false;
775 }
776 #endif
777
778 static inline void get_page(struct page *page)
779 {
780 page = compound_head(page);
781 /*
782 * Getting a normal page or the head of a compound page
783 * requires to already have an elevated page->_refcount.
784 */
785 VM_BUG_ON_PAGE(page_ref_count(page) <= 0, page);
786 page_ref_inc(page);
787
788 if (unlikely(is_zone_device_page(page)))
789 get_zone_device_page(page);
790 }
791
792 static inline void put_page(struct page *page)
793 {
794 page = compound_head(page);
795
796 if (put_page_testzero(page))
797 __put_page(page);
798
799 if (unlikely(is_zone_device_page(page)))
800 put_zone_device_page(page);
801 }
802
803 #if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
804 #define SECTION_IN_PAGE_FLAGS
805 #endif
806
807 /*
808 * The identification function is mainly used by the buddy allocator for
809 * determining if two pages could be buddies. We are not really identifying
810 * the zone since we could be using the section number id if we do not have
811 * node id available in page flags.
812 * We only guarantee that it will return the same value for two combinable
813 * pages in a zone.
814 */
815 static inline int page_zone_id(struct page *page)
816 {
817 return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
818 }
819
820 static inline int zone_to_nid(struct zone *zone)
821 {
822 #ifdef CONFIG_NUMA
823 return zone->node;
824 #else
825 return 0;
826 #endif
827 }
828
829 #ifdef NODE_NOT_IN_PAGE_FLAGS
830 extern int page_to_nid(const struct page *page);
831 #else
832 static inline int page_to_nid(const struct page *page)
833 {
834 return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
835 }
836 #endif
837
838 #ifdef CONFIG_NUMA_BALANCING
839 static inline int cpu_pid_to_cpupid(int cpu, int pid)
840 {
841 return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK);
842 }
843
844 static inline int cpupid_to_pid(int cpupid)
845 {
846 return cpupid & LAST__PID_MASK;
847 }
848
849 static inline int cpupid_to_cpu(int cpupid)
850 {
851 return (cpupid >> LAST__PID_SHIFT) & LAST__CPU_MASK;
852 }
853
854 static inline int cpupid_to_nid(int cpupid)
855 {
856 return cpu_to_node(cpupid_to_cpu(cpupid));
857 }
858
859 static inline bool cpupid_pid_unset(int cpupid)
860 {
861 return cpupid_to_pid(cpupid) == (-1 & LAST__PID_MASK);
862 }
863
864 static inline bool cpupid_cpu_unset(int cpupid)
865 {
866 return cpupid_to_cpu(cpupid) == (-1 & LAST__CPU_MASK);
867 }
868
869 static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid)
870 {
871 return (task_pid & LAST__PID_MASK) == cpupid_to_pid(cpupid);
872 }
873
874 #define cpupid_match_pid(task, cpupid) __cpupid_match_pid(task->pid, cpupid)
875 #ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
876 static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
877 {
878 return xchg(&page->_last_cpupid, cpupid & LAST_CPUPID_MASK);
879 }
880
881 static inline int page_cpupid_last(struct page *page)
882 {
883 return page->_last_cpupid;
884 }
885 static inline void page_cpupid_reset_last(struct page *page)
886 {
887 page->_last_cpupid = -1 & LAST_CPUPID_MASK;
888 }
889 #else
890 static inline int page_cpupid_last(struct page *page)
891 {
892 return (page->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
893 }
894
895 extern int page_cpupid_xchg_last(struct page *page, int cpupid);
896
897 static inline void page_cpupid_reset_last(struct page *page)
898 {
899 page->flags |= LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT;
900 }
901 #endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */
902 #else /* !CONFIG_NUMA_BALANCING */
903 static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
904 {
905 return page_to_nid(page); /* XXX */
906 }
907
908 static inline int page_cpupid_last(struct page *page)
909 {
910 return page_to_nid(page); /* XXX */
911 }
912
913 static inline int cpupid_to_nid(int cpupid)
914 {
915 return -1;
916 }
917
918 static inline int cpupid_to_pid(int cpupid)
919 {
920 return -1;
921 }
922
923 static inline int cpupid_to_cpu(int cpupid)
924 {
925 return -1;
926 }
927
928 static inline int cpu_pid_to_cpupid(int nid, int pid)
929 {
930 return -1;
931 }
932
933 static inline bool cpupid_pid_unset(int cpupid)
934 {
935 return 1;
936 }
937
938 static inline void page_cpupid_reset_last(struct page *page)
939 {
940 }
941
942 static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
943 {
944 return false;
945 }
946 #endif /* CONFIG_NUMA_BALANCING */
947
948 static inline struct zone *page_zone(const struct page *page)
949 {
950 return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
951 }
952
953 static inline pg_data_t *page_pgdat(const struct page *page)
954 {
955 return NODE_DATA(page_to_nid(page));
956 }
957
958 #ifdef SECTION_IN_PAGE_FLAGS
959 static inline void set_page_section(struct page *page, unsigned long section)
960 {
961 page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
962 page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
963 }
964
965 static inline unsigned long page_to_section(const struct page *page)
966 {
967 return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
968 }
969 #endif
970
971 static inline void set_page_zone(struct page *page, enum zone_type zone)
972 {
973 page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
974 page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
975 }
976
977 static inline void set_page_node(struct page *page, unsigned long node)
978 {
979 page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
980 page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
981 }
982
983 static inline void set_page_links(struct page *page, enum zone_type zone,
984 unsigned long node, unsigned long pfn)
985 {
986 set_page_zone(page, zone);
987 set_page_node(page, node);
988 #ifdef SECTION_IN_PAGE_FLAGS
989 set_page_section(page, pfn_to_section_nr(pfn));
990 #endif
991 }
992
993 #ifdef CONFIG_MEMCG
994 static inline struct mem_cgroup *page_memcg(struct page *page)
995 {
996 return page->mem_cgroup;
997 }
998 static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
999 {
1000 WARN_ON_ONCE(!rcu_read_lock_held());
1001 return READ_ONCE(page->mem_cgroup);
1002 }
1003 #else
1004 static inline struct mem_cgroup *page_memcg(struct page *page)
1005 {
1006 return NULL;
1007 }
1008 static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
1009 {
1010 WARN_ON_ONCE(!rcu_read_lock_held());
1011 return NULL;
1012 }
1013 #endif
1014
1015 /*
1016 * Some inline functions in vmstat.h depend on page_zone()
1017 */
1018 #include <linux/vmstat.h>
1019
1020 static __always_inline void *lowmem_page_address(const struct page *page)
1021 {
1022 return page_to_virt(page);
1023 }
1024
1025 #if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
1026 #define HASHED_PAGE_VIRTUAL
1027 #endif
1028
1029 #if defined(WANT_PAGE_VIRTUAL)
1030 static inline void *page_address(const struct page *page)
1031 {
1032 return page->virtual;
1033 }
1034 static inline void set_page_address(struct page *page, void *address)
1035 {
1036 page->virtual = address;
1037 }
1038 #define page_address_init() do { } while(0)
1039 #endif
1040
1041 #if defined(HASHED_PAGE_VIRTUAL)
1042 void *page_address(const struct page *page);
1043 void set_page_address(struct page *page, void *virtual);
1044 void page_address_init(void);
1045 #endif
1046
1047 #if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
1048 #define page_address(page) lowmem_page_address(page)
1049 #define set_page_address(page, address) do { } while(0)
1050 #define page_address_init() do { } while(0)
1051 #endif
1052
1053 extern void *page_rmapping(struct page *page);
1054 extern struct anon_vma *page_anon_vma(struct page *page);
1055 extern struct address_space *page_mapping(struct page *page);
1056
1057 extern struct address_space *__page_file_mapping(struct page *);
1058
1059 static inline
1060 struct address_space *page_file_mapping(struct page *page)
1061 {
1062 if (unlikely(PageSwapCache(page)))
1063 return __page_file_mapping(page);
1064
1065 return page->mapping;
1066 }
1067
1068 extern pgoff_t __page_file_index(struct page *page);
1069
1070 /*
1071 * Return the pagecache index of the passed page. Regular pagecache pages
1072 * use ->index whereas swapcache pages use swp_offset(->private)
1073 */
1074 static inline pgoff_t page_index(struct page *page)
1075 {
1076 if (unlikely(PageSwapCache(page)))
1077 return __page_file_index(page);
1078 return page->index;
1079 }
1080
1081 bool page_mapped(struct page *page);
1082 struct address_space *page_mapping(struct page *page);
1083
1084 /*
1085 * Return true only if the page has been allocated with
1086 * ALLOC_NO_WATERMARKS and the low watermark was not
1087 * met implying that the system is under some pressure.
1088 */
1089 static inline bool page_is_pfmemalloc(struct page *page)
1090 {
1091 /*
1092 * Page index cannot be this large so this must be
1093 * a pfmemalloc page.
1094 */
1095 return page->index == -1UL;
1096 }
1097
1098 /*
1099 * Only to be called by the page allocator on a freshly allocated
1100 * page.
1101 */
1102 static inline void set_page_pfmemalloc(struct page *page)
1103 {
1104 page->index = -1UL;
1105 }
1106
1107 static inline void clear_page_pfmemalloc(struct page *page)
1108 {
1109 page->index = 0;
1110 }
1111
1112 /*
1113 * Different kinds of faults, as returned by handle_mm_fault().
1114 * Used to decide whether a process gets delivered SIGBUS or
1115 * just gets major/minor fault counters bumped up.
1116 */
1117
1118 #define VM_FAULT_OOM 0x0001
1119 #define VM_FAULT_SIGBUS 0x0002
1120 #define VM_FAULT_MAJOR 0x0004
1121 #define VM_FAULT_WRITE 0x0008 /* Special case for get_user_pages */
1122 #define VM_FAULT_HWPOISON 0x0010 /* Hit poisoned small page */
1123 #define VM_FAULT_HWPOISON_LARGE 0x0020 /* Hit poisoned large page. Index encoded in upper bits */
1124 #define VM_FAULT_SIGSEGV 0x0040
1125
1126 #define VM_FAULT_NOPAGE 0x0100 /* ->fault installed the pte, not return page */
1127 #define VM_FAULT_LOCKED 0x0200 /* ->fault locked the returned page */
1128 #define VM_FAULT_RETRY 0x0400 /* ->fault blocked, must retry */
1129 #define VM_FAULT_FALLBACK 0x0800 /* huge page fault failed, fall back to small */
1130 #define VM_FAULT_DONE_COW 0x1000 /* ->fault has fully handled COW */
1131
1132 #define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */
1133
1134 #define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | \
1135 VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE | \
1136 VM_FAULT_FALLBACK)
1137
1138 #define VM_FAULT_RESULT_TRACE \
1139 { VM_FAULT_OOM, "OOM" }, \
1140 { VM_FAULT_SIGBUS, "SIGBUS" }, \
1141 { VM_FAULT_MAJOR, "MAJOR" }, \
1142 { VM_FAULT_WRITE, "WRITE" }, \
1143 { VM_FAULT_HWPOISON, "HWPOISON" }, \
1144 { VM_FAULT_HWPOISON_LARGE, "HWPOISON_LARGE" }, \
1145 { VM_FAULT_SIGSEGV, "SIGSEGV" }, \
1146 { VM_FAULT_NOPAGE, "NOPAGE" }, \
1147 { VM_FAULT_LOCKED, "LOCKED" }, \
1148 { VM_FAULT_RETRY, "RETRY" }, \
1149 { VM_FAULT_FALLBACK, "FALLBACK" }, \
1150 { VM_FAULT_DONE_COW, "DONE_COW" }
1151
1152 /* Encode hstate index for a hwpoisoned large page */
1153 #define VM_FAULT_SET_HINDEX(x) ((x) << 12)
1154 #define VM_FAULT_GET_HINDEX(x) (((x) >> 12) & 0xf)
1155
1156 /*
1157 * Can be called by the pagefault handler when it gets a VM_FAULT_OOM.
1158 */
1159 extern void pagefault_out_of_memory(void);
1160
1161 #define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)
1162
1163 /*
1164 * Flags passed to show_mem() and show_free_areas() to suppress output in
1165 * various contexts.
1166 */
1167 #define SHOW_MEM_FILTER_NODES (0x0001u) /* disallowed nodes */
1168
1169 extern void show_free_areas(unsigned int flags, nodemask_t *nodemask);
1170
1171 extern bool can_do_mlock(void);
1172 extern int user_shm_lock(size_t, struct user_struct *);
1173 extern void user_shm_unlock(size_t, struct user_struct *);
1174
1175 /*
1176 * Parameter block passed down to zap_pte_range in exceptional cases.
1177 */
1178 struct zap_details {
1179 struct address_space *check_mapping; /* Check page->mapping if set */
1180 pgoff_t first_index; /* Lowest page->index to unmap */
1181 pgoff_t last_index; /* Highest page->index to unmap */
1182 };
1183
1184 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
1185 pte_t pte);
1186 struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
1187 pmd_t pmd);
1188
1189 int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
1190 unsigned long size);
1191 void zap_page_range(struct vm_area_struct *vma, unsigned long address,
1192 unsigned long size);
1193 void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
1194 unsigned long start, unsigned long end);
1195
1196 /**
1197 * mm_walk - callbacks for walk_page_range
1198 * @pud_entry: if set, called for each non-empty PUD (2nd-level) entry
1199 * this handler should only handle pud_trans_huge() puds.
1200 * the pmd_entry or pte_entry callbacks will be used for
1201 * regular PUDs.
1202 * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry
1203 * this handler is required to be able to handle
1204 * pmd_trans_huge() pmds. They may simply choose to
1205 * split_huge_page() instead of handling it explicitly.
1206 * @pte_entry: if set, called for each non-empty PTE (4th-level) entry
1207 * @pte_hole: if set, called for each hole at all levels
1208 * @hugetlb_entry: if set, called for each hugetlb entry
1209 * @test_walk: caller specific callback function to determine whether
1210 * we walk over the current vma or not. Returning 0
1211 * value means "do page table walk over the current vma,"
1212 * and a negative one means "abort current page table walk
1213 * right now." 1 means "skip the current vma."
1214 * @mm: mm_struct representing the target process of page table walk
1215 * @vma: vma currently walked (NULL if walking outside vmas)
1216 * @private: private data for callbacks' usage
1217 *
1218 * (see the comment on walk_page_range() for more details)
1219 */
1220 struct mm_walk {
1221 int (*pud_entry)(pud_t *pud, unsigned long addr,
1222 unsigned long next, struct mm_walk *walk);
1223 int (*pmd_entry)(pmd_t *pmd, unsigned long addr,
1224 unsigned long next, struct mm_walk *walk);
1225 int (*pte_entry)(pte_t *pte, unsigned long addr,
1226 unsigned long next, struct mm_walk *walk);
1227 int (*pte_hole)(unsigned long addr, unsigned long next,
1228 struct mm_walk *walk);
1229 int (*hugetlb_entry)(pte_t *pte, unsigned long hmask,
1230 unsigned long addr, unsigned long next,
1231 struct mm_walk *walk);
1232 int (*test_walk)(unsigned long addr, unsigned long next,
1233 struct mm_walk *walk);
1234 struct mm_struct *mm;
1235 struct vm_area_struct *vma;
1236 void *private;
1237 };
1238
1239 int walk_page_range(unsigned long addr, unsigned long end,
1240 struct mm_walk *walk);
1241 int walk_page_vma(struct vm_area_struct *vma, struct mm_walk *walk);
1242 void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
1243 unsigned long end, unsigned long floor, unsigned long ceiling);
1244 int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
1245 struct vm_area_struct *vma);
1246 void unmap_mapping_range(struct address_space *mapping,
1247 loff_t const holebegin, loff_t const holelen, int even_cows);
1248 int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
1249 pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp);
1250 int follow_pfn(struct vm_area_struct *vma, unsigned long address,
1251 unsigned long *pfn);
1252 int follow_phys(struct vm_area_struct *vma, unsigned long address,
1253 unsigned int flags, unsigned long *prot, resource_size_t *phys);
1254 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
1255 void *buf, int len, int write);
1256
1257 static inline void unmap_shared_mapping_range(struct address_space *mapping,
1258 loff_t const holebegin, loff_t const holelen)
1259 {
1260 unmap_mapping_range(mapping, holebegin, holelen, 0);
1261 }
1262
1263 extern void truncate_pagecache(struct inode *inode, loff_t new);
1264 extern void truncate_setsize(struct inode *inode, loff_t newsize);
1265 void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
1266 void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
1267 int truncate_inode_page(struct address_space *mapping, struct page *page);
1268 int generic_error_remove_page(struct address_space *mapping, struct page *page);
1269 int invalidate_inode_page(struct page *page);
1270
1271 #ifdef CONFIG_MMU
1272 extern int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
1273 unsigned int flags);
1274 extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
1275 unsigned long address, unsigned int fault_flags,
1276 bool *unlocked);
1277 #else
1278 static inline int handle_mm_fault(struct vm_area_struct *vma,
1279 unsigned long address, unsigned int flags)
1280 {
1281 /* should never happen if there's no MMU */
1282 BUG();
1283 return VM_FAULT_SIGBUS;
1284 }
1285 static inline int fixup_user_fault(struct task_struct *tsk,
1286 struct mm_struct *mm, unsigned long address,
1287 unsigned int fault_flags, bool *unlocked)
1288 {
1289 /* should never happen if there's no MMU */
1290 BUG();
1291 return -EFAULT;
1292 }
1293 #endif
1294
1295 extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len,
1296 unsigned int gup_flags);
1297 extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
1298 void *buf, int len, unsigned int gup_flags);
1299 extern int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
1300 unsigned long addr, void *buf, int len, unsigned int gup_flags);
1301
1302 long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
1303 unsigned long start, unsigned long nr_pages,
1304 unsigned int gup_flags, struct page **pages,
1305 struct vm_area_struct **vmas, int *locked);
1306 long get_user_pages(unsigned long start, unsigned long nr_pages,
1307 unsigned int gup_flags, struct page **pages,
1308 struct vm_area_struct **vmas);
1309 long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
1310 unsigned int gup_flags, struct page **pages, int *locked);
1311 long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
1312 struct page **pages, unsigned int gup_flags);
1313 int get_user_pages_fast(unsigned long start, int nr_pages, int write,
1314 struct page **pages);
1315
1316 /* Container for pinned pfns / pages */
1317 struct frame_vector {
1318 unsigned int nr_allocated; /* Number of frames we have space for */
1319 unsigned int nr_frames; /* Number of frames stored in ptrs array */
1320 bool got_ref; /* Did we pin pages by getting page ref? */
1321 bool is_pfns; /* Does array contain pages or pfns? */
1322 void *ptrs[0]; /* Array of pinned pfns / pages. Use
1323 * pfns_vector_pages() or pfns_vector_pfns()
1324 * for access */
1325 };
1326
1327 struct frame_vector *frame_vector_create(unsigned int nr_frames);
1328 void frame_vector_destroy(struct frame_vector *vec);
1329 int get_vaddr_frames(unsigned long start, unsigned int nr_pfns,
1330 unsigned int gup_flags, struct frame_vector *vec);
1331 void put_vaddr_frames(struct frame_vector *vec);
1332 int frame_vector_to_pages(struct frame_vector *vec);
1333 void frame_vector_to_pfns(struct frame_vector *vec);
1334
1335 static inline unsigned int frame_vector_count(struct frame_vector *vec)
1336 {
1337 return vec->nr_frames;
1338 }
1339
1340 static inline struct page **frame_vector_pages(struct frame_vector *vec)
1341 {
1342 if (vec->is_pfns) {
1343 int err = frame_vector_to_pages(vec);
1344
1345 if (err)
1346 return ERR_PTR(err);
1347 }
1348 return (struct page **)(vec->ptrs);
1349 }
1350
1351 static inline unsigned long *frame_vector_pfns(struct frame_vector *vec)
1352 {
1353 if (!vec->is_pfns)
1354 frame_vector_to_pfns(vec);
1355 return (unsigned long *)(vec->ptrs);
1356 }
1357
1358 struct kvec;
1359 int get_kernel_pages(const struct kvec *iov, int nr_pages, int write,
1360 struct page **pages);
1361 int get_kernel_page(unsigned long start, int write, struct page **pages);
1362 struct page *get_dump_page(unsigned long addr);
1363
1364 extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
1365 extern void do_invalidatepage(struct page *page, unsigned int offset,
1366 unsigned int length);
1367
1368 int __set_page_dirty_nobuffers(struct page *page);
1369 int __set_page_dirty_no_writeback(struct page *page);
1370 int redirty_page_for_writepage(struct writeback_control *wbc,
1371 struct page *page);
1372 void account_page_dirtied(struct page *page, struct address_space *mapping);
1373 void account_page_cleaned(struct page *page, struct address_space *mapping,
1374 struct bdi_writeback *wb);
1375 int set_page_dirty(struct page *page);
1376 int set_page_dirty_lock(struct page *page);
1377 void cancel_dirty_page(struct page *page);
1378 int clear_page_dirty_for_io(struct page *page);
1379
1380 int get_cmdline(struct task_struct *task, char *buffer, int buflen);
1381
1382 /* Is the vma a continuation of the stack vma above it? */
1383 static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
1384 {
1385 return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
1386 }
1387
1388 static inline bool vma_is_anonymous(struct vm_area_struct *vma)
1389 {
1390 return !vma->vm_ops;
1391 }
1392
1393 #ifdef CONFIG_SHMEM
1394 /*
1395 * The vma_is_shmem is not inline because it is used only by slow
1396 * paths in userfault.
1397 */
1398 bool vma_is_shmem(struct vm_area_struct *vma);
1399 #else
1400 static inline bool vma_is_shmem(struct vm_area_struct *vma) { return false; }
1401 #endif
1402
1403 static inline int stack_guard_page_start(struct vm_area_struct *vma,
1404 unsigned long addr)
1405 {
1406 return (vma->vm_flags & VM_GROWSDOWN) &&
1407 (vma->vm_start == addr) &&
1408 !vma_growsdown(vma->vm_prev, addr);
1409 }
1410
1411 /* Is the vma a continuation of the stack vma below it? */
1412 static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
1413 {
1414 return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
1415 }
1416
1417 static inline int stack_guard_page_end(struct vm_area_struct *vma,
1418 unsigned long addr)
1419 {
1420 return (vma->vm_flags & VM_GROWSUP) &&
1421 (vma->vm_end == addr) &&
1422 !vma_growsup(vma->vm_next, addr);
1423 }
1424
1425 int vma_is_stack_for_current(struct vm_area_struct *vma);
1426
1427 extern unsigned long move_page_tables(struct vm_area_struct *vma,
1428 unsigned long old_addr, struct vm_area_struct *new_vma,
1429 unsigned long new_addr, unsigned long len,
1430 bool need_rmap_locks);
1431 extern unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
1432 unsigned long end, pgprot_t newprot,
1433 int dirty_accountable, int prot_numa);
1434 extern int mprotect_fixup(struct vm_area_struct *vma,
1435 struct vm_area_struct **pprev, unsigned long start,
1436 unsigned long end, unsigned long newflags);
1437
1438 /*
1439 * doesn't attempt to fault and will return short.
1440 */
1441 int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
1442 struct page **pages);
1443 /*
1444 * per-process(per-mm_struct) statistics.
1445 */
1446 static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
1447 {
1448 long val = atomic_long_read(&mm->rss_stat.count[member]);
1449
1450 #ifdef SPLIT_RSS_COUNTING
1451 /*
1452 * counter is updated in asynchronous manner and may go to minus.
1453 * But it's never be expected number for users.
1454 */
1455 if (val < 0)
1456 val = 0;
1457 #endif
1458 return (unsigned long)val;
1459 }
1460
1461 static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
1462 {
1463 atomic_long_add(value, &mm->rss_stat.count[member]);
1464 }
1465
1466 static inline void inc_mm_counter(struct mm_struct *mm, int member)
1467 {
1468 atomic_long_inc(&mm->rss_stat.count[member]);
1469 }
1470
1471 static inline void dec_mm_counter(struct mm_struct *mm, int member)
1472 {
1473 atomic_long_dec(&mm->rss_stat.count[member]);
1474 }
1475
1476 /* Optimized variant when page is already known not to be PageAnon */
1477 static inline int mm_counter_file(struct page *page)
1478 {
1479 if (PageSwapBacked(page))
1480 return MM_SHMEMPAGES;
1481 return MM_FILEPAGES;
1482 }
1483
1484 static inline int mm_counter(struct page *page)
1485 {
1486 if (PageAnon(page))
1487 return MM_ANONPAGES;
1488 return mm_counter_file(page);
1489 }
1490
1491 static inline unsigned long get_mm_rss(struct mm_struct *mm)
1492 {
1493 return get_mm_counter(mm, MM_FILEPAGES) +
1494 get_mm_counter(mm, MM_ANONPAGES) +
1495 get_mm_counter(mm, MM_SHMEMPAGES);
1496 }
1497
1498 static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
1499 {
1500 return max(mm->hiwater_rss, get_mm_rss(mm));
1501 }
1502
1503 static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
1504 {
1505 return max(mm->hiwater_vm, mm->total_vm);
1506 }
1507
1508 static inline void update_hiwater_rss(struct mm_struct *mm)
1509 {
1510 unsigned long _rss = get_mm_rss(mm);
1511
1512 if ((mm)->hiwater_rss < _rss)
1513 (mm)->hiwater_rss = _rss;
1514 }
1515
1516 static inline void update_hiwater_vm(struct mm_struct *mm)
1517 {
1518 if (mm->hiwater_vm < mm->total_vm)
1519 mm->hiwater_vm = mm->total_vm;
1520 }
1521
1522 static inline void reset_mm_hiwater_rss(struct mm_struct *mm)
1523 {
1524 mm->hiwater_rss = get_mm_rss(mm);
1525 }
1526
1527 static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
1528 struct mm_struct *mm)
1529 {
1530 unsigned long hiwater_rss = get_mm_hiwater_rss(mm);
1531
1532 if (*maxrss < hiwater_rss)
1533 *maxrss = hiwater_rss;
1534 }
1535
1536 #if defined(SPLIT_RSS_COUNTING)
1537 void sync_mm_rss(struct mm_struct *mm);
1538 #else
1539 static inline void sync_mm_rss(struct mm_struct *mm)
1540 {
1541 }
1542 #endif
1543
1544 #ifndef __HAVE_ARCH_PTE_DEVMAP
1545 static inline int pte_devmap(pte_t pte)
1546 {
1547 return 0;
1548 }
1549 #endif
1550
1551 int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);
1552
1553 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
1554 spinlock_t **ptl);
1555 static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
1556 spinlock_t **ptl)
1557 {
1558 pte_t *ptep;
1559 __cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl));
1560 return ptep;
1561 }
1562
1563 #ifdef __PAGETABLE_PUD_FOLDED
1564 static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
1565 unsigned long address)
1566 {
1567 return 0;
1568 }
1569 #else
1570 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
1571 #endif
1572
1573 #if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU)
1574 static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
1575 unsigned long address)
1576 {
1577 return 0;
1578 }
1579
1580 static inline void mm_nr_pmds_init(struct mm_struct *mm) {}
1581
1582 static inline unsigned long mm_nr_pmds(struct mm_struct *mm)
1583 {
1584 return 0;
1585 }
1586
1587 static inline void mm_inc_nr_pmds(struct mm_struct *mm) {}
1588 static inline void mm_dec_nr_pmds(struct mm_struct *mm) {}
1589
1590 #else
1591 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
1592
1593 static inline void mm_nr_pmds_init(struct mm_struct *mm)
1594 {
1595 atomic_long_set(&mm->nr_pmds, 0);
1596 }
1597
1598 static inline unsigned long mm_nr_pmds(struct mm_struct *mm)
1599 {
1600 return atomic_long_read(&mm->nr_pmds);
1601 }
1602
1603 static inline void mm_inc_nr_pmds(struct mm_struct *mm)
1604 {
1605 atomic_long_inc(&mm->nr_pmds);
1606 }
1607
1608 static inline void mm_dec_nr_pmds(struct mm_struct *mm)
1609 {
1610 atomic_long_dec(&mm->nr_pmds);
1611 }
1612 #endif
1613
1614 int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address);
1615 int __pte_alloc_kernel(pmd_t *pmd, unsigned long address);
1616
1617 /*
1618 * The following ifdef needed to get the 4level-fixup.h header to work.
1619 * Remove it when 4level-fixup.h has been removed.
1620 */
1621 #if defined(CONFIG_MMU) && !defined(__ARCH_HAS_4LEVEL_HACK)
1622 static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
1623 {
1624 return (unlikely(pgd_none(*pgd)) && __pud_alloc(mm, pgd, address))?
1625 NULL: pud_offset(pgd, address);
1626 }
1627
1628 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
1629 {
1630 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
1631 NULL: pmd_offset(pud, address);
1632 }
1633 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
1634
1635 #if USE_SPLIT_PTE_PTLOCKS
1636 #if ALLOC_SPLIT_PTLOCKS
1637 void __init ptlock_cache_init(void);
1638 extern bool ptlock_alloc(struct page *page);
1639 extern void ptlock_free(struct page *page);
1640
1641 static inline spinlock_t *ptlock_ptr(struct page *page)
1642 {
1643 return page->ptl;
1644 }
1645 #else /* ALLOC_SPLIT_PTLOCKS */
1646 static inline void ptlock_cache_init(void)
1647 {
1648 }
1649
1650 static inline bool ptlock_alloc(struct page *page)
1651 {
1652 return true;
1653 }
1654
1655 static inline void ptlock_free(struct page *page)
1656 {
1657 }
1658
1659 static inline spinlock_t *ptlock_ptr(struct page *page)
1660 {
1661 return &page->ptl;
1662 }
1663 #endif /* ALLOC_SPLIT_PTLOCKS */
1664
1665 static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
1666 {
1667 return ptlock_ptr(pmd_page(*pmd));
1668 }
1669
1670 static inline bool ptlock_init(struct page *page)
1671 {
1672 /*
1673 * prep_new_page() initialize page->private (and therefore page->ptl)
1674 * with 0. Make sure nobody took it in use in between.
1675 *
1676 * It can happen if arch try to use slab for page table allocation:
1677 * slab code uses page->slab_cache, which share storage with page->ptl.
1678 */
1679 VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page);
1680 if (!ptlock_alloc(page))
1681 return false;
1682 spin_lock_init(ptlock_ptr(page));
1683 return true;
1684 }
1685
1686 /* Reset page->mapping so free_pages_check won't complain. */
1687 static inline void pte_lock_deinit(struct page *page)
1688 {
1689 page->mapping = NULL;
1690 ptlock_free(page);
1691 }
1692
1693 #else /* !USE_SPLIT_PTE_PTLOCKS */
1694 /*
1695 * We use mm->page_table_lock to guard all pagetable pages of the mm.
1696 */
1697 static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
1698 {
1699 return &mm->page_table_lock;
1700 }
1701 static inline void ptlock_cache_init(void) {}
1702 static inline bool ptlock_init(struct page *page) { return true; }
1703 static inline void pte_lock_deinit(struct page *page) {}
1704 #endif /* USE_SPLIT_PTE_PTLOCKS */
1705
1706 static inline void pgtable_init(void)
1707 {
1708 ptlock_cache_init();
1709 pgtable_cache_init();
1710 }
1711
1712 static inline bool pgtable_page_ctor(struct page *page)
1713 {
1714 if (!ptlock_init(page))
1715 return false;
1716 inc_zone_page_state(page, NR_PAGETABLE);
1717 return true;
1718 }
1719
1720 static inline void pgtable_page_dtor(struct page *page)
1721 {
1722 pte_lock_deinit(page);
1723 dec_zone_page_state(page, NR_PAGETABLE);
1724 }
1725
1726 #define pte_offset_map_lock(mm, pmd, address, ptlp) \
1727 ({ \
1728 spinlock_t *__ptl = pte_lockptr(mm, pmd); \
1729 pte_t *__pte = pte_offset_map(pmd, address); \
1730 *(ptlp) = __ptl; \
1731 spin_lock(__ptl); \
1732 __pte; \
1733 })
1734
1735 #define pte_unmap_unlock(pte, ptl) do { \
1736 spin_unlock(ptl); \
1737 pte_unmap(pte); \
1738 } while (0)
1739
1740 #define pte_alloc(mm, pmd, address) \
1741 (unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd, address))
1742
1743 #define pte_alloc_map(mm, pmd, address) \
1744 (pte_alloc(mm, pmd, address) ? NULL : pte_offset_map(pmd, address))
1745
1746 #define pte_alloc_map_lock(mm, pmd, address, ptlp) \
1747 (pte_alloc(mm, pmd, address) ? \
1748 NULL : pte_offset_map_lock(mm, pmd, address, ptlp))
1749
1750 #define pte_alloc_kernel(pmd, address) \
1751 ((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd, address))? \
1752 NULL: pte_offset_kernel(pmd, address))
1753
1754 #if USE_SPLIT_PMD_PTLOCKS
1755
1756 static struct page *pmd_to_page(pmd_t *pmd)
1757 {
1758 unsigned long mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
1759 return virt_to_page((void *)((unsigned long) pmd & mask));
1760 }
1761
1762 static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
1763 {
1764 return ptlock_ptr(pmd_to_page(pmd));
1765 }
1766
1767 static inline bool pgtable_pmd_page_ctor(struct page *page)
1768 {
1769 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1770 page->pmd_huge_pte = NULL;
1771 #endif
1772 return ptlock_init(page);
1773 }
1774
1775 static inline void pgtable_pmd_page_dtor(struct page *page)
1776 {
1777 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1778 VM_BUG_ON_PAGE(page->pmd_huge_pte, page);
1779 #endif
1780 ptlock_free(page);
1781 }
1782
1783 #define pmd_huge_pte(mm, pmd) (pmd_to_page(pmd)->pmd_huge_pte)
1784
1785 #else
1786
1787 static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
1788 {
1789 return &mm->page_table_lock;
1790 }
1791
1792 static inline bool pgtable_pmd_page_ctor(struct page *page) { return true; }
1793 static inline void pgtable_pmd_page_dtor(struct page *page) {}
1794
1795 #define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte)
1796
1797 #endif
1798
1799 static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
1800 {
1801 spinlock_t *ptl = pmd_lockptr(mm, pmd);
1802 spin_lock(ptl);
1803 return ptl;
1804 }
1805
1806 /*
1807 * No scalability reason to split PUD locks yet, but follow the same pattern
1808 * as the PMD locks to make it easier if we decide to. The VM should not be
1809 * considered ready to switch to split PUD locks yet; there may be places
1810 * which need to be converted from page_table_lock.
1811 */
1812 static inline spinlock_t *pud_lockptr(struct mm_struct *mm, pud_t *pud)
1813 {
1814 return &mm->page_table_lock;
1815 }
1816
1817 static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud)
1818 {
1819 spinlock_t *ptl = pud_lockptr(mm, pud);
1820
1821 spin_lock(ptl);
1822 return ptl;
1823 }
1824
1825 extern void __init pagecache_init(void);
1826 extern void free_area_init(unsigned long * zones_size);
1827 extern void free_area_init_node(int nid, unsigned long * zones_size,
1828 unsigned long zone_start_pfn, unsigned long *zholes_size);
1829 extern void free_initmem(void);
1830
1831 /*
1832 * Free reserved pages within range [PAGE_ALIGN(start), end & PAGE_MASK)
1833 * into the buddy system. The freed pages will be poisoned with pattern
1834 * "poison" if it's within range [0, UCHAR_MAX].
1835 * Return pages freed into the buddy system.
1836 */
1837 extern unsigned long free_reserved_area(void *start, void *end,
1838 int poison, char *s);
1839
1840 #ifdef CONFIG_HIGHMEM
1841 /*
1842 * Free a highmem page into the buddy system, adjusting totalhigh_pages
1843 * and totalram_pages.
1844 */
1845 extern void free_highmem_page(struct page *page);
1846 #endif
1847
1848 extern void adjust_managed_page_count(struct page *page, long count);
1849 extern void mem_init_print_info(const char *str);
1850
1851 extern void reserve_bootmem_region(phys_addr_t start, phys_addr_t end);
1852
1853 /* Free the reserved page into the buddy system, so it gets managed. */
1854 static inline void __free_reserved_page(struct page *page)
1855 {
1856 ClearPageReserved(page);
1857 init_page_count(page);
1858 __free_page(page);
1859 }
1860
1861 static inline void free_reserved_page(struct page *page)
1862 {
1863 __free_reserved_page(page);
1864 adjust_managed_page_count(page, 1);
1865 }
1866
1867 static inline void mark_page_reserved(struct page *page)
1868 {
1869 SetPageReserved(page);
1870 adjust_managed_page_count(page, -1);
1871 }
1872
1873 /*
1874 * Default method to free all the __init memory into the buddy system.
1875 * The freed pages will be poisoned with pattern "poison" if it's within
1876 * range [0, UCHAR_MAX].
1877 * Return pages freed into the buddy system.
1878 */
1879 static inline unsigned long free_initmem_default(int poison)
1880 {
1881 extern char __init_begin[], __init_end[];
1882
1883 return free_reserved_area(&__init_begin, &__init_end,
1884 poison, "unused kernel");
1885 }
1886
1887 static inline unsigned long get_num_physpages(void)
1888 {
1889 int nid;
1890 unsigned long phys_pages = 0;
1891
1892 for_each_online_node(nid)
1893 phys_pages += node_present_pages(nid);
1894
1895 return phys_pages;
1896 }
1897
1898 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
1899 /*
1900 * With CONFIG_HAVE_MEMBLOCK_NODE_MAP set, an architecture may initialise its
1901 * zones, allocate the backing mem_map and account for memory holes in a more
1902 * architecture independent manner. This is a substitute for creating the
1903 * zone_sizes[] and zholes_size[] arrays and passing them to
1904 * free_area_init_node()
1905 *
1906 * An architecture is expected to register range of page frames backed by
1907 * physical memory with memblock_add[_node]() before calling
1908 * free_area_init_nodes() passing in the PFN each zone ends at. At a basic
1909 * usage, an architecture is expected to do something like
1910 *
1911 * unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn,
1912 * max_highmem_pfn};
1913 * for_each_valid_physical_page_range()
1914 * memblock_add_node(base, size, nid)
1915 * free_area_init_nodes(max_zone_pfns);
1916 *
1917 * free_bootmem_with_active_regions() calls free_bootmem_node() for each
1918 * registered physical page range. Similarly
1919 * sparse_memory_present_with_active_regions() calls memory_present() for
1920 * each range when SPARSEMEM is enabled.
1921 *
1922 * See mm/page_alloc.c for more information on each function exposed by
1923 * CONFIG_HAVE_MEMBLOCK_NODE_MAP.
1924 */
1925 extern void free_area_init_nodes(unsigned long *max_zone_pfn);
1926 unsigned long node_map_pfn_alignment(void);
1927 unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
1928 unsigned long end_pfn);
1929 extern unsigned long absent_pages_in_range(unsigned long start_pfn,
1930 unsigned long end_pfn);
1931 extern void get_pfn_range_for_nid(unsigned int nid,
1932 unsigned long *start_pfn, unsigned long *end_pfn);
1933 extern unsigned long find_min_pfn_with_active_regions(void);
1934 extern void free_bootmem_with_active_regions(int nid,
1935 unsigned long max_low_pfn);
1936 extern void sparse_memory_present_with_active_regions(int nid);
1937
1938 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
1939
1940 #if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \
1941 !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID)
1942 static inline int __early_pfn_to_nid(unsigned long pfn,
1943 struct mminit_pfnnid_cache *state)
1944 {
1945 return 0;
1946 }
1947 #else
1948 /* please see mm/page_alloc.c */
1949 extern int __meminit early_pfn_to_nid(unsigned long pfn);
1950 /* there is a per-arch backend function. */
1951 extern int __meminit __early_pfn_to_nid(unsigned long pfn,
1952 struct mminit_pfnnid_cache *state);
1953 #endif
1954
1955 extern void set_dma_reserve(unsigned long new_dma_reserve);
1956 extern void memmap_init_zone(unsigned long, int, unsigned long,
1957 unsigned long, enum memmap_context);
1958 extern void setup_per_zone_wmarks(void);
1959 extern int __meminit init_per_zone_wmark_min(void);
1960 extern void mem_init(void);
1961 extern void __init mmap_init(void);
1962 extern void show_mem(unsigned int flags, nodemask_t *nodemask);
1963 extern long si_mem_available(void);
1964 extern void si_meminfo(struct sysinfo * val);
1965 extern void si_meminfo_node(struct sysinfo *val, int nid);
1966 #ifdef __HAVE_ARCH_RESERVED_KERNEL_PAGES
1967 extern unsigned long arch_reserved_kernel_pages(void);
1968 #endif
1969
1970 extern __printf(3, 4)
1971 void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...);
1972
1973 extern void setup_per_cpu_pageset(void);
1974
1975 extern void zone_pcp_update(struct zone *zone);
1976 extern void zone_pcp_reset(struct zone *zone);
1977
1978 /* page_alloc.c */
1979 extern int min_free_kbytes;
1980 extern int watermark_scale_factor;
1981
1982 /* nommu.c */
1983 extern atomic_long_t mmap_pages_allocated;
1984 extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);
1985
1986 /* interval_tree.c */
1987 void vma_interval_tree_insert(struct vm_area_struct *node,
1988 struct rb_root *root);
1989 void vma_interval_tree_insert_after(struct vm_area_struct *node,
1990 struct vm_area_struct *prev,
1991 struct rb_root *root);
1992 void vma_interval_tree_remove(struct vm_area_struct *node,
1993 struct rb_root *root);
1994 struct vm_area_struct *vma_interval_tree_iter_first(struct rb_root *root,
1995 unsigned long start, unsigned long last);
1996 struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node,
1997 unsigned long start, unsigned long last);
1998
1999 #define vma_interval_tree_foreach(vma, root, start, last) \
2000 for (vma = vma_interval_tree_iter_first(root, start, last); \
2001 vma; vma = vma_interval_tree_iter_next(vma, start, last))
2002
2003 void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
2004 struct rb_root *root);
2005 void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
2006 struct rb_root *root);
2007 struct anon_vma_chain *anon_vma_interval_tree_iter_first(
2008 struct rb_root *root, unsigned long start, unsigned long last);
2009 struct anon_vma_chain *anon_vma_interval_tree_iter_next(
2010 struct anon_vma_chain *node, unsigned long start, unsigned long last);
2011 #ifdef CONFIG_DEBUG_VM_RB
2012 void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
2013 #endif
2014
2015 #define anon_vma_interval_tree_foreach(avc, root, start, last) \
2016 for (avc = anon_vma_interval_tree_iter_first(root, start, last); \
2017 avc; avc = anon_vma_interval_tree_iter_next(avc, start, last))
2018
2019 /* mmap.c */
2020 extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
2021 extern int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
2022 unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert,
2023 struct vm_area_struct *expand);
2024 static inline int vma_adjust(struct vm_area_struct *vma, unsigned long start,
2025 unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert)
2026 {
2027 return __vma_adjust(vma, start, end, pgoff, insert, NULL);
2028 }
2029 extern struct vm_area_struct *vma_merge(struct mm_struct *,
2030 struct vm_area_struct *prev, unsigned long addr, unsigned long end,
2031 unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
2032 struct mempolicy *, struct vm_userfaultfd_ctx);
2033 extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
2034 extern int __split_vma(struct mm_struct *, struct vm_area_struct *,
2035 unsigned long addr, int new_below);
2036 extern int split_vma(struct mm_struct *, struct vm_area_struct *,
2037 unsigned long addr, int new_below);
2038 extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
2039 extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *,
2040 struct rb_node **, struct rb_node *);
2041 extern void unlink_file_vma(struct vm_area_struct *);
2042 extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
2043 unsigned long addr, unsigned long len, pgoff_t pgoff,
2044 bool *need_rmap_locks);
2045 extern void exit_mmap(struct mm_struct *);
2046
2047 static inline int check_data_rlimit(unsigned long rlim,
2048 unsigned long new,
2049 unsigned long start,
2050 unsigned long end_data,
2051 unsigned long start_data)
2052 {
2053 if (rlim < RLIM_INFINITY) {
2054 if (((new - start) + (end_data - start_data)) > rlim)
2055 return -ENOSPC;
2056 }
2057
2058 return 0;
2059 }
2060
2061 extern int mm_take_all_locks(struct mm_struct *mm);
2062 extern void mm_drop_all_locks(struct mm_struct *mm);
2063
2064 extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
2065 extern struct file *get_mm_exe_file(struct mm_struct *mm);
2066 extern struct file *get_task_exe_file(struct task_struct *task);
2067
2068 extern bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long npages);
2069 extern void vm_stat_account(struct mm_struct *, vm_flags_t, long npages);
2070
2071 extern bool vma_is_special_mapping(const struct vm_area_struct *vma,
2072 const struct vm_special_mapping *sm);
2073 extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
2074 unsigned long addr, unsigned long len,
2075 unsigned long flags,
2076 const struct vm_special_mapping *spec);
2077 /* This is an obsolete alternative to _install_special_mapping. */
2078 extern int install_special_mapping(struct mm_struct *mm,
2079 unsigned long addr, unsigned long len,
2080 unsigned long flags, struct page **pages);
2081
2082 extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
2083
2084 extern unsigned long mmap_region(struct file *file, unsigned long addr,
2085 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
2086 struct list_head *uf);
2087 extern unsigned long do_mmap(struct file *file, unsigned long addr,
2088 unsigned long len, unsigned long prot, unsigned long flags,
2089 vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate,
2090 struct list_head *uf);
2091 extern int do_munmap(struct mm_struct *, unsigned long, size_t,
2092 struct list_head *uf);
2093
2094 static inline unsigned long
2095 do_mmap_pgoff(struct file *file, unsigned long addr,
2096 unsigned long len, unsigned long prot, unsigned long flags,
2097 unsigned long pgoff, unsigned long *populate,
2098 struct list_head *uf)
2099 {
2100 return do_mmap(file, addr, len, prot, flags, 0, pgoff, populate, uf);
2101 }
2102
2103 #ifdef CONFIG_MMU
2104 extern int __mm_populate(unsigned long addr, unsigned long len,
2105 int ignore_errors);
2106 static inline void mm_populate(unsigned long addr, unsigned long len)
2107 {
2108 /* Ignore errors */
2109 (void) __mm_populate(addr, len, 1);
2110 }
2111 #else
2112 static inline void mm_populate(unsigned long addr, unsigned long len) {}
2113 #endif
2114
2115 /* These take the mm semaphore themselves */
2116 extern int __must_check vm_brk(unsigned long, unsigned long);
2117 extern int __must_check vm_brk_flags(unsigned long, unsigned long, unsigned long);
2118 extern int vm_munmap(unsigned long, size_t);
2119 extern unsigned long __must_check vm_mmap(struct file *, unsigned long,
2120 unsigned long, unsigned long,
2121 unsigned long, unsigned long);
2122
2123 struct vm_unmapped_area_info {
2124 #define VM_UNMAPPED_AREA_TOPDOWN 1
2125 unsigned long flags;
2126 unsigned long length;
2127 unsigned long low_limit;
2128 unsigned long high_limit;
2129 unsigned long align_mask;
2130 unsigned long align_offset;
2131 };
2132
2133 extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
2134 extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
2135
2136 /*
2137 * Search for an unmapped address range.
2138 *
2139 * We are looking for a range that:
2140 * - does not intersect with any VMA;
2141 * - is contained within the [low_limit, high_limit) interval;
2142 * - is at least the desired size.
2143 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
2144 */
2145 static inline unsigned long
2146 vm_unmapped_area(struct vm_unmapped_area_info *info)
2147 {
2148 if (info->flags & VM_UNMAPPED_AREA_TOPDOWN)
2149 return unmapped_area_topdown(info);
2150 else
2151 return unmapped_area(info);
2152 }
2153
2154 /* truncate.c */
2155 extern void truncate_inode_pages(struct address_space *, loff_t);
2156 extern void truncate_inode_pages_range(struct address_space *,
2157 loff_t lstart, loff_t lend);
2158 extern void truncate_inode_pages_final(struct address_space *);
2159
2160 /* generic vm_area_ops exported for stackable file systems */
2161 extern int filemap_fault(struct vm_fault *vmf);
2162 extern void filemap_map_pages(struct vm_fault *vmf,
2163 pgoff_t start_pgoff, pgoff_t end_pgoff);
2164 extern int filemap_page_mkwrite(struct vm_fault *vmf);
2165
2166 /* mm/page-writeback.c */
2167 int write_one_page(struct page *page, int wait);
2168 void task_dirty_inc(struct task_struct *tsk);
2169
2170 /* readahead.c */
2171 #define VM_MAX_READAHEAD 128 /* kbytes */
2172 #define VM_MIN_READAHEAD 16 /* kbytes (includes current page) */
2173
2174 int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
2175 pgoff_t offset, unsigned long nr_to_read);
2176
2177 void page_cache_sync_readahead(struct address_space *mapping,
2178 struct file_ra_state *ra,
2179 struct file *filp,
2180 pgoff_t offset,
2181 unsigned long size);
2182
2183 void page_cache_async_readahead(struct address_space *mapping,
2184 struct file_ra_state *ra,
2185 struct file *filp,
2186 struct page *pg,
2187 pgoff_t offset,
2188 unsigned long size);
2189
2190 /* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
2191 extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
2192
2193 /* CONFIG_STACK_GROWSUP still needs to to grow downwards at some places */
2194 extern int expand_downwards(struct vm_area_struct *vma,
2195 unsigned long address);
2196 #if VM_GROWSUP
2197 extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
2198 #else
2199 #define expand_upwards(vma, address) (0)
2200 #endif
2201
2202 /* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
2203 extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
2204 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
2205 struct vm_area_struct **pprev);
2206
2207 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
2208 NULL if none. Assume start_addr < end_addr. */
2209 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
2210 {
2211 struct vm_area_struct * vma = find_vma(mm,start_addr);
2212
2213 if (vma && end_addr <= vma->vm_start)
2214 vma = NULL;
2215 return vma;
2216 }
2217
2218 static inline unsigned long vma_pages(struct vm_area_struct *vma)
2219 {
2220 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
2221 }
2222
2223 /* Look up the first VMA which exactly match the interval vm_start ... vm_end */
2224 static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
2225 unsigned long vm_start, unsigned long vm_end)
2226 {
2227 struct vm_area_struct *vma = find_vma(mm, vm_start);
2228
2229 if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end))
2230 vma = NULL;
2231
2232 return vma;
2233 }
2234
2235 #ifdef CONFIG_MMU
2236 pgprot_t vm_get_page_prot(unsigned long vm_flags);
2237 void vma_set_page_prot(struct vm_area_struct *vma);
2238 #else
2239 static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
2240 {
2241 return __pgprot(0);
2242 }
2243 static inline void vma_set_page_prot(struct vm_area_struct *vma)
2244 {
2245 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
2246 }
2247 #endif
2248
2249 #ifdef CONFIG_NUMA_BALANCING
2250 unsigned long change_prot_numa(struct vm_area_struct *vma,
2251 unsigned long start, unsigned long end);
2252 #endif
2253
2254 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
2255 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
2256 unsigned long pfn, unsigned long size, pgprot_t);
2257 int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
2258 int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
2259 unsigned long pfn);
2260 int vm_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
2261 unsigned long pfn, pgprot_t pgprot);
2262 int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
2263 pfn_t pfn);
2264 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
2265
2266
2267 struct page *follow_page_mask(struct vm_area_struct *vma,
2268 unsigned long address, unsigned int foll_flags,
2269 unsigned int *page_mask);
2270
2271 static inline struct page *follow_page(struct vm_area_struct *vma,
2272 unsigned long address, unsigned int foll_flags)
2273 {
2274 unsigned int unused_page_mask;
2275 return follow_page_mask(vma, address, foll_flags, &unused_page_mask);
2276 }
2277
2278 #define FOLL_WRITE 0x01 /* check pte is writable */
2279 #define FOLL_TOUCH 0x02 /* mark page accessed */
2280 #define FOLL_GET 0x04 /* do get_page on page */
2281 #define FOLL_DUMP 0x08 /* give error on hole if it would be zero */
2282 #define FOLL_FORCE 0x10 /* get_user_pages read/write w/o permission */
2283 #define FOLL_NOWAIT 0x20 /* if a disk transfer is needed, start the IO
2284 * and return without waiting upon it */
2285 #define FOLL_POPULATE 0x40 /* fault in page */
2286 #define FOLL_SPLIT 0x80 /* don't return transhuge pages, split them */
2287 #define FOLL_HWPOISON 0x100 /* check page is hwpoisoned */
2288 #define FOLL_NUMA 0x200 /* force NUMA hinting page fault */
2289 #define FOLL_MIGRATION 0x400 /* wait for page to replace migration entry */
2290 #define FOLL_TRIED 0x800 /* a retry, previous pass started an IO */
2291 #define FOLL_MLOCK 0x1000 /* lock present pages */
2292 #define FOLL_REMOTE 0x2000 /* we are working on non-current tsk/mm */
2293 #define FOLL_COW 0x4000 /* internal GUP flag */
2294
2295 typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
2296 void *data);
2297 extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
2298 unsigned long size, pte_fn_t fn, void *data);
2299
2300
2301 #ifdef CONFIG_PAGE_POISONING
2302 extern bool page_poisoning_enabled(void);
2303 extern void kernel_poison_pages(struct page *page, int numpages, int enable);
2304 extern bool page_is_poisoned(struct page *page);
2305 #else
2306 static inline bool page_poisoning_enabled(void) { return false; }
2307 static inline void kernel_poison_pages(struct page *page, int numpages,
2308 int enable) { }
2309 static inline bool page_is_poisoned(struct page *page) { return false; }
2310 #endif
2311
2312 #ifdef CONFIG_DEBUG_PAGEALLOC
2313 extern bool _debug_pagealloc_enabled;
2314 extern void __kernel_map_pages(struct page *page, int numpages, int enable);
2315
2316 static inline bool debug_pagealloc_enabled(void)
2317 {
2318 return _debug_pagealloc_enabled;
2319 }
2320
2321 static inline void
2322 kernel_map_pages(struct page *page, int numpages, int enable)
2323 {
2324 if (!debug_pagealloc_enabled())
2325 return;
2326
2327 __kernel_map_pages(page, numpages, enable);
2328 }
2329 #ifdef CONFIG_HIBERNATION
2330 extern bool kernel_page_present(struct page *page);
2331 #endif /* CONFIG_HIBERNATION */
2332 #else /* CONFIG_DEBUG_PAGEALLOC */
2333 static inline void
2334 kernel_map_pages(struct page *page, int numpages, int enable) {}
2335 #ifdef CONFIG_HIBERNATION
2336 static inline bool kernel_page_present(struct page *page) { return true; }
2337 #endif /* CONFIG_HIBERNATION */
2338 static inline bool debug_pagealloc_enabled(void)
2339 {
2340 return false;
2341 }
2342 #endif /* CONFIG_DEBUG_PAGEALLOC */
2343
2344 #ifdef __HAVE_ARCH_GATE_AREA
2345 extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
2346 extern int in_gate_area_no_mm(unsigned long addr);
2347 extern int in_gate_area(struct mm_struct *mm, unsigned long addr);
2348 #else
2349 static inline struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
2350 {
2351 return NULL;
2352 }
2353 static inline int in_gate_area_no_mm(unsigned long addr) { return 0; }
2354 static inline int in_gate_area(struct mm_struct *mm, unsigned long addr)
2355 {
2356 return 0;
2357 }
2358 #endif /* __HAVE_ARCH_GATE_AREA */
2359
2360 extern bool process_shares_mm(struct task_struct *p, struct mm_struct *mm);
2361
2362 #ifdef CONFIG_SYSCTL
2363 extern int sysctl_drop_caches;
2364 int drop_caches_sysctl_handler(struct ctl_table *, int,
2365 void __user *, size_t *, loff_t *);
2366 #endif
2367
2368 void drop_slab(void);
2369 void drop_slab_node(int nid);
2370
2371 #ifndef CONFIG_MMU
2372 #define randomize_va_space 0
2373 #else
2374 extern int randomize_va_space;
2375 #endif
2376
2377 const char * arch_vma_name(struct vm_area_struct *vma);
2378 void print_vma_addr(char *prefix, unsigned long rip);
2379
2380 void sparse_mem_maps_populate_node(struct page **map_map,
2381 unsigned long pnum_begin,
2382 unsigned long pnum_end,
2383 unsigned long map_count,
2384 int nodeid);
2385
2386 struct page *sparse_mem_map_populate(unsigned long pnum, int nid);
2387 pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
2388 pud_t *vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node);
2389 pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
2390 pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node);
2391 void *vmemmap_alloc_block(unsigned long size, int node);
2392 struct vmem_altmap;
2393 void *__vmemmap_alloc_block_buf(unsigned long size, int node,
2394 struct vmem_altmap *altmap);
2395 static inline void *vmemmap_alloc_block_buf(unsigned long size, int node)
2396 {
2397 return __vmemmap_alloc_block_buf(size, node, NULL);
2398 }
2399
2400 void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
2401 int vmemmap_populate_basepages(unsigned long start, unsigned long end,
2402 int node);
2403 int vmemmap_populate(unsigned long start, unsigned long end, int node);
2404 void vmemmap_populate_print_last(void);
2405 #ifdef CONFIG_MEMORY_HOTPLUG
2406 void vmemmap_free(unsigned long start, unsigned long end);
2407 #endif
2408 void register_page_bootmem_memmap(unsigned long section_nr, struct page *map,
2409 unsigned long size);
2410
2411 enum mf_flags {
2412 MF_COUNT_INCREASED = 1 << 0,
2413 MF_ACTION_REQUIRED = 1 << 1,
2414 MF_MUST_KILL = 1 << 2,
2415 MF_SOFT_OFFLINE = 1 << 3,
2416 };
2417 extern int memory_failure(unsigned long pfn, int trapno, int flags);
2418 extern void memory_failure_queue(unsigned long pfn, int trapno, int flags);
2419 extern int unpoison_memory(unsigned long pfn);
2420 extern int get_hwpoison_page(struct page *page);
2421 #define put_hwpoison_page(page) put_page(page)
2422 extern int sysctl_memory_failure_early_kill;
2423 extern int sysctl_memory_failure_recovery;
2424 extern void shake_page(struct page *p, int access);
2425 extern atomic_long_t num_poisoned_pages;
2426 extern int soft_offline_page(struct page *page, int flags);
2427
2428
2429 /*
2430 * Error handlers for various types of pages.
2431 */
2432 enum mf_result {
2433 MF_IGNORED, /* Error: cannot be handled */
2434 MF_FAILED, /* Error: handling failed */
2435 MF_DELAYED, /* Will be handled later */
2436 MF_RECOVERED, /* Successfully recovered */
2437 };
2438
2439 enum mf_action_page_type {
2440 MF_MSG_KERNEL,
2441 MF_MSG_KERNEL_HIGH_ORDER,
2442 MF_MSG_SLAB,
2443 MF_MSG_DIFFERENT_COMPOUND,
2444 MF_MSG_POISONED_HUGE,
2445 MF_MSG_HUGE,
2446 MF_MSG_FREE_HUGE,
2447 MF_MSG_UNMAP_FAILED,
2448 MF_MSG_DIRTY_SWAPCACHE,
2449 MF_MSG_CLEAN_SWAPCACHE,
2450 MF_MSG_DIRTY_MLOCKED_LRU,
2451 MF_MSG_CLEAN_MLOCKED_LRU,
2452 MF_MSG_DIRTY_UNEVICTABLE_LRU,
2453 MF_MSG_CLEAN_UNEVICTABLE_LRU,
2454 MF_MSG_DIRTY_LRU,
2455 MF_MSG_CLEAN_LRU,
2456 MF_MSG_TRUNCATED_LRU,
2457 MF_MSG_BUDDY,
2458 MF_MSG_BUDDY_2ND,
2459 MF_MSG_UNKNOWN,
2460 };
2461
2462 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
2463 extern void clear_huge_page(struct page *page,
2464 unsigned long addr,
2465 unsigned int pages_per_huge_page);
2466 extern void copy_user_huge_page(struct page *dst, struct page *src,
2467 unsigned long addr, struct vm_area_struct *vma,
2468 unsigned int pages_per_huge_page);
2469 extern long copy_huge_page_from_user(struct page *dst_page,
2470 const void __user *usr_src,
2471 unsigned int pages_per_huge_page,
2472 bool allow_pagefault);
2473 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
2474
2475 extern struct page_ext_operations debug_guardpage_ops;
2476 extern struct page_ext_operations page_poisoning_ops;
2477
2478 #ifdef CONFIG_DEBUG_PAGEALLOC
2479 extern unsigned int _debug_guardpage_minorder;
2480 extern bool _debug_guardpage_enabled;
2481
2482 static inline unsigned int debug_guardpage_minorder(void)
2483 {
2484 return _debug_guardpage_minorder;
2485 }
2486
2487 static inline bool debug_guardpage_enabled(void)
2488 {
2489 return _debug_guardpage_enabled;
2490 }
2491
2492 static inline bool page_is_guard(struct page *page)
2493 {
2494 struct page_ext *page_ext;
2495
2496 if (!debug_guardpage_enabled())
2497 return false;
2498
2499 page_ext = lookup_page_ext(page);
2500 if (unlikely(!page_ext))
2501 return false;
2502
2503 return test_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
2504 }
2505 #else
2506 static inline unsigned int debug_guardpage_minorder(void) { return 0; }
2507 static inline bool debug_guardpage_enabled(void) { return false; }
2508 static inline bool page_is_guard(struct page *page) { return false; }
2509 #endif /* CONFIG_DEBUG_PAGEALLOC */
2510
2511 #if MAX_NUMNODES > 1
2512 void __init setup_nr_node_ids(void);
2513 #else
2514 static inline void setup_nr_node_ids(void) {}
2515 #endif
2516
2517 #endif /* __KERNEL__ */
2518 #endif /* _LINUX_MM_H */ 1 #ifndef __LINUX_SPINLOCK_H
2 #define __LINUX_SPINLOCK_H
3
4 /*
5 * include/linux/spinlock.h - generic spinlock/rwlock declarations
6 *
7 * here's the role of the various spinlock/rwlock related include files:
8 *
9 * on SMP builds:
10 *
11 * asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
12 * initializers
13 *
14 * linux/spinlock_types.h:
15 * defines the generic type and initializers
16 *
17 * asm/spinlock.h: contains the arch_spin_*()/etc. lowlevel
18 * implementations, mostly inline assembly code
19 *
20 * (also included on UP-debug builds:)
21 *
22 * linux/spinlock_api_smp.h:
23 * contains the prototypes for the _spin_*() APIs.
24 *
25 * linux/spinlock.h: builds the final spin_*() APIs.
26 *
27 * on UP builds:
28 *
29 * linux/spinlock_type_up.h:
30 * contains the generic, simplified UP spinlock type.
31 * (which is an empty structure on non-debug builds)
32 *
33 * linux/spinlock_types.h:
34 * defines the generic type and initializers
35 *
36 * linux/spinlock_up.h:
37 * contains the arch_spin_*()/etc. version of UP
38 * builds. (which are NOPs on non-debug, non-preempt
39 * builds)
40 *
41 * (included on UP-non-debug builds:)
42 *
43 * linux/spinlock_api_up.h:
44 * builds the _spin_*() APIs.
45 *
46 * linux/spinlock.h: builds the final spin_*() APIs.
47 */
48
49 #include <linux/typecheck.h>
50 #include <linux/preempt.h>
51 #include <linux/linkage.h>
52 #include <linux/compiler.h>
53 #include <linux/irqflags.h>
54 #include <linux/thread_info.h>
55 #include <linux/kernel.h>
56 #include <linux/stringify.h>
57 #include <linux/bottom_half.h>
58 #include <asm/barrier.h>
59
60
61 /*
62 * Must define these before including other files, inline functions need them
63 */
64 #define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME
65
66 #define LOCK_SECTION_START(extra) \
67 ".subsection 1\n\t" \
68 extra \
69 ".ifndef " LOCK_SECTION_NAME "\n\t" \
70 LOCK_SECTION_NAME ":\n\t" \
71 ".endif\n"
72
73 #define LOCK_SECTION_END \
74 ".previous\n\t"
75
76 #define __lockfunc __attribute__((section(".spinlock.text")))
77
78 /*
79 * Pull the arch_spinlock_t and arch_rwlock_t definitions:
80 */
81 #include <linux/spinlock_types.h>
82
83 /*
84 * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
85 */
86 #ifdef CONFIG_SMP
87 # include <asm/spinlock.h>
88 #else
89 # include <linux/spinlock_up.h>
90 #endif
91
92 #ifdef CONFIG_DEBUG_SPINLOCK
93 extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
94 struct lock_class_key *key);
95 # define raw_spin_lock_init(lock) \
96 do { \
97 static struct lock_class_key __key; \
98 \
99 __raw_spin_lock_init((lock), #lock, &__key); \
100 } while (0)
101
102 #else
103 # define raw_spin_lock_init(lock) \
104 do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
105 #endif
106
107 #define raw_spin_is_locked(lock) arch_spin_is_locked(&(lock)->raw_lock)
108
109 #ifdef CONFIG_GENERIC_LOCKBREAK
110 #define raw_spin_is_contended(lock) ((lock)->break_lock)
111 #else
112
113 #ifdef arch_spin_is_contended
114 #define raw_spin_is_contended(lock) arch_spin_is_contended(&(lock)->raw_lock)
115 #else
116 #define raw_spin_is_contended(lock) (((void)(lock), 0))
117 #endif /*arch_spin_is_contended*/
118 #endif
119
120 /*
121 * Despite its name it doesn't necessarily has to be a full barrier.
122 * It should only guarantee that a STORE before the critical section
123 * can not be reordered with LOADs and STOREs inside this section.
124 * spin_lock() is the one-way barrier, this LOAD can not escape out
125 * of the region. So the default implementation simply ensures that
126 * a STORE can not move into the critical section, smp_wmb() should
127 * serialize it with another STORE done by spin_lock().
128 */
129 #ifndef smp_mb__before_spinlock
130 #define smp_mb__before_spinlock() smp_wmb()
131 #endif
132
133 /**
134 * raw_spin_unlock_wait - wait until the spinlock gets unlocked
135 * @lock: the spinlock in question.
136 */
137 #define raw_spin_unlock_wait(lock) arch_spin_unlock_wait(&(lock)->raw_lock)
138
139 #ifdef CONFIG_DEBUG_SPINLOCK
140 extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
141 #define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
142 extern int do_raw_spin_trylock(raw_spinlock_t *lock);
143 extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
144 #else
145 static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
146 {
147 __acquire(lock);
148 arch_spin_lock(&lock->raw_lock);
149 }
150
151 static inline void
152 do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
153 {
154 __acquire(lock);
155 arch_spin_lock_flags(&lock->raw_lock, *flags);
156 }
157
158 static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
159 {
160 return arch_spin_trylock(&(lock)->raw_lock);
161 }
162
163 static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
164 {
165 arch_spin_unlock(&lock->raw_lock);
166 __release(lock);
167 }
168 #endif
169
170 /*
171 * Define the various spin_lock methods. Note we define these
172 * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The
173 * various methods are defined as nops in the case they are not
174 * required.
175 */
176 #define raw_spin_trylock(lock) __cond_lock(lock, _raw_spin_trylock(lock))
177
178 #define raw_spin_lock(lock) _raw_spin_lock(lock)
179
180 #ifdef CONFIG_DEBUG_LOCK_ALLOC
181 # define raw_spin_lock_nested(lock, subclass) \
182 _raw_spin_lock_nested(lock, subclass)
183
184 # define raw_spin_lock_nest_lock(lock, nest_lock) \
185 do { \
186 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
187 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \
188 } while (0)
189 #else
190 /*
191 * Always evaluate the 'subclass' argument to avoid that the compiler
192 * warns about set-but-not-used variables when building with
193 * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
194 */
195 # define raw_spin_lock_nested(lock, subclass) \
196 _raw_spin_lock(((void)(subclass), (lock)))
197 # define raw_spin_lock_nest_lock(lock, nest_lock) _raw_spin_lock(lock)
198 #endif
199
200 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
201
202 #define raw_spin_lock_irqsave(lock, flags) \
203 do { \
204 typecheck(unsigned long, flags); \
205 flags = _raw_spin_lock_irqsave(lock); \
206 } while (0)
207
208 #ifdef CONFIG_DEBUG_LOCK_ALLOC
209 #define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
210 do { \
211 typecheck(unsigned long, flags); \
212 flags = _raw_spin_lock_irqsave_nested(lock, subclass); \
213 } while (0)
214 #else
215 #define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
216 do { \
217 typecheck(unsigned long, flags); \
218 flags = _raw_spin_lock_irqsave(lock); \
219 } while (0)
220 #endif
221
222 #else
223
224 #define raw_spin_lock_irqsave(lock, flags) \
225 do { \
226 typecheck(unsigned long, flags); \
227 _raw_spin_lock_irqsave(lock, flags); \
228 } while (0)
229
230 #define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
231 raw_spin_lock_irqsave(lock, flags)
232
233 #endif
234
235 #define raw_spin_lock_irq(lock) _raw_spin_lock_irq(lock)
236 #define raw_spin_lock_bh(lock) _raw_spin_lock_bh(lock)
237 #define raw_spin_unlock(lock) _raw_spin_unlock(lock)
238 #define raw_spin_unlock_irq(lock) _raw_spin_unlock_irq(lock)
239
240 #define raw_spin_unlock_irqrestore(lock, flags) \
241 do { \
242 typecheck(unsigned long, flags); \
243 _raw_spin_unlock_irqrestore(lock, flags); \
244 } while (0)
245 #define raw_spin_unlock_bh(lock) _raw_spin_unlock_bh(lock)
246
247 #define raw_spin_trylock_bh(lock) \
248 __cond_lock(lock, _raw_spin_trylock_bh(lock))
249
250 #define raw_spin_trylock_irq(lock) \
251 ({ \
252 local_irq_disable(); \
253 raw_spin_trylock(lock) ? \
254 1 : ({ local_irq_enable(); 0; }); \
255 })
256
257 #define raw_spin_trylock_irqsave(lock, flags) \
258 ({ \
259 local_irq_save(flags); \
260 raw_spin_trylock(lock) ? \
261 1 : ({ local_irq_restore(flags); 0; }); \
262 })
263
264 /**
265 * raw_spin_can_lock - would raw_spin_trylock() succeed?
266 * @lock: the spinlock in question.
267 */
268 #define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock))
269
270 /* Include rwlock functions */
271 #include <linux/rwlock.h>
272
273 /*
274 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
275 */
276 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
277 # include <linux/spinlock_api_smp.h>
278 #else
279 # include <linux/spinlock_api_up.h>
280 #endif
281
282 /*
283 * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
284 */
285
286 static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
287 {
288 return &lock->rlock;
289 }
290
291 #define spin_lock_init(_lock) \
292 do { \
293 spinlock_check(_lock); \
294 raw_spin_lock_init(&(_lock)->rlock); \
295 } while (0)
296
297 static __always_inline void spin_lock(spinlock_t *lock)
298 {
299 raw_spin_lock(&lock->rlock);
300 }
301
302 static __always_inline void spin_lock_bh(spinlock_t *lock)
303 {
304 raw_spin_lock_bh(&lock->rlock);
305 }
306
307 static __always_inline int spin_trylock(spinlock_t *lock)
308 {
309 return raw_spin_trylock(&lock->rlock);
310 }
311
312 #define spin_lock_nested(lock, subclass) \
313 do { \
314 raw_spin_lock_nested(spinlock_check(lock), subclass); \
315 } while (0)
316
317 #define spin_lock_nest_lock(lock, nest_lock) \
318 do { \
319 raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock); \
320 } while (0)
321
322 static __always_inline void spin_lock_irq(spinlock_t *lock)
323 {
324 raw_spin_lock_irq(&lock->rlock);
325 }
326
327 #define spin_lock_irqsave(lock, flags) \
328 do { \
329 raw_spin_lock_irqsave(spinlock_check(lock), flags); \
330 } while (0)
331
332 #define spin_lock_irqsave_nested(lock, flags, subclass) \
333 do { \
334 raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
335 } while (0)
336
337 static __always_inline void spin_unlock(spinlock_t *lock)
338 {
339 raw_spin_unlock(&lock->rlock);
340 }
341
342 static __always_inline void spin_unlock_bh(spinlock_t *lock)
343 {
344 raw_spin_unlock_bh(&lock->rlock);
345 }
346
347 static __always_inline void spin_unlock_irq(spinlock_t *lock)
348 {
349 raw_spin_unlock_irq(&lock->rlock);
350 }
351
352 static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
353 {
354 raw_spin_unlock_irqrestore(&lock->rlock, flags);
355 }
356
357 static __always_inline int spin_trylock_bh(spinlock_t *lock)
358 {
359 return raw_spin_trylock_bh(&lock->rlock);
360 }
361
362 static __always_inline int spin_trylock_irq(spinlock_t *lock)
363 {
364 return raw_spin_trylock_irq(&lock->rlock);
365 }
366
367 #define spin_trylock_irqsave(lock, flags) \
368 ({ \
369 raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
370 })
371
372 static __always_inline void spin_unlock_wait(spinlock_t *lock)
373 {
374 raw_spin_unlock_wait(&lock->rlock);
375 }
376
377 static __always_inline int spin_is_locked(spinlock_t *lock)
378 {
379 return raw_spin_is_locked(&lock->rlock);
380 }
381
382 static __always_inline int spin_is_contended(spinlock_t *lock)
383 {
384 return raw_spin_is_contended(&lock->rlock);
385 }
386
387 static __always_inline int spin_can_lock(spinlock_t *lock)
388 {
389 return raw_spin_can_lock(&lock->rlock);
390 }
391
392 #define assert_spin_locked(lock) assert_raw_spin_locked(&(lock)->rlock)
393
394 /*
395 * Pull the atomic_t declaration:
396 * (asm-mips/atomic.h needs above definitions)
397 */
398 #include <linux/atomic.h>
399 /**
400 * atomic_dec_and_lock - lock on reaching reference count zero
401 * @atomic: the atomic counter
402 * @lock: the spinlock in question
403 *
404 * Decrements @atomic by 1. If the result is 0, returns true and locks
405 * @lock. Returns false for all other cases.
406 */
407 extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
408 #define atomic_dec_and_lock(atomic, lock) \
409 __cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
410
411 #endif /* __LINUX_SPINLOCK_H */ |
Here is an explanation of a rule violation arisen while checking your driver against a corresponding kernel.
Note that it may be false positive, i.e. there isn't a real error indeed. Please analyze a given error trace and related source code to understand whether there is an error in your driver.
Error trace column contains a path on which the given rule is violated. You can expand/collapse some entity classes by clicking on corresponding checkboxes in a main menu or in an advanced Others menu. Also you can expand/collapse each particular entity by clicking on +/-. In hovering on some entities you can see some tips. Also the error trace is bound with related source code. Line numbers may be shown as links on the left. You can click on them to open corresponding lines in source code.
Source code column contains a content of files related with the error trace. There is source code of your driver (note that there are some LDV modifications at the end), kernel headers and rule model. Tabs show a currently opened file and other available files. In hovering on them you can see full file names. On clicking a corresponding file content will be shown.
Kernel | Module | Rule | Verifier | Verdict | Status | Timestamp | Bug report |
linux-4.11-rc1.tar.xz | mm/z3fold.ko | 39_7a | CPAchecker | Bug | Fixed | 2017-03-11 00:23:38 | L0264 |
Comment
11 Mar 2017
[Home]