Error Trace
Bug # 108
Error trace
{ 19 typedef signed char __s8; 20 typedef unsigned char __u8; 22 typedef short __s16; 23 typedef unsigned short __u16; 25 typedef int __s32; 26 typedef unsigned int __u32; 30 typedef unsigned long long __u64; 16 typedef unsigned char u8; 18 typedef short s16; 19 typedef unsigned short u16; 21 typedef int s32; 22 typedef unsigned int u32; 24 typedef long long s64; 25 typedef unsigned long long u64; 14 typedef long __kernel_long_t; 15 typedef unsigned long __kernel_ulong_t; 27 typedef int __kernel_pid_t; 48 typedef unsigned int __kernel_uid32_t; 49 typedef unsigned int __kernel_gid32_t; 71 typedef __kernel_ulong_t __kernel_size_t; 72 typedef __kernel_long_t __kernel_ssize_t; 87 typedef long long __kernel_loff_t; 88 typedef __kernel_long_t __kernel_time_t; 89 typedef __kernel_long_t __kernel_clock_t; 90 typedef int __kernel_timer_t; 91 typedef int __kernel_clockid_t; 33 typedef __u16 __be16; 12 typedef __u32 __kernel_dev_t; 15 typedef __kernel_dev_t dev_t; 18 typedef unsigned short umode_t; 21 typedef __kernel_pid_t pid_t; 26 typedef __kernel_clockid_t clockid_t; 29 typedef _Bool bool; 31 typedef __kernel_uid32_t uid_t; 32 typedef __kernel_gid32_t gid_t; 45 typedef __kernel_loff_t loff_t; 54 typedef __kernel_size_t size_t; 59 typedef __kernel_ssize_t ssize_t; 69 typedef __kernel_time_t time_t; 102 typedef __s32 int32_t; 108 typedef __u32 uint32_t; 133 typedef unsigned long sector_t; 134 typedef unsigned long blkcnt_t; 146 typedef u64 dma_addr_t; 157 typedef unsigned int gfp_t; 158 typedef unsigned int fmode_t; 159 typedef unsigned int oom_flags_t; 177 struct __anonstruct_atomic_t_6 { int counter; } ; 177 typedef struct __anonstruct_atomic_t_6 atomic_t; 182 struct __anonstruct_atomic64_t_7 { long counter; } ; 182 typedef struct __anonstruct_atomic64_t_7 atomic64_t; 183 struct list_head { struct list_head *next; struct list_head *prev; } ; 188 struct hlist_node ; 188 struct hlist_head { struct hlist_node *first; } ; 192 struct hlist_node { struct hlist_node *next; struct hlist_node **pprev; } ; 203 struct callback_head { struct callback_head *next; void (*func)(struct callback_head *); } ; 213 enum dma_data_direction { DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2, DMA_NONE = 3 } ; 5 struct device ; 5 struct page ; 7 struct dma_attrs ; 10 struct task_struct ; 22 struct kernel_cap_struct { __u32 cap[2U]; } ; 25 typedef struct kernel_cap_struct kernel_cap_t; 33 struct file ; 34 struct inode ; 35 struct dentry ; 36 struct user_namespace ; 99 struct kernel_symbol { unsigned long value; const char *name; } ; 33 struct module ; 65 struct pt_regs { unsigned long r15; unsigned long r14; unsigned long r13; unsigned long r12; unsigned long bp; unsigned long bx; unsigned long r11; unsigned long r10; unsigned long r9; unsigned long r8; unsigned long ax; unsigned long cx; unsigned long dx; unsigned long si; unsigned long di; unsigned long orig_ax; unsigned long ip; unsigned long cs; unsigned long flags; unsigned long sp; unsigned long ss; } ; 59 struct __anonstruct____missing_field_name_10 { unsigned int a; unsigned int b; } ; 59 struct __anonstruct____missing_field_name_11 { u16 limit0; u16 base0; unsigned char base1; unsigned char type; unsigned char s; unsigned char dpl; unsigned char p; unsigned char limit; unsigned char avl; unsigned char l; unsigned char d; unsigned char g; unsigned char base2; } ; 59 union __anonunion____missing_field_name_9 { struct __anonstruct____missing_field_name_10 __annonCompField4; struct __anonstruct____missing_field_name_11 __annonCompField5; } ; 59 struct 
desc_struct { union __anonunion____missing_field_name_9 __annonCompField6; } ; 15 typedef unsigned long pgdval_t; 16 typedef unsigned long pgprotval_t; 20 struct pgprot { pgprotval_t pgprot; } ; 243 typedef struct pgprot pgprot_t; 245 struct __anonstruct_pgd_t_13 { pgdval_t pgd; } ; 245 typedef struct __anonstruct_pgd_t_13 pgd_t; 333 typedef struct page *pgtable_t; 354 struct seq_file ; 389 struct thread_struct ; 391 struct mm_struct ; 392 struct cpumask ; 327 struct arch_spinlock ; 18 typedef u16 __ticket_t; 19 typedef u32 __ticketpair_t; 20 struct __raw_tickets { __ticket_t head; __ticket_t tail; } ; 32 union __anonunion____missing_field_name_16 { __ticketpair_t head_tail; struct __raw_tickets tickets; } ; 32 struct arch_spinlock { union __anonunion____missing_field_name_16 __annonCompField7; } ; 33 typedef struct arch_spinlock arch_spinlock_t; 33 struct __anonstruct____missing_field_name_18 { u32 read; s32 write; } ; 33 union __anonunion_arch_rwlock_t_17 { s64 lock; struct __anonstruct____missing_field_name_18 __annonCompField8; } ; 33 typedef union __anonunion_arch_rwlock_t_17 arch_rwlock_t; 142 typedef void (*ctor_fn_t)(); 212 struct _ddebug { const char *modname; const char *function; const char *filename; const char *format; unsigned int lineno; unsigned char flags; } ; 376 struct file_operations ; 388 struct completion ; 416 struct pid ; 527 struct bug_entry { int bug_addr_disp; int file_disp; unsigned short line; unsigned short flags; } ; 102 struct timespec ; 127 struct kernel_vm86_regs { struct pt_regs pt; unsigned short es; unsigned short __esh; unsigned short ds; unsigned short __dsh; unsigned short fs; unsigned short __fsh; unsigned short gs; unsigned short __gsh; } ; 79 union __anonunion____missing_field_name_23 { struct pt_regs *regs; struct kernel_vm86_regs *vm86; } ; 79 struct math_emu_info { long ___orig_eip; union __anonunion____missing_field_name_23 __annonCompField10; } ; 306 struct cpumask { unsigned long bits[128U]; } ; 14 typedef struct cpumask cpumask_t; 663 typedef struct cpumask *cpumask_var_t; 195 struct static_key ; 162 struct seq_operations ; 294 struct i387_fsave_struct { u32 cwd; u32 swd; u32 twd; u32 fip; u32 fcs; u32 foo; u32 fos; u32 st_space[20U]; u32 status; } ; 312 struct __anonstruct____missing_field_name_28 { u64 rip; u64 rdp; } ; 312 struct __anonstruct____missing_field_name_29 { u32 fip; u32 fcs; u32 foo; u32 fos; } ; 312 union __anonunion____missing_field_name_27 { struct __anonstruct____missing_field_name_28 __annonCompField14; struct __anonstruct____missing_field_name_29 __annonCompField15; } ; 312 union __anonunion____missing_field_name_30 { u32 padding1[12U]; u32 sw_reserved[12U]; } ; 312 struct i387_fxsave_struct { u16 cwd; u16 swd; u16 twd; u16 fop; union __anonunion____missing_field_name_27 __annonCompField16; u32 mxcsr; u32 mxcsr_mask; u32 st_space[32U]; u32 xmm_space[64U]; u32 padding[12U]; union __anonunion____missing_field_name_30 __annonCompField17; } ; 346 struct i387_soft_struct { u32 cwd; u32 swd; u32 twd; u32 fip; u32 fcs; u32 foo; u32 fos; u32 st_space[20U]; u8 ftop; u8 changed; u8 lookahead; u8 no_update; u8 rm; u8 alimit; struct math_emu_info *info; u32 entry_eip; } ; 367 struct ymmh_struct { u32 ymmh_space[64U]; } ; 372 struct lwp_struct { u8 reserved[128U]; } ; 377 struct bndregs_struct { u64 bndregs[8U]; } ; 381 struct bndcsr_struct { u64 cfg_reg_u; u64 status_reg; } ; 386 struct xsave_hdr_struct { u64 xstate_bv; u64 reserved1[2U]; u64 reserved2[5U]; } ; 392 struct xsave_struct { struct i387_fxsave_struct i387; struct 
xsave_hdr_struct xsave_hdr; struct ymmh_struct ymmh; struct lwp_struct lwp; struct bndregs_struct bndregs; struct bndcsr_struct bndcsr; } ; 401 union thread_xstate { struct i387_fsave_struct fsave; struct i387_fxsave_struct fxsave; struct i387_soft_struct soft; struct xsave_struct xsave; } ; 409 struct fpu { unsigned int last_cpu; unsigned int has_fpu; union thread_xstate *state; } ; 456 struct kmem_cache ; 457 struct perf_event ; 458 struct thread_struct { struct desc_struct tls_array[3U]; unsigned long sp0; unsigned long sp; unsigned long usersp; unsigned short es; unsigned short ds; unsigned short fsindex; unsigned short gsindex; unsigned long fs; unsigned long gs; struct perf_event *ptrace_bps[4U]; unsigned long debugreg6; unsigned long ptrace_dr7; unsigned long cr2; unsigned long trap_nr; unsigned long error_code; struct fpu fpu; unsigned long *io_bitmap_ptr; unsigned long iopl; unsigned int io_bitmap_max; unsigned char fpu_counter; } ; 23 typedef atomic64_t atomic_long_t; 152 struct lockdep_map ; 55 struct stack_trace { unsigned int nr_entries; unsigned int max_entries; unsigned long *entries; int skip; } ; 26 struct lockdep_subclass_key { char __one_byte; } ; 53 struct lock_class_key { struct lockdep_subclass_key subkeys[8U]; } ; 59 struct lock_class { struct list_head hash_entry; struct list_head lock_entry; struct lockdep_subclass_key *key; unsigned int subclass; unsigned int dep_gen_id; unsigned long usage_mask; struct stack_trace usage_traces[13U]; struct list_head locks_after; struct list_head locks_before; unsigned int version; unsigned long ops; const char *name; int name_version; unsigned long contention_point[4U]; unsigned long contending_point[4U]; } ; 144 struct lockdep_map { struct lock_class_key *key; struct lock_class *class_cache[2U]; const char *name; int cpu; unsigned long ip; } ; 205 struct held_lock { u64 prev_chain_key; unsigned long acquire_ip; struct lockdep_map *instance; struct lockdep_map *nest_lock; u64 waittime_stamp; u64 holdtime_stamp; unsigned short class_idx; unsigned char irq_context; unsigned char trylock; unsigned char read; unsigned char check; unsigned char hardirqs_off; unsigned short references; } ; 537 struct raw_spinlock { arch_spinlock_t raw_lock; unsigned int magic; unsigned int owner_cpu; void *owner; struct lockdep_map dep_map; } ; 32 typedef struct raw_spinlock raw_spinlock_t; 33 struct __anonstruct____missing_field_name_34 { u8 __padding[24U]; struct lockdep_map dep_map; } ; 33 union __anonunion____missing_field_name_33 { struct raw_spinlock rlock; struct __anonstruct____missing_field_name_34 __annonCompField19; } ; 33 struct spinlock { union __anonunion____missing_field_name_33 __annonCompField20; } ; 76 typedef struct spinlock spinlock_t; 23 struct __anonstruct_rwlock_t_35 { arch_rwlock_t raw_lock; unsigned int magic; unsigned int owner_cpu; void *owner; struct lockdep_map dep_map; } ; 23 typedef struct __anonstruct_rwlock_t_35 rwlock_t; 59 struct static_key { atomic_t enabled; } ; 412 struct seqcount { unsigned int sequence; struct lockdep_map dep_map; } ; 51 typedef struct seqcount seqcount_t; 433 struct timespec { __kernel_time_t tv_sec; long tv_nsec; } ; 84 struct plist_node { int prio; struct list_head prio_list; struct list_head node_list; } ; 254 struct rb_node { unsigned long __rb_parent_color; struct rb_node *rb_right; struct rb_node *rb_left; } ; 40 struct rb_root { struct rb_node *rb_node; } ; 98 struct __anonstruct_nodemask_t_37 { unsigned long bits[16U]; } ; 98 typedef struct __anonstruct_nodemask_t_37 nodemask_t; 522 
struct rw_semaphore ; 523 struct rw_semaphore { long count; raw_spinlock_t wait_lock; struct list_head wait_list; struct lockdep_map dep_map; } ; 34 struct __wait_queue_head { spinlock_t lock; struct list_head task_list; } ; 39 typedef struct __wait_queue_head wait_queue_head_t; 919 struct completion { unsigned int done; wait_queue_head_t wait; } ; 113 struct vm_area_struct ; 114 struct notifier_block ; 58 struct mutex { atomic_t count; spinlock_t wait_lock; struct list_head wait_list; struct task_struct *owner; const char *name; void *magic; struct lockdep_map dep_map; } ; 67 struct mutex_waiter { struct list_head list; struct task_struct *task; void *magic; } ; 1039 union ktime { s64 tv64; } ; 59 typedef union ktime ktime_t; 388 struct tvec_base ; 389 struct timer_list { struct list_head entry; unsigned long expires; struct tvec_base *base; void (*function)(unsigned long); unsigned long data; int slack; int start_pid; void *start_site; char start_comm[16U]; struct lockdep_map lockdep_map; } ; 254 struct hrtimer ; 255 enum hrtimer_restart ; 266 struct workqueue_struct ; 267 struct work_struct ; 54 struct work_struct { atomic_long_t data; struct list_head entry; void (*func)(struct work_struct *); struct lockdep_map lockdep_map; } ; 107 struct delayed_work { struct work_struct work; struct timer_list timer; struct workqueue_struct *wq; int cpu; } ; 51 struct notifier_block { int (*notifier_call)(struct notifier_block *, unsigned long, void *); struct notifier_block *next; int priority; } ; 45 struct arch_uprobe_task { unsigned long saved_scratch_register; unsigned int saved_trap_nr; unsigned int saved_tf; } ; 54 enum uprobe_task_state { UTASK_RUNNING = 0, UTASK_SSTEP = 1, UTASK_SSTEP_ACK = 2, UTASK_SSTEP_TRAPPED = 3 } ; 61 struct __anonstruct____missing_field_name_40 { struct arch_uprobe_task autask; unsigned long vaddr; } ; 61 struct __anonstruct____missing_field_name_41 { struct callback_head dup_xol_work; unsigned long dup_xol_addr; } ; 61 union __anonunion____missing_field_name_39 { struct __anonstruct____missing_field_name_40 __annonCompField22; struct __anonstruct____missing_field_name_41 __annonCompField23; } ; 61 struct uprobe ; 61 struct return_instance ; 61 struct uprobe_task { enum uprobe_task_state state; union __anonunion____missing_field_name_39 __annonCompField24; struct uprobe *active_uprobe; unsigned long xol_vaddr; struct return_instance *return_instances; unsigned int depth; } ; 93 struct xol_area ; 94 struct uprobes_state { struct xol_area *xol_area; } ; 22 struct __anonstruct_mm_context_t_42 { void *ldt; int size; unsigned short ia32_compat; struct mutex lock; void *vdso; } ; 22 typedef struct __anonstruct_mm_context_t_42 mm_context_t; 26 struct address_space ; 27 union __anonunion____missing_field_name_43 { struct address_space *mapping; void *s_mem; } ; 27 union __anonunion____missing_field_name_45 { unsigned long index; void *freelist; bool pfmemalloc; } ; 27 struct __anonstruct____missing_field_name_49 { unsigned short inuse; unsigned short objects; unsigned char frozen; } ; 27 union __anonunion____missing_field_name_48 { atomic_t _mapcount; struct __anonstruct____missing_field_name_49 __annonCompField27; int units; } ; 27 struct __anonstruct____missing_field_name_47 { union __anonunion____missing_field_name_48 __annonCompField28; atomic_t _count; } ; 27 union __anonunion____missing_field_name_46 { unsigned long counters; struct __anonstruct____missing_field_name_47 __annonCompField29; unsigned int active; } ; 27 struct __anonstruct____missing_field_name_44 { union 
__anonunion____missing_field_name_45 __annonCompField26; union __anonunion____missing_field_name_46 __annonCompField30; } ; 27 struct __anonstruct____missing_field_name_51 { struct page *next; int pages; int pobjects; } ; 27 struct slab ; 27 union __anonunion____missing_field_name_50 { struct list_head lru; struct __anonstruct____missing_field_name_51 __annonCompField32; struct list_head list; struct slab *slab_page; struct callback_head callback_head; pgtable_t pmd_huge_pte; } ; 27 union __anonunion____missing_field_name_52 { unsigned long private; spinlock_t *ptl; struct kmem_cache *slab_cache; struct page *first_page; } ; 27 struct page { unsigned long flags; union __anonunion____missing_field_name_43 __annonCompField25; struct __anonstruct____missing_field_name_44 __annonCompField31; union __anonunion____missing_field_name_50 __annonCompField33; union __anonunion____missing_field_name_52 __annonCompField34; unsigned long debug_flags; } ; 186 struct page_frag { struct page *page; __u32 offset; __u32 size; } ; 238 struct __anonstruct_linear_54 { struct rb_node rb; unsigned long rb_subtree_last; } ; 238 union __anonunion_shared_53 { struct __anonstruct_linear_54 linear; struct list_head nonlinear; } ; 238 struct anon_vma ; 238 struct vm_operations_struct ; 238 struct mempolicy ; 238 struct vm_area_struct { unsigned long vm_start; unsigned long vm_end; struct vm_area_struct *vm_next; struct vm_area_struct *vm_prev; struct rb_node vm_rb; unsigned long rb_subtree_gap; struct mm_struct *vm_mm; pgprot_t vm_page_prot; unsigned long vm_flags; union __anonunion_shared_53 shared; struct list_head anon_vma_chain; struct anon_vma *anon_vma; const struct vm_operations_struct *vm_ops; unsigned long vm_pgoff; struct file *vm_file; void *vm_private_data; struct mempolicy *vm_policy; } ; 310 struct core_thread { struct task_struct *task; struct core_thread *next; } ; 316 struct core_state { atomic_t nr_threads; struct core_thread dumper; struct completion startup; } ; 329 struct task_rss_stat { int events; int count[3U]; } ; 337 struct mm_rss_stat { atomic_long_t count[3U]; } ; 342 struct kioctx_table ; 343 struct linux_binfmt ; 343 struct mmu_notifier_mm ; 343 struct mm_struct { struct vm_area_struct *mmap; struct rb_root mm_rb; struct vm_area_struct *mmap_cache; unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); unsigned long mmap_base; unsigned long mmap_legacy_base; unsigned long task_size; unsigned long highest_vm_end; pgd_t *pgd; atomic_t mm_users; atomic_t mm_count; atomic_long_t nr_ptes; int map_count; spinlock_t page_table_lock; struct rw_semaphore mmap_sem; struct list_head mmlist; unsigned long hiwater_rss; unsigned long hiwater_vm; unsigned long total_vm; unsigned long locked_vm; unsigned long pinned_vm; unsigned long shared_vm; unsigned long exec_vm; unsigned long stack_vm; unsigned long def_flags; unsigned long start_code; unsigned long end_code; unsigned long start_data; unsigned long end_data; unsigned long start_brk; unsigned long brk; unsigned long start_stack; unsigned long arg_start; unsigned long arg_end; unsigned long env_start; unsigned long env_end; unsigned long saved_auxv[46U]; struct mm_rss_stat rss_stat; struct linux_binfmt *binfmt; cpumask_var_t cpu_vm_mask_var; mm_context_t context; unsigned long flags; struct core_state *core_state; spinlock_t ioctx_lock; struct kioctx_table *ioctx_table; struct task_struct *owner; struct file *exe_file; struct mmu_notifier_mm *mmu_notifier_mm; struct cpumask 
cpumask_allocation; unsigned long numa_next_scan; unsigned long numa_scan_offset; int numa_scan_seq; bool tlb_flush_pending; struct uprobes_state uprobes_state; } ; 4 typedef unsigned long cputime_t; 12 struct llist_node ; 64 struct llist_node { struct llist_node *next; } ; 58 struct pm_message { int event; } ; 64 typedef struct pm_message pm_message_t; 65 struct dev_pm_ops { int (*prepare)(struct device *); void (*complete)(struct device *); int (*suspend)(struct device *); int (*resume)(struct device *); int (*freeze)(struct device *); int (*thaw)(struct device *); int (*poweroff)(struct device *); int (*restore)(struct device *); int (*suspend_late)(struct device *); int (*resume_early)(struct device *); int (*freeze_late)(struct device *); int (*thaw_early)(struct device *); int (*poweroff_late)(struct device *); int (*restore_early)(struct device *); int (*suspend_noirq)(struct device *); int (*resume_noirq)(struct device *); int (*freeze_noirq)(struct device *); int (*thaw_noirq)(struct device *); int (*poweroff_noirq)(struct device *); int (*restore_noirq)(struct device *); int (*runtime_suspend)(struct device *); int (*runtime_resume)(struct device *); int (*runtime_idle)(struct device *); } ; 301 enum rpm_status { RPM_ACTIVE = 0, RPM_RESUMING = 1, RPM_SUSPENDED = 2, RPM_SUSPENDING = 3 } ; 308 enum rpm_request { RPM_REQ_NONE = 0, RPM_REQ_IDLE = 1, RPM_REQ_SUSPEND = 2, RPM_REQ_AUTOSUSPEND = 3, RPM_REQ_RESUME = 4 } ; 316 struct wakeup_source ; 527 struct pm_subsys_data { spinlock_t lock; unsigned int refcount; struct list_head clock_list; } ; 534 struct dev_pm_qos ; 534 struct dev_pm_info { pm_message_t power_state; unsigned char can_wakeup; unsigned char async_suspend; bool is_prepared; bool is_suspended; bool ignore_children; bool early_init; spinlock_t lock; struct list_head entry; struct completion completion; struct wakeup_source *wakeup; bool wakeup_path; bool syscore; struct timer_list suspend_timer; unsigned long timer_expires; struct work_struct work; wait_queue_head_t wait_queue; atomic_t usage_count; atomic_t child_count; unsigned char disable_depth; unsigned char idle_notification; unsigned char request_pending; unsigned char deferred_resume; unsigned char run_wake; unsigned char runtime_auto; unsigned char no_callbacks; unsigned char irq_safe; unsigned char use_autosuspend; unsigned char timer_autosuspends; unsigned char memalloc_noio; enum rpm_request request; enum rpm_status runtime_status; int runtime_error; int autosuspend_delay; unsigned long last_busy; unsigned long active_jiffies; unsigned long suspended_jiffies; unsigned long accounting_timestamp; struct pm_subsys_data *subsys_data; struct dev_pm_qos *qos; } ; 591 struct dev_pm_domain { struct dev_pm_ops ops; } ; 22 struct bio_vec ; 341 struct device_node ; 22 struct __anonstruct_kuid_t_146 { uid_t val; } ; 22 typedef struct __anonstruct_kuid_t_146 kuid_t; 27 struct __anonstruct_kgid_t_147 { gid_t val; } ; 27 typedef struct __anonstruct_kgid_t_147 kgid_t; 25 struct sem_undo_list ; 25 struct sysv_sem { struct sem_undo_list *undo_list; } ; 24 struct __anonstruct_sigset_t_148 { unsigned long sig[1U]; } ; 24 typedef struct __anonstruct_sigset_t_148 sigset_t; 25 struct siginfo ; 17 typedef void __signalfn_t(int); 18 typedef __signalfn_t *__sighandler_t; 20 typedef void __restorefn_t(); 21 typedef __restorefn_t *__sigrestore_t; 34 union sigval { int sival_int; void *sival_ptr; } ; 10 typedef union sigval sigval_t; 11 struct __anonstruct__kill_150 { __kernel_pid_t _pid; __kernel_uid32_t _uid; } ; 11 struct 
__anonstruct__timer_151 { __kernel_timer_t _tid; int _overrun; char _pad[0U]; sigval_t _sigval; int _sys_private; } ; 11 struct __anonstruct__rt_152 { __kernel_pid_t _pid; __kernel_uid32_t _uid; sigval_t _sigval; } ; 11 struct __anonstruct__sigchld_153 { __kernel_pid_t _pid; __kernel_uid32_t _uid; int _status; __kernel_clock_t _utime; __kernel_clock_t _stime; } ; 11 struct __anonstruct__sigfault_154 { void *_addr; short _addr_lsb; } ; 11 struct __anonstruct__sigpoll_155 { long _band; int _fd; } ; 11 struct __anonstruct__sigsys_156 { void *_call_addr; int _syscall; unsigned int _arch; } ; 11 union __anonunion__sifields_149 { int _pad[28U]; struct __anonstruct__kill_150 _kill; struct __anonstruct__timer_151 _timer; struct __anonstruct__rt_152 _rt; struct __anonstruct__sigchld_153 _sigchld; struct __anonstruct__sigfault_154 _sigfault; struct __anonstruct__sigpoll_155 _sigpoll; struct __anonstruct__sigsys_156 _sigsys; } ; 11 struct siginfo { int si_signo; int si_errno; int si_code; union __anonunion__sifields_149 _sifields; } ; 109 typedef struct siginfo siginfo_t; 11 struct user_struct ; 21 struct sigpending { struct list_head list; sigset_t signal; } ; 251 struct sigaction { __sighandler_t sa_handler; unsigned long sa_flags; __sigrestore_t sa_restorer; sigset_t sa_mask; } ; 265 struct k_sigaction { struct sigaction sa; } ; 448 enum pid_type { PIDTYPE_PID = 0, PIDTYPE_PGID = 1, PIDTYPE_SID = 2, PIDTYPE_MAX = 3 } ; 455 struct pid_namespace ; 455 struct upid { int nr; struct pid_namespace *ns; struct hlist_node pid_chain; } ; 56 struct pid { atomic_t count; unsigned int level; struct hlist_head tasks[3U]; struct callback_head rcu; struct upid numbers[1U]; } ; 68 struct pid_link { struct hlist_node node; struct pid *pid; } ; 281 struct percpu_counter { raw_spinlock_t lock; s64 count; struct list_head list; s32 *counters; } ; 46 struct seccomp_filter ; 47 struct seccomp { int mode; struct seccomp_filter *filter; } ; 40 struct rt_mutex_waiter ; 41 struct rlimit { __kernel_ulong_t rlim_cur; __kernel_ulong_t rlim_max; } ; 11 struct timerqueue_node { struct rb_node node; ktime_t expires; } ; 12 struct timerqueue_head { struct rb_root head; struct timerqueue_node *next; } ; 50 struct hrtimer_clock_base ; 51 struct hrtimer_cpu_base ; 60 enum hrtimer_restart { HRTIMER_NORESTART = 0, HRTIMER_RESTART = 1 } ; 65 struct hrtimer { struct timerqueue_node node; ktime_t _softexpires; enum hrtimer_restart (*function)(struct hrtimer *); struct hrtimer_clock_base *base; unsigned long state; int start_pid; void *start_site; char start_comm[16U]; } ; 132 struct hrtimer_clock_base { struct hrtimer_cpu_base *cpu_base; int index; clockid_t clockid; struct timerqueue_head active; ktime_t resolution; ktime_t (*get_time)(); ktime_t softirq_time; ktime_t offset; } ; 163 struct hrtimer_cpu_base { raw_spinlock_t lock; unsigned int active_bases; unsigned int clock_was_set; ktime_t expires_next; int hres_active; int hang_detected; unsigned long nr_events; unsigned long nr_retries; unsigned long nr_hangs; ktime_t max_hang_time; struct hrtimer_clock_base clock_base[4U]; } ; 463 struct task_io_accounting { u64 rchar; u64 wchar; u64 syscr; u64 syscw; u64 read_bytes; u64 write_bytes; u64 cancelled_write_bytes; } ; 45 struct latency_record { unsigned long backtrace[12U]; unsigned int count; unsigned long time; unsigned long max; } ; 835 struct nsproxy ; 193 struct assoc_array_ptr ; 193 struct assoc_array { struct assoc_array_ptr *root; unsigned long nr_leaves_on_tree; } ; 31 typedef int32_t key_serial_t; 34 typedef uint32_t 
key_perm_t; 35 struct key ; 36 struct signal_struct ; 37 struct cred ; 38 struct key_type ; 42 struct keyring_index_key { struct key_type *type; const char *description; size_t desc_len; } ; 123 union __anonunion____missing_field_name_161 { struct list_head graveyard_link; struct rb_node serial_node; } ; 123 struct key_user ; 123 union __anonunion____missing_field_name_162 { time_t expiry; time_t revoked_at; } ; 123 struct __anonstruct____missing_field_name_164 { struct key_type *type; char *description; } ; 123 union __anonunion____missing_field_name_163 { struct keyring_index_key index_key; struct __anonstruct____missing_field_name_164 __annonCompField50; } ; 123 union __anonunion_type_data_165 { struct list_head link; unsigned long x[2U]; void *p[2U]; int reject_error; } ; 123 union __anonunion_payload_167 { unsigned long value; void *rcudata; void *data; void *data2[2U]; } ; 123 union __anonunion____missing_field_name_166 { union __anonunion_payload_167 payload; struct assoc_array keys; } ; 123 struct key { atomic_t usage; key_serial_t serial; union __anonunion____missing_field_name_161 __annonCompField48; struct rw_semaphore sem; struct key_user *user; void *security; union __anonunion____missing_field_name_162 __annonCompField49; time_t last_used_at; kuid_t uid; kgid_t gid; key_perm_t perm; unsigned short quotalen; unsigned short datalen; unsigned long flags; union __anonunion____missing_field_name_163 __annonCompField51; union __anonunion_type_data_165 type_data; union __anonunion____missing_field_name_166 __annonCompField52; } ; 345 struct audit_context ; 27 struct group_info { atomic_t usage; int ngroups; int nblocks; kgid_t small_block[32U]; kgid_t *blocks[0U]; } ; 78 struct cred { atomic_t usage; atomic_t subscribers; void *put_addr; unsigned int magic; kuid_t uid; kgid_t gid; kuid_t suid; kgid_t sgid; kuid_t euid; kgid_t egid; kuid_t fsuid; kgid_t fsgid; unsigned int securebits; kernel_cap_t cap_inheritable; kernel_cap_t cap_permitted; kernel_cap_t cap_effective; kernel_cap_t cap_bset; unsigned char jit_keyring; struct key *session_keyring; struct key *process_keyring; struct key *thread_keyring; struct key *request_key_auth; void *security; struct user_struct *user; struct user_namespace *user_ns; struct group_info *group_info; struct callback_head rcu; } ; 123 struct futex_pi_state ; 124 struct robust_list_head ; 125 struct bio_list ; 126 struct fs_struct ; 127 struct perf_event_context ; 128 struct blk_plug ; 180 struct cfs_rq ; 181 struct task_group ; 421 struct sighand_struct { atomic_t count; struct k_sigaction action[64U]; spinlock_t siglock; wait_queue_head_t signalfd_wqh; } ; 460 struct pacct_struct { int ac_flag; long ac_exitcode; unsigned long ac_mem; cputime_t ac_utime; cputime_t ac_stime; unsigned long ac_minflt; unsigned long ac_majflt; } ; 468 struct cpu_itimer { cputime_t expires; cputime_t incr; u32 error; u32 incr_error; } ; 475 struct cputime { cputime_t utime; cputime_t stime; } ; 487 struct task_cputime { cputime_t utime; cputime_t stime; unsigned long long sum_exec_runtime; } ; 507 struct thread_group_cputimer { struct task_cputime cputime; int running; raw_spinlock_t lock; } ; 549 struct autogroup ; 550 struct tty_struct ; 550 struct taskstats ; 550 struct tty_audit_buf ; 550 struct signal_struct { atomic_t sigcnt; atomic_t live; int nr_threads; struct list_head thread_head; wait_queue_head_t wait_chldexit; struct task_struct *curr_target; struct sigpending shared_pending; int group_exit_code; int notify_count; struct task_struct *group_exit_task; int 
group_stop_count; unsigned int flags; unsigned char is_child_subreaper; unsigned char has_child_subreaper; int posix_timer_id; struct list_head posix_timers; struct hrtimer real_timer; struct pid *leader_pid; ktime_t it_real_incr; struct cpu_itimer it[2U]; struct thread_group_cputimer cputimer; struct task_cputime cputime_expires; struct list_head cpu_timers[3U]; struct pid *tty_old_pgrp; int leader; struct tty_struct *tty; struct autogroup *autogroup; cputime_t utime; cputime_t stime; cputime_t cutime; cputime_t cstime; cputime_t gtime; cputime_t cgtime; struct cputime prev_cputime; unsigned long nvcsw; unsigned long nivcsw; unsigned long cnvcsw; unsigned long cnivcsw; unsigned long min_flt; unsigned long maj_flt; unsigned long cmin_flt; unsigned long cmaj_flt; unsigned long inblock; unsigned long oublock; unsigned long cinblock; unsigned long coublock; unsigned long maxrss; unsigned long cmaxrss; struct task_io_accounting ioac; unsigned long long sum_sched_runtime; struct rlimit rlim[16U]; struct pacct_struct pacct; struct taskstats *stats; unsigned int audit_tty; unsigned int audit_tty_log_passwd; struct tty_audit_buf *tty_audit_buf; struct rw_semaphore group_rwsem; oom_flags_t oom_flags; short oom_score_adj; short oom_score_adj_min; struct mutex cred_guard_mutex; } ; 730 struct user_struct { atomic_t __count; atomic_t processes; atomic_t files; atomic_t sigpending; atomic_t inotify_watches; atomic_t inotify_devs; atomic_t fanotify_listeners; atomic_long_t epoll_watches; unsigned long mq_bytes; unsigned long locked_shm; struct key *uid_keyring; struct key *session_keyring; struct hlist_node uidhash_node; kuid_t uid; atomic_long_t locked_vm; } ; 774 struct backing_dev_info ; 775 struct reclaim_state ; 776 struct sched_info { unsigned long pcount; unsigned long long run_delay; unsigned long long last_arrival; unsigned long long last_queued; } ; 790 struct task_delay_info { spinlock_t lock; unsigned int flags; struct timespec blkio_start; struct timespec blkio_end; u64 blkio_delay; u64 swapin_delay; u32 blkio_count; u32 swapin_count; struct timespec freepages_start; struct timespec freepages_end; u64 freepages_delay; u32 freepages_count; } ; 976 struct io_context ; 1004 struct pipe_inode_info ; 1006 struct load_weight { unsigned long weight; u32 inv_weight; } ; 1013 struct sched_avg { u32 runnable_avg_sum; u32 runnable_avg_period; u64 last_runnable_update; s64 decay_count; unsigned long load_avg_contrib; } ; 1025 struct sched_statistics { u64 wait_start; u64 wait_max; u64 wait_count; u64 wait_sum; u64 iowait_count; u64 iowait_sum; u64 sleep_start; u64 sleep_max; s64 sum_sleep_runtime; u64 block_start; u64 block_max; u64 exec_max; u64 slice_max; u64 nr_migrations_cold; u64 nr_failed_migrations_affine; u64 nr_failed_migrations_running; u64 nr_failed_migrations_hot; u64 nr_forced_migrations; u64 nr_wakeups; u64 nr_wakeups_sync; u64 nr_wakeups_migrate; u64 nr_wakeups_local; u64 nr_wakeups_remote; u64 nr_wakeups_affine; u64 nr_wakeups_affine_attempts; u64 nr_wakeups_passive; u64 nr_wakeups_idle; } ; 1060 struct sched_entity { struct load_weight load; struct rb_node run_node; struct list_head group_node; unsigned int on_rq; u64 exec_start; u64 sum_exec_runtime; u64 vruntime; u64 prev_sum_exec_runtime; u64 nr_migrations; struct sched_statistics statistics; struct sched_entity *parent; struct cfs_rq *cfs_rq; struct cfs_rq *my_q; struct sched_avg avg; } ; 1091 struct rt_rq ; 1091 struct sched_rt_entity { struct list_head run_list; unsigned long timeout; unsigned long watchdog_stamp; unsigned int 
time_slice; struct sched_rt_entity *back; struct sched_rt_entity *parent; struct rt_rq *rt_rq; struct rt_rq *my_q; } ; 1107 struct sched_dl_entity { struct rb_node rb_node; u64 dl_runtime; u64 dl_deadline; u64 dl_period; u64 dl_bw; s64 runtime; u64 deadline; unsigned int flags; int dl_throttled; int dl_new; int dl_boosted; struct hrtimer dl_timer; } ; 1162 struct mem_cgroup ; 1162 struct memcg_batch_info { int do_batch; struct mem_cgroup *memcg; unsigned long nr_pages; unsigned long memsw_nr_pages; } ; 1569 struct memcg_oom_info { struct mem_cgroup *memcg; gfp_t gfp_mask; int order; unsigned char may_oom; } ; 1576 struct sched_class ; 1576 struct files_struct ; 1576 struct css_set ; 1576 struct compat_robust_list_head ; 1576 struct numa_group ; 1576 struct task_struct { volatile long state; void *stack; atomic_t usage; unsigned int flags; unsigned int ptrace; struct llist_node wake_entry; int on_cpu; struct task_struct *last_wakee; unsigned long wakee_flips; unsigned long wakee_flip_decay_ts; int wake_cpu; int on_rq; int prio; int static_prio; int normal_prio; unsigned int rt_priority; const struct sched_class *sched_class; struct sched_entity se; struct sched_rt_entity rt; struct task_group *sched_task_group; struct sched_dl_entity dl; struct hlist_head preempt_notifiers; unsigned int policy; int nr_cpus_allowed; cpumask_t cpus_allowed; struct sched_info sched_info; struct list_head tasks; struct plist_node pushable_tasks; struct rb_node pushable_dl_tasks; struct mm_struct *mm; struct mm_struct *active_mm; unsigned char brk_randomized; struct task_rss_stat rss_stat; int exit_state; int exit_code; int exit_signal; int pdeath_signal; unsigned int jobctl; unsigned int personality; unsigned char in_execve; unsigned char in_iowait; unsigned char no_new_privs; unsigned char sched_reset_on_fork; unsigned char sched_contributes_to_load; pid_t pid; pid_t tgid; struct task_struct *real_parent; struct task_struct *parent; struct list_head children; struct list_head sibling; struct task_struct *group_leader; struct list_head ptraced; struct list_head ptrace_entry; struct pid_link pids[3U]; struct list_head thread_group; struct list_head thread_node; struct completion *vfork_done; int *set_child_tid; int *clear_child_tid; cputime_t utime; cputime_t stime; cputime_t utimescaled; cputime_t stimescaled; cputime_t gtime; struct cputime prev_cputime; unsigned long nvcsw; unsigned long nivcsw; struct timespec start_time; struct timespec real_start_time; unsigned long min_flt; unsigned long maj_flt; struct task_cputime cputime_expires; struct list_head cpu_timers[3U]; const struct cred *real_cred; const struct cred *cred; char comm[16U]; int link_count; int total_link_count; struct sysv_sem sysvsem; unsigned long last_switch_count; struct thread_struct thread; struct fs_struct *fs; struct files_struct *files; struct nsproxy *nsproxy; struct signal_struct *signal; struct sighand_struct *sighand; sigset_t blocked; sigset_t real_blocked; sigset_t saved_sigmask; struct sigpending pending; unsigned long sas_ss_sp; size_t sas_ss_size; int (*notifier)(void *); void *notifier_data; sigset_t *notifier_mask; struct callback_head *task_works; struct audit_context *audit_context; kuid_t loginuid; unsigned int sessionid; struct seccomp seccomp; u32 parent_exec_id; u32 self_exec_id; spinlock_t alloc_lock; raw_spinlock_t pi_lock; struct rb_root pi_waiters; struct rb_node *pi_waiters_leftmost; struct rt_mutex_waiter *pi_blocked_on; struct task_struct *pi_top_task; struct mutex_waiter *blocked_on; unsigned int irq_events; 
unsigned long hardirq_enable_ip; unsigned long hardirq_disable_ip; unsigned int hardirq_enable_event; unsigned int hardirq_disable_event; int hardirqs_enabled; int hardirq_context; unsigned long softirq_disable_ip; unsigned long softirq_enable_ip; unsigned int softirq_disable_event; unsigned int softirq_enable_event; int softirqs_enabled; int softirq_context; u64 curr_chain_key; int lockdep_depth; unsigned int lockdep_recursion; struct held_lock held_locks[48U]; gfp_t lockdep_reclaim_gfp; void *journal_info; struct bio_list *bio_list; struct blk_plug *plug; struct reclaim_state *reclaim_state; struct backing_dev_info *backing_dev_info; struct io_context *io_context; unsigned long ptrace_message; siginfo_t *last_siginfo; struct task_io_accounting ioac; u64 acct_rss_mem1; u64 acct_vm_mem1; cputime_t acct_timexpd; nodemask_t mems_allowed; seqcount_t mems_allowed_seq; int cpuset_mem_spread_rotor; int cpuset_slab_spread_rotor; struct css_set *cgroups; struct list_head cg_list; struct robust_list_head *robust_list; struct compat_robust_list_head *compat_robust_list; struct list_head pi_state_list; struct futex_pi_state *pi_state_cache; struct perf_event_context *perf_event_ctxp[2U]; struct mutex perf_event_mutex; struct list_head perf_event_list; struct mempolicy *mempolicy; short il_next; short pref_node_fork; int numa_scan_seq; unsigned int numa_scan_period; unsigned int numa_scan_period_max; int numa_preferred_nid; int numa_migrate_deferred; unsigned long numa_migrate_retry; u64 node_stamp; struct callback_head numa_work; struct list_head numa_entry; struct numa_group *numa_group; unsigned long *numa_faults; unsigned long total_numa_faults; unsigned long *numa_faults_buffer; unsigned long numa_faults_locality[2U]; unsigned long numa_pages_migrated; struct callback_head rcu; struct pipe_inode_info *splice_pipe; struct page_frag task_frag; struct task_delay_info *delays; int make_it_fail; int nr_dirtied; int nr_dirtied_pause; unsigned long dirty_paused_when; int latency_record_count; struct latency_record latency_record[32U]; unsigned long timer_slack_ns; unsigned long default_timer_slack_ns; unsigned long trace; unsigned long trace_recursion; struct memcg_batch_info memcg_batch; unsigned int memcg_kmem_skip_account; struct memcg_oom_info memcg_oom; struct uprobe_task *utask; unsigned int sequential_io; unsigned int sequential_io_avg; } ; 325 struct idr_layer { int prefix; unsigned long bitmap[4U]; struct idr_layer *ary[256U]; int count; int layer; struct callback_head callback_head; } ; 38 struct idr { struct idr_layer *hint; struct idr_layer *top; struct idr_layer *id_free; int layers; int id_free_cnt; int cur; spinlock_t lock; } ; 197 struct ida_bitmap { long nr_busy; unsigned long bitmap[15U]; } ; 213 struct ida { struct idr idr; struct ida_bitmap *free_bitmap; } ; 245 struct iattr ; 246 struct super_block ; 247 struct file_system_type ; 248 struct kernfs_open_node ; 249 struct kernfs_iattrs ; 265 struct kernfs_root ; 265 struct kernfs_elem_dir { unsigned long subdirs; struct rb_root children; struct kernfs_root *root; } ; 62 struct kernfs_node ; 62 struct kernfs_elem_symlink { struct kernfs_node *target_kn; } ; 66 struct kernfs_ops ; 66 struct kernfs_elem_attr { const struct kernfs_ops *ops; struct kernfs_open_node *open; loff_t size; } ; 72 union __anonunion_u_169 { struct completion *completion; struct kernfs_node *removed_list; } ; 72 union __anonunion____missing_field_name_170 { struct kernfs_elem_dir dir; struct kernfs_elem_symlink symlink; struct kernfs_elem_attr attr; } ; 72 struct 
kernfs_node { atomic_t count; atomic_t active; struct lockdep_map dep_map; struct kernfs_node *parent; const char *name; struct rb_node rb; union __anonunion_u_169 u; const void *ns; unsigned int hash; union __anonunion____missing_field_name_170 __annonCompField54; void *priv; unsigned short flags; umode_t mode; unsigned int ino; struct kernfs_iattrs *iattr; } ; 114 struct kernfs_dir_ops { int (*mkdir)(struct kernfs_node *, const char *, umode_t ); int (*rmdir)(struct kernfs_node *); int (*rename)(struct kernfs_node *, struct kernfs_node *, const char *); } ; 127 struct kernfs_root { struct kernfs_node *kn; struct ida ino_ida; struct kernfs_dir_ops *dir_ops; } ; 137 struct kernfs_open_file { struct kernfs_node *kn; struct file *file; struct mutex mutex; int event; struct list_head list; bool mmapped; const struct vm_operations_struct *vm_ops; } ; 151 struct kernfs_ops { int (*seq_show)(struct seq_file *, void *); void * (*seq_start)(struct seq_file *, loff_t *); void * (*seq_next)(struct seq_file *, void *, loff_t *); void (*seq_stop)(struct seq_file *, void *); ssize_t (*read)(struct kernfs_open_file *, char *, size_t , loff_t ); ssize_t (*write)(struct kernfs_open_file *, char *, size_t , loff_t ); int (*mmap)(struct kernfs_open_file *, struct vm_area_struct *); struct lock_class_key lockdep_key; } ; 376 struct sock ; 377 struct kobject ; 378 enum kobj_ns_type { KOBJ_NS_TYPE_NONE = 0, KOBJ_NS_TYPE_NET = 1, KOBJ_NS_TYPES = 2 } ; 384 struct kobj_ns_type_operations { enum kobj_ns_type type; bool (*current_may_mount)(); void * (*grab_current_ns)(); const void * (*netlink_ns)(struct sock *); const void * (*initial_ns)(); void (*drop_ns)(void *); } ; 135 struct kstat { u64 ino; dev_t dev; umode_t mode; unsigned int nlink; kuid_t uid; kgid_t gid; dev_t rdev; loff_t size; struct timespec atime; struct timespec mtime; struct timespec ctime; unsigned long blksize; unsigned long long blocks; } ; 36 struct bin_attribute ; 37 struct attribute { const char *name; umode_t mode; bool ignore_lockdep; struct lock_class_key *key; struct lock_class_key skey; } ; 37 struct attribute_group { const char *name; umode_t (*is_visible)(struct kobject *, struct attribute *, int); struct attribute **attrs; struct bin_attribute **bin_attrs; } ; 67 struct bin_attribute { struct attribute attr; size_t size; void *private; ssize_t (*read)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t , size_t ); ssize_t (*write)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t , size_t ); int (*mmap)(struct file *, struct kobject *, struct bin_attribute *, struct vm_area_struct *); } ; 130 struct sysfs_ops { ssize_t (*show)(struct kobject *, struct attribute *, char *); ssize_t (*store)(struct kobject *, struct attribute *, const char *, size_t ); } ; 462 struct kref { atomic_t refcount; } ; 50 struct kset ; 50 struct kobj_type ; 50 struct kobject { const char *name; struct list_head entry; struct kobject *parent; struct kset *kset; struct kobj_type *ktype; struct kernfs_node *sd; struct kref kref; struct delayed_work release; unsigned char state_initialized; unsigned char state_in_sysfs; unsigned char state_add_uevent_sent; unsigned char state_remove_uevent_sent; unsigned char uevent_suppress; } ; 112 struct kobj_type { void (*release)(struct kobject *); const struct sysfs_ops *sysfs_ops; struct attribute **default_attrs; const struct kobj_ns_type_operations * (*child_ns_type)(struct kobject *); const void * (*namespace)(struct kobject *); } ; 120 struct kobj_uevent_env { char 
*envp[32U]; int envp_idx; char buf[2048U]; int buflen; } ; 127 struct kset_uevent_ops { const int (*filter)(struct kset *, struct kobject *); const const char * (*name)(struct kset *, struct kobject *); const int (*uevent)(struct kset *, struct kobject *, struct kobj_uevent_env *); } ; 144 struct kset { struct list_head list; spinlock_t list_lock; struct kobject kobj; const struct kset_uevent_ops *uevent_ops; } ; 249 struct kmem_cache_cpu { void **freelist; unsigned long tid; struct page *page; struct page *partial; unsigned int stat[26U]; } ; 48 struct kmem_cache_order_objects { unsigned long x; } ; 58 struct memcg_cache_params ; 58 struct kmem_cache_node ; 58 struct kmem_cache { struct kmem_cache_cpu *cpu_slab; unsigned long flags; unsigned long min_partial; int size; int object_size; int offset; int cpu_partial; struct kmem_cache_order_objects oo; struct kmem_cache_order_objects max; struct kmem_cache_order_objects min; gfp_t allocflags; int refcount; void (*ctor)(void *); int inuse; int align; int reserved; const char *name; struct list_head list; struct kobject kobj; struct memcg_cache_params *memcg_params; int max_attr_size; int remote_node_defrag_ratio; struct kmem_cache_node *node[1024U]; } ; 497 struct __anonstruct____missing_field_name_172 { struct callback_head callback_head; struct kmem_cache *memcg_caches[0U]; } ; 497 struct __anonstruct____missing_field_name_173 { struct mem_cgroup *memcg; struct list_head list; struct kmem_cache *root_cache; bool dead; atomic_t nr_pages; struct work_struct destroy; } ; 497 union __anonunion____missing_field_name_171 { struct __anonstruct____missing_field_name_172 __annonCompField55; struct __anonstruct____missing_field_name_173 __annonCompField56; } ; 497 struct memcg_cache_params { bool is_root_cache; union __anonunion____missing_field_name_171 __annonCompField57; } ; 15 typedef __u64 Elf64_Addr; 16 typedef __u16 Elf64_Half; 20 typedef __u32 Elf64_Word; 21 typedef __u64 Elf64_Xword; 190 struct elf64_sym { Elf64_Word st_name; unsigned char st_info; unsigned char st_other; Elf64_Half st_shndx; Elf64_Addr st_value; Elf64_Xword st_size; } ; 198 typedef struct elf64_sym Elf64_Sym; 48 struct kernel_param ; 53 struct kernel_param_ops { unsigned int flags; int (*set)(const char *, const struct kernel_param *); int (*get)(char *, const struct kernel_param *); void (*free)(void *); } ; 58 struct kparam_string ; 58 struct kparam_array ; 58 union __anonunion____missing_field_name_178 { void *arg; const struct kparam_string *str; const struct kparam_array *arr; } ; 58 struct kernel_param { const char *name; const struct kernel_param_ops *ops; u16 perm; s16 level; union __anonunion____missing_field_name_178 __annonCompField58; } ; 70 struct kparam_string { unsigned int maxlen; char *string; } ; 76 struct kparam_array { unsigned int max; unsigned int elemsize; unsigned int *num; const struct kernel_param_ops *ops; void *elem; } ; 463 struct tracepoint ; 464 struct tracepoint_func { void *func; void *data; } ; 29 struct tracepoint { const char *name; struct static_key key; void (*regfunc)(); void (*unregfunc)(); struct tracepoint_func *funcs; } ; 92 struct mod_arch_specific { } ; 36 struct module_param_attrs ; 36 struct module_kobject { struct kobject kobj; struct module *mod; struct kobject *drivers_dir; struct module_param_attrs *mp; struct completion *kobj_completion; } ; 46 struct module_attribute { struct attribute attr; ssize_t (*show)(struct module_attribute *, struct module_kobject *, char *); ssize_t (*store)(struct module_attribute *, struct 
module_kobject *, const char *, size_t ); void (*setup)(struct module *, const char *); int (*test)(struct module *); void (*free)(struct module *); } ; 72 struct exception_table_entry ; 208 enum module_state { MODULE_STATE_LIVE = 0, MODULE_STATE_COMING = 1, MODULE_STATE_GOING = 2, MODULE_STATE_UNFORMED = 3 } ; 215 struct module_ref { unsigned long incs; unsigned long decs; } ; 229 struct module_sect_attrs ; 229 struct module_notes_attrs ; 229 struct ftrace_event_call ; 229 struct module { enum module_state state; struct list_head list; char name[56U]; struct module_kobject mkobj; struct module_attribute *modinfo_attrs; const char *version; const char *srcversion; struct kobject *holders_dir; const struct kernel_symbol *syms; const unsigned long *crcs; unsigned int num_syms; struct kernel_param *kp; unsigned int num_kp; unsigned int num_gpl_syms; const struct kernel_symbol *gpl_syms; const unsigned long *gpl_crcs; const struct kernel_symbol *unused_syms; const unsigned long *unused_crcs; unsigned int num_unused_syms; unsigned int num_unused_gpl_syms; const struct kernel_symbol *unused_gpl_syms; const unsigned long *unused_gpl_crcs; bool sig_ok; const struct kernel_symbol *gpl_future_syms; const unsigned long *gpl_future_crcs; unsigned int num_gpl_future_syms; unsigned int num_exentries; struct exception_table_entry *extable; int (*init)(); void *module_init; void *module_core; unsigned int init_size; unsigned int core_size; unsigned int init_text_size; unsigned int core_text_size; unsigned int init_ro_size; unsigned int core_ro_size; struct mod_arch_specific arch; unsigned int taints; unsigned int num_bugs; struct list_head bug_list; struct bug_entry *bug_table; Elf64_Sym *symtab; Elf64_Sym *core_symtab; unsigned int num_symtab; unsigned int core_num_syms; char *strtab; char *core_strtab; struct module_sect_attrs *sect_attrs; struct module_notes_attrs *notes_attrs; char *args; void *percpu; unsigned int percpu_size; unsigned int num_tracepoints; const struct tracepoint **tracepoints_ptrs; unsigned int num_trace_bprintk_fmt; const char **trace_bprintk_fmt_start; struct ftrace_event_call **trace_events; unsigned int num_trace_events; struct list_head source_list; struct list_head target_list; void (*exit)(); struct module_ref *refptr; ctor_fn_t (**ctors)(); unsigned int num_ctors; } ; 93 struct hlist_bl_node ; 93 struct hlist_bl_head { struct hlist_bl_node *first; } ; 36 struct hlist_bl_node { struct hlist_bl_node *next; struct hlist_bl_node **pprev; } ; 114 struct __anonstruct____missing_field_name_180 { spinlock_t lock; unsigned int count; } ; 114 union __anonunion____missing_field_name_179 { struct __anonstruct____missing_field_name_180 __annonCompField59; } ; 114 struct lockref { union __anonunion____missing_field_name_179 __annonCompField60; } ; 49 struct nameidata ; 50 struct path ; 51 struct vfsmount ; 52 struct __anonstruct____missing_field_name_182 { u32 hash; u32 len; } ; 52 union __anonunion____missing_field_name_181 { struct __anonstruct____missing_field_name_182 __annonCompField61; u64 hash_len; } ; 52 struct qstr { union __anonunion____missing_field_name_181 __annonCompField62; const unsigned char *name; } ; 90 struct dentry_operations ; 90 union __anonunion_d_u_183 { struct list_head d_child; struct callback_head d_rcu; } ; 90 struct dentry { unsigned int d_flags; seqcount_t d_seq; struct hlist_bl_node d_hash; struct dentry *d_parent; struct qstr d_name; struct inode *d_inode; unsigned char d_iname[32U]; struct lockref d_lockref; const struct dentry_operations *d_op; struct 
super_block *d_sb; unsigned long d_time; void *d_fsdata; struct list_head d_lru; union __anonunion_d_u_183 d_u; struct list_head d_subdirs; struct hlist_node d_alias; } ; 142 struct dentry_operations { int (*d_revalidate)(struct dentry *, unsigned int); int (*d_weak_revalidate)(struct dentry *, unsigned int); int (*d_hash)(const struct dentry *, struct qstr *); int (*d_compare)(const struct dentry *, const struct dentry *, unsigned int, const char *, const struct qstr *); int (*d_delete)(const struct dentry *); void (*d_release)(struct dentry *); void (*d_prune)(struct dentry *); void (*d_iput)(struct dentry *, struct inode *); char * (*d_dname)(struct dentry *, char *, int); struct vfsmount * (*d_automount)(struct path *); int (*d_manage)(struct dentry *, bool ); } ; 469 struct path { struct vfsmount *mnt; struct dentry *dentry; } ; 26 struct list_lru_node { spinlock_t lock; struct list_head list; long nr_items; } ; 28 struct list_lru { struct list_lru_node *node; nodemask_t active_nodes; } ; 58 struct radix_tree_node ; 58 struct radix_tree_root { unsigned int height; gfp_t gfp_mask; struct radix_tree_node *rnode; } ; 45 struct fiemap_extent { __u64 fe_logical; __u64 fe_physical; __u64 fe_length; __u64 fe_reserved64[2U]; __u32 fe_flags; __u32 fe_reserved[3U]; } ; 38 struct shrink_control { gfp_t gfp_mask; unsigned long nr_to_scan; nodemask_t nodes_to_scan; int nid; } ; 26 struct shrinker { unsigned long int (*count_objects)(struct shrinker *, struct shrink_control *); unsigned long int (*scan_objects)(struct shrinker *, struct shrink_control *); int seeks; long batch; unsigned long flags; struct list_head list; atomic_long_t *nr_deferred; } ; 70 enum migrate_mode { MIGRATE_ASYNC = 0, MIGRATE_SYNC_LIGHT = 1, MIGRATE_SYNC = 2 } ; 27 struct bio_set ; 28 struct bio ; 29 struct bio_integrity_payload ; 30 struct block_device ; 31 struct cgroup_subsys_state ; 19 typedef void bio_end_io_t(struct bio *, int); 21 struct bio_vec { struct page *bv_page; unsigned int bv_len; unsigned int bv_offset; } ; 30 struct bvec_iter { sector_t bi_sector; unsigned int bi_size; unsigned int bi_idx; unsigned int bi_bvec_done; } ; 40 struct bio { struct bio *bi_next; struct block_device *bi_bdev; unsigned long bi_flags; unsigned long bi_rw; struct bvec_iter bi_iter; unsigned int bi_phys_segments; unsigned int bi_seg_front_size; unsigned int bi_seg_back_size; atomic_t bi_remaining; bio_end_io_t *bi_end_io; void *bi_private; struct io_context *bi_ioc; struct cgroup_subsys_state *bi_css; struct bio_integrity_payload *bi_integrity; unsigned short bi_vcnt; unsigned short bi_max_vecs; atomic_t bi_cnt; struct bio_vec *bi_io_vec; struct bio_set *bi_pool; struct bio_vec bi_inline_vecs[0U]; } ; 56 struct export_operations ; 58 struct iovec ; 59 struct kiocb ; 60 struct poll_table_struct ; 61 struct kstatfs ; 62 struct swap_info_struct ; 68 struct iattr { unsigned int ia_valid; umode_t ia_mode; kuid_t ia_uid; kgid_t ia_gid; loff_t ia_size; struct timespec ia_atime; struct timespec ia_mtime; struct timespec ia_ctime; struct file *ia_file; } ; 246 struct fs_disk_quota { __s8 d_version; __s8 d_flags; __u16 d_fieldmask; __u32 d_id; __u64 d_blk_hardlimit; __u64 d_blk_softlimit; __u64 d_ino_hardlimit; __u64 d_ino_softlimit; __u64 d_bcount; __u64 d_icount; __s32 d_itimer; __s32 d_btimer; __u16 d_iwarns; __u16 d_bwarns; __s32 d_padding2; __u64 d_rtb_hardlimit; __u64 d_rtb_softlimit; __u64 d_rtbcount; __s32 d_rtbtimer; __u16 d_rtbwarns; __s16 d_padding3; char d_padding4[8U]; } ; 76 struct fs_qfilestat { __u64 qfs_ino; __u64 qfs_nblks; 
__u32 qfs_nextents; } ; 151 typedef struct fs_qfilestat fs_qfilestat_t; 152 struct fs_quota_stat { __s8 qs_version; __u16 qs_flags; __s8 qs_pad; fs_qfilestat_t qs_uquota; fs_qfilestat_t qs_gquota; __u32 qs_incoredqs; __s32 qs_btimelimit; __s32 qs_itimelimit; __s32 qs_rtbtimelimit; __u16 qs_bwarnlimit; __u16 qs_iwarnlimit; } ; 166 struct fs_qfilestatv { __u64 qfs_ino; __u64 qfs_nblks; __u32 qfs_nextents; __u32 qfs_pad; } ; 196 struct fs_quota_statv { __s8 qs_version; __u8 qs_pad1; __u16 qs_flags; __u32 qs_incoredqs; struct fs_qfilestatv qs_uquota; struct fs_qfilestatv qs_gquota; struct fs_qfilestatv qs_pquota; __s32 qs_btimelimit; __s32 qs_itimelimit; __s32 qs_rtbtimelimit; __u16 qs_bwarnlimit; __u16 qs_iwarnlimit; __u64 qs_pad2[8U]; } ; 212 struct dquot ; 19 typedef __kernel_uid32_t projid_t; 23 struct __anonstruct_kprojid_t_184 { projid_t val; } ; 23 typedef struct __anonstruct_kprojid_t_184 kprojid_t; 119 struct if_dqinfo { __u64 dqi_bgrace; __u64 dqi_igrace; __u32 dqi_flags; __u32 dqi_valid; } ; 152 enum quota_type { USRQUOTA = 0, GRPQUOTA = 1, PRJQUOTA = 2 } ; 60 typedef long long qsize_t; 61 union __anonunion____missing_field_name_185 { kuid_t uid; kgid_t gid; kprojid_t projid; } ; 61 struct kqid { union __anonunion____missing_field_name_185 __annonCompField63; enum quota_type type; } ; 178 struct mem_dqblk { qsize_t dqb_bhardlimit; qsize_t dqb_bsoftlimit; qsize_t dqb_curspace; qsize_t dqb_rsvspace; qsize_t dqb_ihardlimit; qsize_t dqb_isoftlimit; qsize_t dqb_curinodes; time_t dqb_btime; time_t dqb_itime; } ; 200 struct quota_format_type ; 201 struct mem_dqinfo { struct quota_format_type *dqi_format; int dqi_fmt_id; struct list_head dqi_dirty_list; unsigned long dqi_flags; unsigned int dqi_bgrace; unsigned int dqi_igrace; qsize_t dqi_maxblimit; qsize_t dqi_maxilimit; void *dqi_priv; } ; 264 struct dquot { struct hlist_node dq_hash; struct list_head dq_inuse; struct list_head dq_free; struct list_head dq_dirty; struct mutex dq_lock; atomic_t dq_count; wait_queue_head_t dq_wait_unused; struct super_block *dq_sb; struct kqid dq_id; loff_t dq_off; unsigned long dq_flags; struct mem_dqblk dq_dqb; } ; 291 struct quota_format_ops { int (*check_quota_file)(struct super_block *, int); int (*read_file_info)(struct super_block *, int); int (*write_file_info)(struct super_block *, int); int (*free_file_info)(struct super_block *, int); int (*read_dqblk)(struct dquot *); int (*commit_dqblk)(struct dquot *); int (*release_dqblk)(struct dquot *); } ; 302 struct dquot_operations { int (*write_dquot)(struct dquot *); struct dquot * (*alloc_dquot)(struct super_block *, int); void (*destroy_dquot)(struct dquot *); int (*acquire_dquot)(struct dquot *); int (*release_dquot)(struct dquot *); int (*mark_dirty)(struct dquot *); int (*write_info)(struct super_block *, int); qsize_t * (*get_reserved_space)(struct inode *); } ; 316 struct quotactl_ops { int (*quota_on)(struct super_block *, int, int, struct path *); int (*quota_on_meta)(struct super_block *, int, int); int (*quota_off)(struct super_block *, int); int (*quota_sync)(struct super_block *, int); int (*get_info)(struct super_block *, int, struct if_dqinfo *); int (*set_info)(struct super_block *, int, struct if_dqinfo *); int (*get_dqblk)(struct super_block *, struct kqid , struct fs_disk_quota *); int (*set_dqblk)(struct super_block *, struct kqid , struct fs_disk_quota *); int (*get_xstate)(struct super_block *, struct fs_quota_stat *); int (*set_xstate)(struct super_block *, unsigned int, int); int (*get_xstatev)(struct super_block *, struct 
fs_quota_statv *); } ; 333 struct quota_format_type { int qf_fmt_id; const struct quota_format_ops *qf_ops; struct module *qf_owner; struct quota_format_type *qf_next; } ; 379 struct quota_info { unsigned int flags; struct mutex dqio_mutex; struct mutex dqonoff_mutex; struct rw_semaphore dqptr_sem; struct inode *files[2U]; struct mem_dqinfo info[2U]; const struct quota_format_ops *ops[2U]; } ; 410 struct writeback_control ; 345 union __anonunion_arg_187 { char *buf; void *data; } ; 345 struct __anonstruct_read_descriptor_t_186 { size_t written; size_t count; union __anonunion_arg_187 arg; int error; } ; 345 typedef struct __anonstruct_read_descriptor_t_186 read_descriptor_t; 348 struct address_space_operations { int (*writepage)(struct page *, struct writeback_control *); int (*readpage)(struct file *, struct page *); int (*writepages)(struct address_space *, struct writeback_control *); int (*set_page_dirty)(struct page *); int (*readpages)(struct file *, struct address_space *, struct list_head *, unsigned int); int (*write_begin)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page **, void **); int (*write_end)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page *, void *); sector_t (*bmap)(struct address_space *, sector_t ); void (*invalidatepage)(struct page *, unsigned int, unsigned int); int (*releasepage)(struct page *, gfp_t ); void (*freepage)(struct page *); ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *, loff_t , unsigned long); int (*get_xip_mem)(struct address_space *, unsigned long, int, void **, unsigned long *); int (*migratepage)(struct address_space *, struct page *, struct page *, enum migrate_mode ); int (*launder_page)(struct page *); int (*is_partially_uptodate)(struct page *, read_descriptor_t *, unsigned long); void (*is_dirty_writeback)(struct page *, bool *, bool *); int (*error_remove_page)(struct address_space *, struct page *); int (*swap_activate)(struct swap_info_struct *, struct file *, sector_t *); void (*swap_deactivate)(struct file *); } ; 408 struct address_space { struct inode *host; struct radix_tree_root page_tree; spinlock_t tree_lock; unsigned int i_mmap_writable; struct rb_root i_mmap; struct list_head i_mmap_nonlinear; struct mutex i_mmap_mutex; unsigned long nrpages; unsigned long writeback_index; const struct address_space_operations *a_ops; unsigned long flags; struct backing_dev_info *backing_dev_info; spinlock_t private_lock; struct list_head private_list; void *private_data; } ; 430 struct request_queue ; 431 struct hd_struct ; 431 struct gendisk ; 431 struct block_device { dev_t bd_dev; int bd_openers; struct inode *bd_inode; struct super_block *bd_super; struct mutex bd_mutex; struct list_head bd_inodes; void *bd_claiming; void *bd_holder; int bd_holders; bool bd_write_holder; struct list_head bd_holder_disks; struct block_device *bd_contains; unsigned int bd_block_size; struct hd_struct *bd_part; unsigned int bd_part_count; int bd_invalidated; struct gendisk *bd_disk; struct request_queue *bd_queue; struct list_head bd_list; unsigned long bd_private; int bd_fsfreeze_count; struct mutex bd_fsfreeze_mutex; } ; 503 struct posix_acl ; 504 struct inode_operations ; 504 union __anonunion____missing_field_name_188 { const unsigned int i_nlink; unsigned int __i_nlink; } ; 504 union __anonunion____missing_field_name_189 { struct hlist_head i_dentry; struct callback_head i_rcu; } ; 504 struct file_lock ; 504 struct cdev ; 504 union 
__anonunion____missing_field_name_190 { struct pipe_inode_info *i_pipe; struct block_device *i_bdev; struct cdev *i_cdev; } ; 504 struct inode { umode_t i_mode; unsigned short i_opflags; kuid_t i_uid; kgid_t i_gid; unsigned int i_flags; struct posix_acl *i_acl; struct posix_acl *i_default_acl; const struct inode_operations *i_op; struct super_block *i_sb; struct address_space *i_mapping; void *i_security; unsigned long i_ino; union __anonunion____missing_field_name_188 __annonCompField64; dev_t i_rdev; loff_t i_size; struct timespec i_atime; struct timespec i_mtime; struct timespec i_ctime; spinlock_t i_lock; unsigned short i_bytes; unsigned int i_blkbits; blkcnt_t i_blocks; unsigned long i_state; struct mutex i_mutex; unsigned long dirtied_when; struct hlist_node i_hash; struct list_head i_wb_list; struct list_head i_lru; struct list_head i_sb_list; union __anonunion____missing_field_name_189 __annonCompField65; u64 i_version; atomic_t i_count; atomic_t i_dio_count; atomic_t i_writecount; const struct file_operations *i_fop; struct file_lock *i_flock; struct address_space i_data; struct dquot *i_dquot[2U]; struct list_head i_devices; union __anonunion____missing_field_name_190 __annonCompField66; __u32 i_generation; __u32 i_fsnotify_mask; struct hlist_head i_fsnotify_marks; atomic_t i_readcount; void *i_private; } ; 740 struct fown_struct { rwlock_t lock; struct pid *pid; enum pid_type pid_type; kuid_t uid; kuid_t euid; int signum; } ; 748 struct file_ra_state { unsigned long start; unsigned int size; unsigned int async_size; unsigned int ra_pages; unsigned int mmap_miss; loff_t prev_pos; } ; 771 union __anonunion_f_u_191 { struct llist_node fu_llist; struct callback_head fu_rcuhead; } ; 771 struct file { union __anonunion_f_u_191 f_u; struct path f_path; struct inode *f_inode; const struct file_operations *f_op; spinlock_t f_lock; atomic_long_t f_count; unsigned int f_flags; fmode_t f_mode; struct mutex f_pos_lock; loff_t f_pos; struct fown_struct f_owner; const struct cred *f_cred; struct file_ra_state f_ra; u64 f_version; void *f_security; void *private_data; struct list_head f_ep_links; struct list_head f_tfile_llink; struct address_space *f_mapping; unsigned long f_mnt_write_state; } ; 909 typedef struct files_struct *fl_owner_t; 910 struct file_lock_operations { void (*fl_copy_lock)(struct file_lock *, struct file_lock *); void (*fl_release_private)(struct file_lock *); } ; 915 struct lock_manager_operations { int (*lm_compare_owner)(struct file_lock *, struct file_lock *); unsigned long int (*lm_owner_key)(struct file_lock *); void (*lm_notify)(struct file_lock *); int (*lm_grant)(struct file_lock *, struct file_lock *, int); void (*lm_break)(struct file_lock *); int (*lm_change)(struct file_lock **, int); } ; 933 struct nlm_lockowner ; 934 struct nfs_lock_info { u32 state; struct nlm_lockowner *owner; struct list_head list; } ; 14 struct nfs4_lock_state ; 15 struct nfs4_lock_info { struct nfs4_lock_state *owner; } ; 19 struct fasync_struct ; 19 struct __anonstruct_afs_193 { struct list_head link; int state; } ; 19 union __anonunion_fl_u_192 { struct nfs_lock_info nfs_fl; struct nfs4_lock_info nfs4_fl; struct __anonstruct_afs_193 afs; } ; 19 struct file_lock { struct file_lock *fl_next; struct hlist_node fl_link; struct list_head fl_block; fl_owner_t fl_owner; unsigned int fl_flags; unsigned char fl_type; unsigned int fl_pid; int fl_link_cpu; struct pid *fl_nspid; wait_queue_head_t fl_wait; struct file *fl_file; loff_t fl_start; loff_t fl_end; struct fasync_struct *fl_fasync; 
unsigned long fl_break_time; unsigned long fl_downgrade_time; const struct file_lock_operations *fl_ops; const struct lock_manager_operations *fl_lmops; union __anonunion_fl_u_192 fl_u; } ; 1036 struct fasync_struct { spinlock_t fa_lock; int magic; int fa_fd; struct fasync_struct *fa_next; struct file *fa_file; struct callback_head fa_rcu; } ; 1228 struct sb_writers { struct percpu_counter counter[3U]; wait_queue_head_t wait; int frozen; wait_queue_head_t wait_unfrozen; struct lockdep_map lock_map[3U]; } ; 1244 struct super_operations ; 1244 struct xattr_handler ; 1244 struct mtd_info ; 1244 struct super_block { struct list_head s_list; dev_t s_dev; unsigned char s_blocksize_bits; unsigned long s_blocksize; loff_t s_maxbytes; struct file_system_type *s_type; const struct super_operations *s_op; const struct dquot_operations *dq_op; const struct quotactl_ops *s_qcop; const struct export_operations *s_export_op; unsigned long s_flags; unsigned long s_magic; struct dentry *s_root; struct rw_semaphore s_umount; int s_count; atomic_t s_active; void *s_security; const struct xattr_handler **s_xattr; struct list_head s_inodes; struct hlist_bl_head s_anon; struct list_head s_mounts; struct block_device *s_bdev; struct backing_dev_info *s_bdi; struct mtd_info *s_mtd; struct hlist_node s_instances; struct quota_info s_dquot; struct sb_writers s_writers; char s_id[32U]; u8 s_uuid[16U]; void *s_fs_info; unsigned int s_max_links; fmode_t s_mode; u32 s_time_gran; struct mutex s_vfs_rename_mutex; char *s_subtype; char *s_options; const struct dentry_operations *s_d_op; int cleancache_poolid; struct shrinker s_shrink; atomic_long_t s_remove_count; int s_readonly_remount; struct workqueue_struct *s_dio_done_wq; struct list_lru s_dentry_lru; struct list_lru s_inode_lru; struct callback_head rcu; } ; 1474 struct fiemap_extent_info { unsigned int fi_flags; unsigned int fi_extents_mapped; unsigned int fi_extents_max; struct fiemap_extent *fi_extents_start; } ; 1512 struct dir_context { int (*actor)(void *, const char *, int, loff_t , u64 , unsigned int); loff_t pos; } ; 1517 struct file_operations { struct module *owner; loff_t (*llseek)(struct file *, loff_t , int); ssize_t (*read)(struct file *, char *, size_t , loff_t *); ssize_t (*write)(struct file *, const char *, size_t , loff_t *); ssize_t (*aio_read)(struct kiocb *, const struct iovec *, unsigned long, loff_t ); ssize_t (*aio_write)(struct kiocb *, const struct iovec *, unsigned long, loff_t ); int (*iterate)(struct file *, struct dir_context *); unsigned int (*poll)(struct file *, struct poll_table_struct *); long int (*unlocked_ioctl)(struct file *, unsigned int, unsigned long); long int (*compat_ioctl)(struct file *, unsigned int, unsigned long); int (*mmap)(struct file *, struct vm_area_struct *); int (*open)(struct inode *, struct file *); int (*flush)(struct file *, fl_owner_t ); int (*release)(struct inode *, struct file *); int (*fsync)(struct file *, loff_t , loff_t , int); int (*aio_fsync)(struct kiocb *, int); int (*fasync)(int, struct file *, int); int (*lock)(struct file *, int, struct file_lock *); ssize_t (*sendpage)(struct file *, struct page *, int, size_t , loff_t *, int); unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); int (*check_flags)(int); int (*flock)(struct file *, int, struct file_lock *); ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t , unsigned int); ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, 
size_t , unsigned int); int (*setlease)(struct file *, long, struct file_lock **); long int (*fallocate)(struct file *, int, loff_t , loff_t ); int (*show_fdinfo)(struct seq_file *, struct file *); } ; 1555 struct inode_operations { struct dentry * (*lookup)(struct inode *, struct dentry *, unsigned int); void * (*follow_link)(struct dentry *, struct nameidata *); int (*permission)(struct inode *, int); struct posix_acl * (*get_acl)(struct inode *, int); int (*readlink)(struct dentry *, char *, int); void (*put_link)(struct dentry *, struct nameidata *, void *); int (*create)(struct inode *, struct dentry *, umode_t , bool ); int (*link)(struct dentry *, struct inode *, struct dentry *); int (*unlink)(struct inode *, struct dentry *); int (*symlink)(struct inode *, struct dentry *, const char *); int (*mkdir)(struct inode *, struct dentry *, umode_t ); int (*rmdir)(struct inode *, struct dentry *); int (*mknod)(struct inode *, struct dentry *, umode_t , dev_t ); int (*rename)(struct inode *, struct dentry *, struct inode *, struct dentry *); int (*setattr)(struct dentry *, struct iattr *); int (*getattr)(struct vfsmount *, struct dentry *, struct kstat *); int (*setxattr)(struct dentry *, const char *, const void *, size_t , int); ssize_t (*getxattr)(struct dentry *, const char *, void *, size_t ); ssize_t (*listxattr)(struct dentry *, char *, size_t ); int (*removexattr)(struct dentry *, const char *); int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 , u64 ); int (*update_time)(struct inode *, struct timespec *, int); int (*atomic_open)(struct inode *, struct dentry *, struct file *, unsigned int, umode_t , int *); int (*tmpfile)(struct inode *, struct dentry *, umode_t ); int (*set_acl)(struct inode *, struct posix_acl *, int); } ; 1600 struct super_operations { struct inode * (*alloc_inode)(struct super_block *); void (*destroy_inode)(struct inode *); void (*dirty_inode)(struct inode *, int); int (*write_inode)(struct inode *, struct writeback_control *); int (*drop_inode)(struct inode *); void (*evict_inode)(struct inode *); void (*put_super)(struct super_block *); int (*sync_fs)(struct super_block *, int); int (*freeze_fs)(struct super_block *); int (*unfreeze_fs)(struct super_block *); int (*statfs)(struct dentry *, struct kstatfs *); int (*remount_fs)(struct super_block *, int *, char *); void (*umount_begin)(struct super_block *); int (*show_options)(struct seq_file *, struct dentry *); int (*show_devname)(struct seq_file *, struct dentry *); int (*show_path)(struct seq_file *, struct dentry *); int (*show_stats)(struct seq_file *, struct dentry *); ssize_t (*quota_read)(struct super_block *, int, char *, size_t , loff_t ); ssize_t (*quota_write)(struct super_block *, int, const char *, size_t , loff_t ); int (*bdev_try_to_free_page)(struct super_block *, struct page *, gfp_t ); long int (*nr_cached_objects)(struct super_block *, int); long int (*free_cached_objects)(struct super_block *, long, int); } ; 1814 struct file_system_type { const char *name; int fs_flags; struct dentry * (*mount)(struct file_system_type *, int, const char *, void *); void (*kill_sb)(struct super_block *); struct module *owner; struct file_system_type *next; struct hlist_head fs_supers; struct lock_class_key s_lock_key; struct lock_class_key s_umount_key; struct lock_class_key s_vfs_rename_key; struct lock_class_key s_writers_key[3U]; struct lock_class_key i_lock_key; struct lock_class_key i_mutex_key; struct lock_class_key i_mutex_dir_key; } ; 185 struct vm_fault { unsigned int flags; 
unsigned long pgoff; void *virtual_address; struct page *page; } ; 210 struct vm_operations_struct { void (*open)(struct vm_area_struct *); void (*close)(struct vm_area_struct *); int (*fault)(struct vm_area_struct *, struct vm_fault *); int (*page_mkwrite)(struct vm_area_struct *, struct vm_fault *); int (*access)(struct vm_area_struct *, unsigned long, void *, int, int); int (*set_policy)(struct vm_area_struct *, struct mempolicy *); struct mempolicy * (*get_policy)(struct vm_area_struct *, unsigned long); int (*migrate)(struct vm_area_struct *, const nodemask_t *, const nodemask_t *, unsigned long); int (*remap_pages)(struct vm_area_struct *, unsigned long, unsigned long, unsigned long); } ; 62 struct exception_table_entry { int insn; int fixup; } ; 124 enum irqreturn { IRQ_NONE = 0, IRQ_HANDLED = 1, IRQ_WAKE_THREAD = 2 } ; 16 typedef enum irqreturn irqreturn_t; 11 typedef void * mempool_alloc_t(gfp_t , void *); 12 typedef void mempool_free_t(void *, void *); 13 struct mempool_s { spinlock_t lock; int min_nr; int curr_nr; void **elements; void *pool_data; mempool_alloc_t *alloc; mempool_free_t *free; wait_queue_head_t wait; } ; 24 typedef struct mempool_s mempool_t; 77 union __anonunion____missing_field_name_196 { struct list_head q_node; struct kmem_cache *__rcu_icq_cache; } ; 77 union __anonunion____missing_field_name_197 { struct hlist_node ioc_node; struct callback_head __rcu_head; } ; 77 struct io_cq { struct request_queue *q; struct io_context *ioc; union __anonunion____missing_field_name_196 __annonCompField68; union __anonunion____missing_field_name_197 __annonCompField69; unsigned int flags; } ; 92 struct io_context { atomic_long_t refcount; atomic_t active_ref; atomic_t nr_tasks; spinlock_t lock; unsigned short ioprio; int nr_batch_requests; unsigned long last_waited; struct radix_tree_root icq_tree; struct io_cq *icq_hint; struct hlist_head icq_list; struct work_struct release_work; } ; 269 struct bio_integrity_payload { struct bio *bip_bio; struct bvec_iter bip_iter; void *bip_buf; bio_end_io_t *bip_end_io; unsigned short bip_slab; unsigned short bip_vcnt; unsigned char bip_owns_buf; struct work_struct bip_work; struct bio_vec *bip_vec; struct bio_vec bip_inline_vecs[0U]; } ; 479 struct bio_list { struct bio *head; struct bio *tail; } ; 600 struct bio_set { struct kmem_cache *bio_slab; unsigned int front_pad; mempool_t *bio_pool; mempool_t *bvec_pool; mempool_t *bio_integrity_pool; mempool_t *bvec_integrity_pool; spinlock_t rescue_lock; struct bio_list rescue_list; struct work_struct rescue_work; struct workqueue_struct *rescue_workqueue; } ; 673 struct klist_node ; 37 struct klist_node { void *n_klist; struct list_head n_node; struct kref n_ref; } ; 67 struct seq_file { char *buf; size_t size; size_t from; size_t count; size_t pad_until; loff_t index; loff_t read_pos; u64 version; struct mutex lock; const struct seq_operations *op; int poll_event; struct user_namespace *user_ns; void *private; } ; 35 struct seq_operations { void * (*start)(struct seq_file *, loff_t *); void (*stop)(struct seq_file *, void *); void * (*next)(struct seq_file *, void *, loff_t *); int (*show)(struct seq_file *, void *); } ; 196 struct pinctrl ; 197 struct pinctrl_state ; 194 struct dev_pin_info { struct pinctrl *p; struct pinctrl_state *default_state; struct pinctrl_state *sleep_state; struct pinctrl_state *idle_state; } ; 42 struct dma_map_ops ; 42 struct dev_archdata { struct dma_map_ops *dma_ops; void *iommu; } ; 14 struct device_private ; 15 struct device_driver ; 16 struct driver_private 
; 17 struct class ; 18 struct subsys_private ; 19 struct bus_type ; 20 struct iommu_ops ; 21 struct iommu_group ; 60 struct device_attribute ; 60 struct bus_type { const char *name; const char *dev_name; struct device *dev_root; struct device_attribute *dev_attrs; const struct attribute_group **bus_groups; const struct attribute_group **dev_groups; const struct attribute_group **drv_groups; int (*match)(struct device *, struct device_driver *); int (*uevent)(struct device *, struct kobj_uevent_env *); int (*probe)(struct device *); int (*remove)(struct device *); void (*shutdown)(struct device *); int (*online)(struct device *); int (*offline)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct dev_pm_ops *pm; struct iommu_ops *iommu_ops; struct subsys_private *p; struct lock_class_key lock_key; } ; 138 struct device_type ; 195 struct of_device_id ; 195 struct acpi_device_id ; 195 struct device_driver { const char *name; struct bus_type *bus; struct module *owner; const char *mod_name; bool suppress_bind_attrs; const struct of_device_id *of_match_table; const struct acpi_device_id *acpi_match_table; int (*probe)(struct device *); int (*remove)(struct device *); void (*shutdown)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct attribute_group **groups; const struct dev_pm_ops *pm; struct driver_private *p; } ; 321 struct class_attribute ; 321 struct class { const char *name; struct module *owner; struct class_attribute *class_attrs; const struct attribute_group **dev_groups; struct kobject *dev_kobj; int (*dev_uevent)(struct device *, struct kobj_uevent_env *); char * (*devnode)(struct device *, umode_t *); void (*class_release)(struct class *); void (*dev_release)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct kobj_ns_type_operations *ns_type; const void * (*namespace)(struct device *); const struct dev_pm_ops *pm; struct subsys_private *p; } ; 414 struct class_attribute { struct attribute attr; ssize_t (*show)(struct class *, struct class_attribute *, char *); ssize_t (*store)(struct class *, struct class_attribute *, const char *, size_t ); } ; 482 struct device_type { const char *name; const struct attribute_group **groups; int (*uevent)(struct device *, struct kobj_uevent_env *); char * (*devnode)(struct device *, umode_t *, kuid_t *, kgid_t *); void (*release)(struct device *); const struct dev_pm_ops *pm; } ; 510 struct device_attribute { struct attribute attr; ssize_t (*show)(struct device *, struct device_attribute *, char *); ssize_t (*store)(struct device *, struct device_attribute *, const char *, size_t ); } ; 637 struct device_dma_parameters { unsigned int max_segment_size; unsigned long segment_boundary_mask; } ; 646 struct acpi_device ; 647 struct acpi_dev_node { struct acpi_device *companion; } ; 653 struct dma_coherent_mem ; 653 struct device { struct device *parent; struct device_private *p; struct kobject kobj; const char *init_name; const struct device_type *type; struct mutex mutex; struct bus_type *bus; struct device_driver *driver; void *platform_data; struct dev_pm_info power; struct dev_pm_domain *pm_domain; struct dev_pin_info *pins; int numa_node; u64 *dma_mask; u64 coherent_dma_mask; struct device_dma_parameters *dma_parms; struct list_head dma_pools; struct dma_coherent_mem *dma_mem; struct dev_archdata archdata; struct device_node *of_node; struct acpi_dev_node acpi_node; dev_t 
devt; u32 id; spinlock_t devres_lock; struct list_head devres_head; struct klist_node knode_class; struct class *class; const struct attribute_group **groups; void (*release)(struct device *); struct iommu_group *iommu_group; bool offline_disabled; bool offline; } ; 795 struct wakeup_source { const char *name; struct list_head entry; spinlock_t lock; struct timer_list timer; unsigned long timer_expires; ktime_t total_time; ktime_t max_time; ktime_t last_time; ktime_t start_prevent_time; ktime_t prevent_sleep_time; unsigned long event_count; unsigned long active_count; unsigned long relax_count; unsigned long expire_count; unsigned long wakeup_count; bool active; bool autosleep_enabled; } ; 1196 struct dma_attrs { unsigned long flags[1U]; } ; 70 struct scatterlist { unsigned long sg_magic; unsigned long page_link; unsigned int offset; unsigned int length; dma_addr_t dma_address; unsigned int dma_length; } ; 17 struct sg_table { struct scatterlist *sgl; unsigned int nents; unsigned int orig_nents; } ; 351 struct dma_map_ops { void * (*alloc)(struct device *, size_t , dma_addr_t *, gfp_t , struct dma_attrs *); void (*free)(struct device *, size_t , void *, dma_addr_t , struct dma_attrs *); int (*mmap)(struct device *, struct vm_area_struct *, void *, dma_addr_t , size_t , struct dma_attrs *); int (*get_sgtable)(struct device *, struct sg_table *, void *, dma_addr_t , size_t , struct dma_attrs *); dma_addr_t (*map_page)(struct device *, struct page *, unsigned long, size_t , enum dma_data_direction , struct dma_attrs *); void (*unmap_page)(struct device *, dma_addr_t , size_t , enum dma_data_direction , struct dma_attrs *); int (*map_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction , struct dma_attrs *); void (*unmap_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction , struct dma_attrs *); void (*sync_single_for_cpu)(struct device *, dma_addr_t , size_t , enum dma_data_direction ); void (*sync_single_for_device)(struct device *, dma_addr_t , size_t , enum dma_data_direction ); void (*sync_sg_for_cpu)(struct device *, struct scatterlist *, int, enum dma_data_direction ); void (*sync_sg_for_device)(struct device *, struct scatterlist *, int, enum dma_data_direction ); int (*mapping_error)(struct device *, dma_addr_t ); int (*dma_supported)(struct device *, u64 ); int (*set_dma_mask)(struct device *, u64 ); int is_phys; } ; 26 enum led_brightness { LED_OFF = 0, LED_HALF = 127, LED_FULL = 255 } ; 32 struct led_trigger ; 32 struct led_classdev { const char *name; int brightness; int max_brightness; int flags; void (*brightness_set)(struct led_classdev *, enum led_brightness ); enum led_brightness (*brightness_get)(struct led_classdev *); int (*blink_set)(struct led_classdev *, unsigned long *, unsigned long *); struct device *dev; struct list_head node; const char *default_trigger; unsigned long blink_delay_on; unsigned long blink_delay_off; struct timer_list blink_timer; int blink_brightness; struct work_struct set_brightness_work; int delayed_set_value; struct rw_semaphore trigger_lock; struct led_trigger *trigger; struct list_head trig_list; void *trigger_data; bool activated; } ; 140 struct led_trigger { const char *name; void (*activate)(struct led_classdev *); void (*deactivate)(struct led_classdev *); rwlock_t leddev_list_lock; struct list_head led_cdevs; struct list_head next_trig; } ; 103 struct fault_attr { unsigned long probability; unsigned long interval; atomic_t times; atomic_t space; unsigned long verbose; u32 task_filter; unsigned long 
stacktrace_depth; unsigned long require_start; unsigned long require_end; unsigned long reject_start; unsigned long reject_end; unsigned long count; } ; 619 struct mmc_data ; 620 struct mmc_request ; 621 struct mmc_command { u32 opcode; u32 arg; u32 resp[4U]; unsigned int flags; unsigned int retries; unsigned int error; unsigned int cmd_timeout_ms; bool sanitize_busy; struct mmc_data *data; struct mmc_request *mrq; } ; 105 struct mmc_data { unsigned int timeout_ns; unsigned int timeout_clks; unsigned int blksz; unsigned int blocks; unsigned int error; unsigned int flags; unsigned int bytes_xfered; struct mmc_command *stop; struct mmc_request *mrq; unsigned int sg_len; struct scatterlist *sg; s32 host_cookie; } ; 127 struct mmc_host ; 128 struct mmc_request { struct mmc_command *sbc; struct mmc_command *cmd; struct mmc_data *data; struct mmc_command *stop; struct completion completion; void (*done)(struct mmc_request *); struct mmc_host *host; } ; 139 struct mmc_card ; 140 struct mmc_async_req ; 25 typedef unsigned int mmc_pm_flag_t; 26 struct mmc_ios { unsigned int clock; unsigned short vdd; unsigned char bus_mode; unsigned char chip_select; unsigned char power_mode; unsigned char bus_width; unsigned char timing; unsigned char signal_voltage; unsigned char drv_type; } ; 77 struct mmc_host_ops { int (*enable)(struct mmc_host *); int (*disable)(struct mmc_host *); void (*post_req)(struct mmc_host *, struct mmc_request *, int); void (*pre_req)(struct mmc_host *, struct mmc_request *, bool ); void (*request)(struct mmc_host *, struct mmc_request *); void (*set_ios)(struct mmc_host *, struct mmc_ios *); int (*get_ro)(struct mmc_host *); int (*get_cd)(struct mmc_host *); void (*enable_sdio_irq)(struct mmc_host *, int); void (*init_card)(struct mmc_host *, struct mmc_card *); int (*start_signal_voltage_switch)(struct mmc_host *, struct mmc_ios *); int (*card_busy)(struct mmc_host *); int (*execute_tuning)(struct mmc_host *, u32 ); int (*select_drive_strength)(unsigned int, int, int); void (*hw_reset)(struct mmc_host *); void (*card_event)(struct mmc_host *); } ; 143 struct mmc_async_req { struct mmc_request *mrq; int (*err_check)(struct mmc_card *, struct mmc_async_req *); } ; 156 struct mmc_slot { int cd_irq; struct mutex lock; void *handler_priv; } ; 174 struct mmc_context_info { bool is_done_rcv; bool is_new_req; bool is_waiting_last_req; wait_queue_head_t wait; spinlock_t lock; } ; 190 struct regulator ; 191 struct mmc_supply { struct regulator *vmmc; struct regulator *vqmmc; } ; 197 struct mmc_bus_ops ; 197 struct mmc_host { struct device *parent; struct device class_dev; int index; const struct mmc_host_ops *ops; unsigned int f_min; unsigned int f_max; unsigned int f_init; u32 ocr_avail; u32 ocr_avail_sdio; u32 ocr_avail_sd; u32 ocr_avail_mmc; struct notifier_block pm_notify; u32 max_current_330; u32 max_current_300; u32 max_current_180; u32 caps; u32 caps2; mmc_pm_flag_t pm_caps; int clk_requests; unsigned int clk_delay; bool clk_gated; struct delayed_work clk_gate_work; unsigned int clk_old; spinlock_t clk_lock; struct mutex clk_gate_mutex; struct device_attribute clkgate_delay_attr; unsigned long clkgate_delay; unsigned int max_seg_size; unsigned short max_segs; unsigned short unused; unsigned int max_req_size; unsigned int max_blk_size; unsigned int max_blk_count; unsigned int max_discard_to; spinlock_t lock; struct mmc_ios ios; unsigned char use_spi_crc; unsigned char claimed; unsigned char bus_dead; unsigned char removed; int rescan_disable; int rescan_entered; struct mmc_card *card; 
wait_queue_head_t wq; struct task_struct *claimer; int claim_cnt; struct delayed_work detect; int detect_change; struct mmc_slot slot; const struct mmc_bus_ops *bus_ops; unsigned int bus_refs; unsigned int sdio_irqs; struct task_struct *sdio_irq_thread; bool sdio_irq_pending; atomic_t sdio_irq_thread_abort; mmc_pm_flag_t pm_flags; struct led_trigger *led; bool regulator_enabled; struct mmc_supply supply; struct dentry *debugfs_root; struct mmc_async_req *areq; struct mmc_context_info context_info; struct fault_attr fail_mmc_request; unsigned int actual_clock; unsigned int slotno; unsigned long private[0U]; } ; 13 typedef unsigned long kernel_ulong_t; 186 struct acpi_device_id { __u8 id[9U]; kernel_ulong_t driver_data; } ; 219 struct of_device_id { char name[32U]; char type[32U]; char compatible[128U]; const void *data; } ; 54 struct kthread_work ; 65 struct kthread_worker { spinlock_t lock; struct list_head work_list; struct task_struct *task; struct kthread_work *current_work; } ; 72 struct kthread_work { struct list_head node; void (*func)(struct kthread_work *); wait_queue_head_t done; struct kthread_worker *worker; } ; 33 struct spi_master ; 33 struct spi_device { struct device dev; struct spi_master *master; u32 max_speed_hz; u8 chip_select; u8 bits_per_word; u16 mode; int irq; void *controller_state; void *controller_data; char modalias[32U]; int cs_gpio; } ; 152 struct spi_message ; 153 struct spi_transfer ; 210 struct spi_master { struct device dev; struct list_head list; s16 bus_num; u16 num_chipselect; u16 dma_alignment; u16 mode_bits; u32 bits_per_word_mask; u32 min_speed_hz; u32 max_speed_hz; u16 flags; spinlock_t bus_lock_spinlock; struct mutex bus_lock_mutex; bool bus_lock_flag; int (*setup)(struct spi_device *); int (*transfer)(struct spi_device *, struct spi_message *); void (*cleanup)(struct spi_device *); bool queued; struct kthread_worker kworker; struct task_struct *kworker_task; struct kthread_work pump_messages; spinlock_t queue_lock; struct list_head queue; struct spi_message *cur_msg; bool busy; bool running; bool rt; bool auto_runtime_pm; bool cur_msg_prepared; struct completion xfer_completion; int (*prepare_transfer_hardware)(struct spi_master *); int (*transfer_one_message)(struct spi_master *, struct spi_message *); int (*unprepare_transfer_hardware)(struct spi_master *); int (*prepare_message)(struct spi_master *, struct spi_message *); int (*unprepare_message)(struct spi_master *, struct spi_message *); void (*set_cs)(struct spi_device *, bool ); int (*transfer_one)(struct spi_master *, struct spi_device *, struct spi_transfer *); int *cs_gpios; } ; 475 struct spi_transfer { const void *tx_buf; void *rx_buf; unsigned int len; dma_addr_t tx_dma; dma_addr_t rx_dma; unsigned char cs_change; unsigned char tx_nbits; unsigned char rx_nbits; u8 bits_per_word; u16 delay_usecs; u32 speed_hz; struct list_head transfer_list; } ; 595 struct spi_message { struct list_head transfers; struct spi_device *spi; unsigned char is_dma_mapped; void (*complete)(void *); void *context; unsigned int frame_length; unsigned int actual_length; int status; struct list_head queue; void *state; } ; 1013 struct mmc_spi_platform_data { int (*init)(struct device *, irqreturn_t (*)(int, void *), void *); void (*exit)(struct device *, void *); unsigned int flags; unsigned int cd_gpio; unsigned int cd_debounce; unsigned int ro_gpio; unsigned long caps; unsigned long caps2; u16 detect_delay; u16 powerup_msecs; u32 ocr_mask; void (*setpower)(struct device *, unsigned int); } ; 9 struct scratch { 
u8 status[29U]; u8 data_token; __be16 crc_val; } ; 125 struct mmc_spi_host { struct mmc_host *mmc; struct spi_device *spi; unsigned char power_mode; u16 powerup_msecs; struct mmc_spi_platform_data *pdata; struct spi_transfer token; struct spi_transfer t; struct spi_transfer crc; struct spi_transfer early_status; struct spi_message m; struct spi_transfer status; struct spi_message readback; struct device *dma_dev; struct scratch *data; dma_addr_t data_dma; void *ones; dma_addr_t ones_dma; } ; 38 typedef int Set; 1 long int __builtin_expect(long, long); 5 dma_addr_t ldv_dma_map_page(struct device *dev, struct page *page, size_t offset, size_t size, enum dma_data_direction dir); 7 dma_addr_t ldv_dma_map_single_attrs(struct device *dev, void *ptr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs); 46 __u16 __fswab16(__u16 val); 154 __u16 __swab16p(const __u16 *p); 223 void __swab16s(__u16 *p); 53 int __dynamic_dev_dbg(struct _ddebug *, const struct device *, const char *, ...); 165 void __might_sleep(const char *, int, int); 391 int snprintf(char *, size_t , const char *, ...); 24 void INIT_LIST_HEAD(struct list_head *list); 47 void __list_add(struct list_head *, struct list_head *, struct list_head *); 60 void list_add(struct list_head *new, struct list_head *head); 74 void list_add_tail(struct list_head *new, struct list_head *head); 71 void warn_slowpath_null(const char *, const int); 55 void * memset(void *, int, size_t ); 77 extern volatile unsigned long jiffies; 303 unsigned long int msecs_to_jiffies(const unsigned int); 304 unsigned long int usecs_to_jiffies(const unsigned int); 376 void schedule(); 46 void msleep(unsigned int); 142 void kfree(const void *); 302 void * __kmalloc(size_t , gfp_t ); 84 const char * kobject_name(const struct kobject *kobj); 441 void * kmalloc(size_t size, gfp_t flags); 888 void * lowmem_page_address(const struct page *page); 20 void flush_kernel_dcache_page(struct page *page); 56 void * kmap(struct page *page); 798 const char * dev_name(const struct device *dev); 913 void * dev_get_drvdata(const struct device *); 914 int dev_set_drvdata(struct device *, void *); 1029 int dev_err(const struct device *, const char *, ...); 1031 int dev_warn(const struct device *, const char *, ...); 1035 int _dev_info(const struct device *, const char *, ...); 95 struct page * sg_page(struct scatterlist *sg); 63 int valid_dma_direction(int dma_direction); 44 void debug_dma_unmap_page(struct device *, dma_addr_t , size_t , int, bool ); 59 void debug_dma_sync_single_for_cpu(struct device *, dma_addr_t , size_t , int); 63 void debug_dma_sync_single_for_device(struct device *, dma_addr_t , size_t , int); 30 extern struct dma_map_ops *dma_ops; 32 struct dma_map_ops * get_dma_ops(struct device *dev); 32 dma_addr_t ldv_dma_map_single_attrs_2(struct device *dev, void *ptr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs); 37 void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs); 97 dma_addr_t ldv_dma_map_page_1(struct device *dev, struct page *page, size_t offset, size_t size, enum dma_data_direction dir); 98 void dma_unmap_page(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir); 109 void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir); 121 void dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir); 12 u8 crc7(u8 , const u8 *, size_t ); 20 
u16 crc_itu_t(u16 , const u8 *, size_t ); 368 struct mmc_host * mmc_alloc_host(int, struct device *); 369 int mmc_add_host(struct mmc_host *); 370 void mmc_remove_host(struct mmc_host *); 371 void mmc_free_host(struct mmc_host *); 374 void * mmc_priv(struct mmc_host *host); 388 void mmc_detect_change(struct mmc_host *, unsigned long); 389 void mmc_request_done(struct mmc_host *, struct mmc_request *); 16 int mmc_gpio_get_ro(struct mmc_host *); 17 int mmc_gpio_request_ro(struct mmc_host *, unsigned int); 20 int mmc_gpio_get_cd(struct mmc_host *); 21 int mmc_gpio_request_cd(struct mmc_host *, unsigned int, unsigned int); 657 void spi_message_init(struct spi_message *m); 664 void spi_message_add_tail(struct spi_transfer *t, struct spi_message *m); 722 int spi_setup(struct spi_device *); 734 int spi_sync(struct spi_device *, struct spi_message *); 735 int spi_sync_locked(struct spi_device *, struct spi_message *); 736 int spi_bus_lock(struct spi_master *); 737 int spi_bus_unlock(struct spi_master *); 750 int spi_write(struct spi_device *spi, const void *buf, size_t len); 57 struct mmc_spi_platform_data * mmc_spi_get_pdata(struct spi_device *spi); 61 void mmc_spi_put_pdata(struct spi_device *spi); 165 int mmc_cs_off(struct mmc_spi_host *host); 172 int mmc_spi_readbytes(struct mmc_spi_host *host, unsigned int len); 198 int mmc_spi_skip(struct mmc_spi_host *host, unsigned long timeout, unsigned int n, u8 byte); 231 int mmc_spi_wait_unbusy(struct mmc_spi_host *host, unsigned long timeout); 236 int mmc_spi_readtoken(struct mmc_spi_host *host, unsigned long timeout); 251 char * maptype(struct mmc_command *cmd); 263 int mmc_spi_response_get(struct mmc_spi_host *host, struct mmc_command *cmd, int cs_on); 446 int mmc_spi_command_send(struct mmc_spi_host *host, struct mmc_request *mrq, struct mmc_command *cmd, int cs_on); 575 void mmc_spi_setup_data_message(struct mmc_spi_host *host, int multiple, enum dma_data_direction direction); 675 int mmc_spi_writeblock(struct mmc_spi_host *host, struct spi_transfer *t, unsigned long timeout); 785 int mmc_spi_readblock(struct mmc_spi_host *host, struct spi_transfer *t, unsigned long timeout); 885 void mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd, struct mmc_data *data, u32 blk_size); 1057 void mmc_spi_request(struct mmc_host *mmc, struct mmc_request *mrq); 1138 void mmc_spi_initsequence(struct mmc_spi_host *host); 1178 char * mmc_powerstring(u8 power_mode); 1188 void mmc_spi_set_ios(struct mmc_host *mmc, struct mmc_ios *ios); 1277 const struct mmc_host_ops mmc_spi_ops = { 0, 0, 0, 0, &mmc_spi_request, &mmc_spi_set_ios, &mmc_gpio_get_ro, &mmc_gpio_get_cd, 0, 0, 0, 0, 0, 0, 0, 0 }; 1292 irqreturn_t mmc_spi_detect_irq(int irq, void *mmc); 1301 int mmc_spi_probe(struct spi_device *spi); 1484 int mmc_spi_remove(struct spi_device *spi); 1555 void ldv_check_final_state(); 1558 void ldv_check_return_value(int); 1561 void ldv_check_return_value_probe(int); 1564 void ldv_initialize(); 1567 void ldv_handler_precall(); 1570 int nondet_int(); 1573 int LDV_IN_INTERRUPT = 0; 1576 void ldv_main0_sequence_infinite_withcheck_stateful(); 10 void ldv_error(); 25 int ldv_undef_int(); 26 void * ldv_undef_ptr(); 8 int LDV_DMA_MAP_CALLS = 0; 25 int ldv_dma_mapping_error(struct device *dev, dma_addr_t dma_addr); 41 dma_addr_t ldv_dma_map_single(struct device *dev, void *cpu_addr, size_t size, enum dma_data_direction dir); return ; } { 1578 struct mmc_host *var_group1; 1579 struct mmc_request *var_group2; 1580 struct mmc_ios *var_group3; 1581 struct spi_device 
*var_group4; 1582 int res_mmc_spi_probe_17; 1583 int var_mmc_spi_detect_irq_16_p0; 1584 void *var_mmc_spi_detect_irq_16_p1; 1585 int ldv_s_mmc_spi_driver_spi_driver; 1586 int tmp; 1587 int tmp___0; 1693 ldv_s_mmc_spi_driver_spi_driver = 0; 1681 LDV_IN_INTERRUPT = 1; 1698 goto ldv_31321; 1698 tmp___0 = nondet_int() { /* Function call is skipped due to function is undefined */} 1701 goto ldv_31320; 1699 ldv_31320:; 1702 tmp = nondet_int() { /* Function call is skipped due to function is undefined */} 1702 switch (tmp) { } 1303 void *ones; 1304 struct mmc_host *mmc; 1305 struct mmc_spi_host *host; 1306 int status; 1307 _Bool has_ro; 1308 struct _ddebug descriptor; 1309 long tmp; 1310 void *tmp___0; 1311 void *tmp___1; 1312 struct device *dev; 1313 const char *tmp___2; 1307 has_ro = 0; 1312 int __CPAchecker_TMP_0 = (int)(spi->master->flags); 1321 unsigned int __CPAchecker_TMP_1 = (unsigned int)(spi->mode); 1323 spi->bits_per_word = 8U; 1325 status = spi_setup(spi) { /* Function call is skipped due to function is undefined */} 1339 status = -12; { 443 void *tmp___2; 458 tmp___2 = __kmalloc(size, flags) { /* Function call is skipped due to function is undefined */} } 1343 memset(ones, 255, 512UL) { /* Function call is skipped due to function is undefined */} 1345 mmc = mmc_alloc_host(568, &(spi->dev)) { /* Function call is skipped due to function is undefined */} 1349 mmc->ops = &mmc_spi_ops; 1350 mmc->max_blk_size = 512U; 1351 mmc->max_segs = 128U; 1352 mmc->max_req_size = 65536U; 1353 mmc->max_blk_count = 128U; 1355 mmc->caps = 16U; 1365 mmc->f_min = 400000U; 1366 mmc->f_max = spi->max_speed_hz; 1368 host = (struct mmc_spi_host *)tmp___0; 1369 host->mmc = mmc; 1370 host->spi = spi; 1372 host->ones = ones; 1378 unsigned long __CPAchecker_TMP_3 = (unsigned long)(host->pdata); 1379 mmc->ocr_avail = host->pdata->ocr_mask; 1384 unsigned long __CPAchecker_TMP_4 = (unsigned long)(host->pdata); 1384 unsigned long __CPAchecker_TMP_5 = (unsigned long)(host->pdata->setpower); 1384 assume(!(__CPAchecker_TMP_5 != ((unsigned long)((void (*)(struct device *, unsigned int))0)))); 1390 dev_set_drvdata(&(spi->dev), (void *)mmc) { /* Function call is skipped due to function is undefined */} { 443 void *tmp___2; 458 tmp___2 = __kmalloc(size, flags) { /* Function call is skipped due to function is undefined */} } 1393 host->data = (struct scratch *)tmp___1; 1394 unsigned long __CPAchecker_TMP_8 = (unsigned long)(host->data); 1397 unsigned long __CPAchecker_TMP_9 = (unsigned long)(spi->master->dev.parent->dma_mask); 1398 dev = spi->master->dev.parent; 1400 host->dma_dev = dev; { 20 unsigned long long tmp; { } 58 unsigned long long nonedetermined; 59 void *tmp; 58 tmp = ldv_undef_ptr() { /* Function call is skipped due to function is undefined */} 58 nonedetermined = (dma_addr_t )tmp; 63 LDV_DMA_MAP_CALLS = LDV_DMA_MAP_CALLS + 1; } 1403 void *__CPAchecker_TMP_10 = (void *)(host->data); { } 20 unsigned long long tmp; { } 58 unsigned long long nonedetermined; 59 void *tmp; 58 tmp = ldv_undef_ptr() { /* Function call is skipped due to function is undefined */} 58 nonedetermined = (dma_addr_t )tmp; }

Source code 1
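Editor's note on the defect shown in the trace above: it ends after two calls into the ldv_dma_map_single() model, each of which increments LDV_DMA_MAP_CALLS without a subsequent ldv_dma_mapping_error() check; an unchecked DMA mapping is exactly what this rule flags. A minimal sketch of the checked pattern, using the stock dma_map_single()/dma_mapping_error() API (the error label is hypothetical):

/* map the scratch buffer once at probe time, and verify the returned
 * handle before it is ever handed to hardware */
host->data_dma = dma_map_single(dev, host->data, sizeof(*host->data),
			DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, host->data_dma))
	goto fail_dma;		/* hypothetical unwind label */
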
2 /*
3 * mmc_spi.c - Access SD/MMC cards through SPI master controllers
4 *
5 * (C) Copyright 2005, Intec Automation,
6 * Mike Lavender (mike@steroidmicros)
7 * (C) Copyright 2006-2007, David Brownell
8 * (C) Copyright 2007, Axis Communications,
9 * Hans-Peter Nilsson (hp@axis.com)
10 * (C) Copyright 2007, ATRON electronic GmbH,
11 * Jan Nikitenko <jan.nikitenko@gmail.com>
12 *
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2 of the License, or
17 * (at your option) any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, write to the Free Software
26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
27 */
28 #include <linux/sched.h>
29 #include <linux/delay.h>
30 #include <linux/slab.h>
31 #include <linux/module.h>
32 #include <linux/bio.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/crc7.h>
35 #include <linux/crc-itu-t.h>
36 #include <linux/scatterlist.h>
37
38 #include <linux/mmc/host.h>
39 #include <linux/mmc/mmc.h> /* for R1_SPI_* bit values */
40 #include <linux/mmc/slot-gpio.h>
41
42 #include <linux/spi/spi.h>
43 #include <linux/spi/mmc_spi.h>
44
45 #include <asm/unaligned.h>
46
47
48 /* NOTES:
49 *
50 * - For now, we won't try to interoperate with a real mmc/sd/sdio
51 * controller, although some of them do have hardware support for
52 * SPI protocol. The main reason for such configs would be mmc-ish
53 * cards like DataFlash, which don't support that "native" protocol.
54 *
55 * We don't have a "DataFlash/MMC/SD/SDIO card slot" abstraction to
56 * switch between driver stacks, and in any case if "native" mode
57 * is available, it will be faster and hence preferable.
58 *
59 * - MMC depends on a different chipselect management policy than the
60 * SPI interface currently supports for shared bus segments: it needs
61 * to issue multiple spi_message requests with the chipselect active,
62 * using the results of one message to decide the next one to issue.
63 *
64 * Pending updates to the programming interface, this driver expects
65 * that it not share the bus with other drivers (precluding conflicts).
66 *
67 * - We tell the controller to keep the chipselect active from the
68 * beginning of an mmc_host_ops.request until the end. So beware
69 * of SPI controller drivers that mis-handle the cs_change flag!
70 *
71 * However, many cards seem OK with chipselect flapping up/down
72 * during that time ... at least on unshared bus segments.
73 */
74
75
76 /*
77 * Local protocol constants, internal to data block protocols.
78 */
79
80 /* Response tokens used to ack each block written: */
81 #define SPI_MMC_RESPONSE_CODE(x) ((x) & 0x1f)
82 #define SPI_RESPONSE_ACCEPTED ((2 << 1)|1)
83 #define SPI_RESPONSE_CRC_ERR ((5 << 1)|1)
84 #define SPI_RESPONSE_WRITE_ERR ((6 << 1)|1)
85
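As a reading aid (not part of the driver), here is roughly how these tokens classify the one-byte data response a card returns after each written block, using the macros just defined; the helper name is hypothetical, and the real check lives in the write path further down:

static int classify_data_response(u8 byte)
{
	switch (SPI_MMC_RESPONSE_CODE(byte)) {
	case SPI_RESPONSE_ACCEPTED:		/* 0b00101: block accepted */
		return 0;
	case SPI_RESPONSE_CRC_ERR:		/* 0b01011: CRC rejected */
		return -EILSEQ;
	case SPI_RESPONSE_WRITE_ERR:		/* 0b01101: write failed */
		return -EIO;
	default:				/* anything else is protocol noise */
		return -EPROTO;
	}
}
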
86 /* Read and write blocks start with these tokens and end with crc;
87 * on error, read tokens act like a subset of R2_SPI_* values.
88 */
89 #define SPI_TOKEN_SINGLE 0xfe /* single block r/w, multiblock read */
90 #define SPI_TOKEN_MULTI_WRITE 0xfc /* multiblock write */
91 #define SPI_TOKEN_STOP_TRAN 0xfd /* terminate multiblock write */
92
93 #define MMC_SPI_BLOCKSIZE 512
94
95
96 /* These fixed timeouts come from the latest SD specs, which say to ignore
97 * the CSD values. The R1B value is for card erase (e.g. the "I forgot the
98 * card's password" scenario); it's mostly applied to STOP_TRANSMISSION after
99 * reads which takes nowhere near that long. Older cards may be able to use
100 * shorter timeouts ... but why bother?
101 */
102 #define r1b_timeout (HZ * 3)
103
104 /* One of the critical speed parameters is the amount of data which may
105 * be transferred in one command. If this value is too low, the SD card
106 * controller has to do multiple partial block writes (argggh!). With
107 * today (2008) SD cards there is little speed gain if we transfer more
108 * than 64 KBytes at a time. So use this value until there is any indication
109 * that we should do more here.
110 */
111 #define MMC_SPI_BLOCKSATONCE 128
112
113 /****************************************************************************/
114
115 /*
116 * Local Data Structures
117 */
118
119 /* "scratch" is per-{command,block} data exchanged with the card */
120 struct scratch {
121 u8 status[29];
122 u8 data_token;
123 __be16 crc_val;
124 };
125
126 struct mmc_spi_host {
127 struct mmc_host *mmc;
128 struct spi_device *spi;
129
130 unsigned char power_mode;
131 u16 powerup_msecs;
132
133 struct mmc_spi_platform_data *pdata;
134
135 /* for bulk data transfers */
136 struct spi_transfer token, t, crc, early_status;
137 struct spi_message m;
138
139 /* for status readback */
140 struct spi_transfer status;
141 struct spi_message readback;
142
143 /* underlying DMA-aware controller, or null */
144 struct device *dma_dev;
145
146 /* buffer used for commands and for message "overhead" */
147 struct scratch *data;
148 dma_addr_t data_dma;
149
150 /* Specs say to write ones most of the time, even when the card
151 * has no need to read its input data; and many cards won't care.
152 * This is our source of those ones.
153 */
154 void *ones;
155 dma_addr_t ones_dma;
156 };
157
158
159 /****************************************************************************/
160
161 /*
162 * MMC-over-SPI protocol glue, used by the MMC stack interface
163 */
164
165 static inline int mmc_cs_off(struct mmc_spi_host *host)
166 {
167 /* chipselect will always be inactive after setup() */
168 return spi_setup(host->spi);
169 }
170
171 static int
172 mmc_spi_readbytes(struct mmc_spi_host *host, unsigned len)
173 {
174 int status;
175
176 if (len > sizeof(*host->data)) {
177 WARN_ON(1);
178 return -EIO;
179 }
180
181 host->status.len = len;
182
183 if (host->dma_dev)
184 dma_sync_single_for_device(host->dma_dev,
185 host->data_dma, sizeof(*host->data),
186 DMA_FROM_DEVICE);
187
188 status = spi_sync_locked(host->spi, &host->readback);
189
190 if (host->dma_dev)
191 dma_sync_single_for_cpu(host->dma_dev,
192 host->data_dma, sizeof(*host->data),
193 DMA_FROM_DEVICE);
194
195 return status;
196 }
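
The sync pair in this function is the standard streaming-DMA idiom for a persistently mapped buffer; schematically (an editor's sketch, not driver code):

	dma_sync_single_for_device(dev, handle, len, dir);	/* hand the buffer to hardware */
	/* ... the SPI transfer runs ... */
	dma_sync_single_for_cpu(dev, handle, len, dir);		/* reclaim it for the CPU */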
197
198 static int mmc_spi_skip(struct mmc_spi_host *host, unsigned long timeout,
199 unsigned n, u8 byte)
200 {
201 u8 *cp = host->data->status;
202 unsigned long start = jiffies;
203
204 while (1) {
205 int status;
206 unsigned i;
207
208 status = mmc_spi_readbytes(host, n);
209 if (status < 0)
210 return status;
211
212 for (i = 0; i < n; i++) {
213 if (cp[i] != byte)
214 return cp[i];
215 }
216
217 if (time_is_before_jiffies(start + timeout))
218 break;
219
220 /* If we need long timeouts, we may release the CPU.
221 * We use jiffies here because we want to have a relation
222 * between elapsed time and the blocking of the scheduler.
223 */
224 if (time_is_before_jiffies(start+1))
225 schedule();
226 }
227 return -ETIMEDOUT;
228 }
229
230 static inline int
231 mmc_spi_wait_unbusy(struct mmc_spi_host *host, unsigned long timeout)
232 {
233 return mmc_spi_skip(host, timeout, sizeof(host->data->status), 0);
234 }
235
236 static int mmc_spi_readtoken(struct mmc_spi_host *host, unsigned long timeout)
237 {
238 return mmc_spi_skip(host, timeout, 1, 0xff);
239 }
240
241
242 /*
243 * Note that for SPI, cmd->resp[0] is not the same data as "native" protocol
244 * hosts return! The low byte holds R1_SPI bits. The next byte may hold
245 * R2_SPI bits ... for SEND_STATUS, or after data read errors.
246 *
247 * cmd->resp[1] holds any four-byte response, for R3 (READ_OCR) and on
248 * newer cards R7 (IF_COND).
249 */
250
251 static char *maptype(struct mmc_command *cmd)
252 {
253 switch (mmc_spi_resp_type(cmd)) {
254 case MMC_RSP_SPI_R1: return "R1";
255 case MMC_RSP_SPI_R1B: return "R1B";
256 case MMC_RSP_SPI_R2: return "R2/R5";
257 case MMC_RSP_SPI_R3: return "R3/R4/R7";
258 default: return "?";
259 }
260 }
261
262 /* return zero, else negative errno after setting cmd->error */
263 static int mmc_spi_response_get(struct mmc_spi_host *host,
264 struct mmc_command *cmd, int cs_on)
265 {
266 u8 *cp = host->data->status;
267 u8 *end = cp + host->t.len;
268 int value = 0;
269 int bitshift;
270 u8 leftover = 0;
271 unsigned short rotator;
272 int i;
273 char tag[32];
274
275 snprintf(tag, sizeof(tag), " ... CMD%d response SPI_%s",
276 cmd->opcode, maptype(cmd));
277
278 /* Except for data block reads, the whole response will already
279 * be stored in the scratch buffer. It's somewhere after the
280 * command and the first byte we read after it. We ignore that
281 * first byte. After STOP_TRANSMISSION command it may include
282 * two data bits, but otherwise it's all ones.
283 */
284 cp += 8;
285 while (cp < end && *cp == 0xff)
286 cp++;
287
288 /* Data block reads (R1 response types) may need more data... */
289 if (cp == end) {
290 cp = host->data->status;
291 end = cp+1;
292
293 /* Card sends N(CR) (== 1..8) bytes of all-ones then one
294 * status byte ... and we already scanned 2 bytes.
295 *
296 * REVISIT block read paths use nasty byte-at-a-time I/O
297 * so it can always DMA directly into the target buffer.
298 * It'd probably be better to memcpy() the first chunk and
299 * avoid extra i/o calls...
300 *
301 * Note we check for more than 8 bytes, because in practice,
302 * some SD cards are slow...
303 */
304 for (i = 2; i < 16; i++) {
305 value = mmc_spi_readbytes(host, 1);
306 if (value < 0)
307 goto done;
308 if (*cp != 0xff)
309 goto checkstatus;
310 }
311 value = -ETIMEDOUT;
312 goto done;
313 }
314
315 checkstatus:
316 bitshift = 0;
317 if (*cp & 0x80) {
318 /* Houston, we have an ugly card with a bit-shifted response */
319 rotator = *cp++ << 8;
320 /* read the next byte */
321 if (cp == end) {
322 value = mmc_spi_readbytes(host, 1);
323 if (value < 0)
324 goto done;
325 cp = host->data->status;
326 end = cp+1;
327 }
328 rotator |= *cp++;
329 while (rotator & 0x8000) {
330 bitshift++;
331 rotator <<= 1;
332 }
333 cmd->resp[0] = rotator >> 8;
334 leftover = rotator;
335 } else {
336 cmd->resp[0] = *cp++;
337 }
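/* Editor's worked example (not in the original source): an R1 of 0x01
 * arriving three bits late reads as 0xe0 0x3f, so rotator == 0xe03f;
 * three left shifts clear bit 15 (0xe03f -> 0xc07e -> 0x80fc -> 0x01f8),
 * leaving bitshift == 3 and resp[0] == 0x01f8 >> 8 == 0x01. */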
338 cmd->error = 0;
339
340 /* Status byte: the entire seven-bit R1 response. */
341 if (cmd->resp[0] != 0) {
342 if ((R1_SPI_PARAMETER | R1_SPI_ADDRESS)
343 & cmd->resp[0])
344 value = -EFAULT; /* Bad address */
345 else if (R1_SPI_ILLEGAL_COMMAND & cmd->resp[0])
346 value = -ENOSYS; /* Function not implemented */
347 else if (R1_SPI_COM_CRC & cmd->resp[0])
348 value = -EILSEQ; /* Illegal byte sequence */
349 else if ((R1_SPI_ERASE_SEQ | R1_SPI_ERASE_RESET)
350 & cmd->resp[0])
351 value = -EIO; /* I/O error */
352 /* else R1_SPI_IDLE, "it's resetting" */
353 }
354
355 switch (mmc_spi_resp_type(cmd)) {
356
357 /* SPI R1B == R1 + busy; STOP_TRANSMISSION (for multiblock reads)
358 * and less-common stuff like various erase operations.
359 */
360 case MMC_RSP_SPI_R1B:
361 /* maybe we read all the busy tokens already */
362 while (cp < end && *cp == 0)
363 cp++;
364 if (cp == end)
365 mmc_spi_wait_unbusy(host, r1b_timeout);
366 break;
367
368 /* SPI R2 == R1 + second status byte; SEND_STATUS
369 * SPI R5 == R1 + data byte; IO_RW_DIRECT
370 */
371 case MMC_RSP_SPI_R2:
372 /* read the next byte */
373 if (cp == end) {
374 value = mmc_spi_readbytes(host, 1);
375 if (value < 0)
376 goto done;
377 cp = host->data->status;
378 end = cp+1;
379 }
380 if (bitshift) {
381 rotator = leftover << 8;
382 rotator |= *cp << bitshift;
383 cmd->resp[0] |= (rotator & 0xFF00);
384 } else {
385 cmd->resp[0] |= *cp << 8;
386 }
387 break;
388
389 /* SPI R3, R4, or R7 == R1 + 4 bytes */
390 case MMC_RSP_SPI_R3:
391 rotator = leftover << 8;
392 cmd->resp[1] = 0;
393 for (i = 0; i < 4; i++) {
394 cmd->resp[1] <<= 8;
395 /* read the next byte */
396 if (cp == end) {
397 value = mmc_spi_readbytes(host, 1);
398 if (value < 0)
399 goto done;
400 cp = host->data->status;
401 end = cp+1;
402 }
403 if (bitshift) {
404 rotator |= *cp++ << bitshift;
405 cmd->resp[1] |= (rotator >> 8);
406 rotator <<= 8;
407 } else {
408 cmd->resp[1] |= *cp++;
409 }
410 }
411 break;
412
413 /* SPI R1 == just one status byte */
414 case MMC_RSP_SPI_R1:
415 break;
416
417 default:
418 dev_dbg(&host->spi->dev, "bad response type %04x\n",
419 mmc_spi_resp_type(cmd));
420 if (value >= 0)
421 value = -EINVAL;
422 goto done;
423 }
424
425 if (value < 0)
426 dev_dbg(&host->spi->dev, "%s: resp %04x %08x\n",
427 tag, cmd->resp[0], cmd->resp[1]);
428
429 /* disable chipselect on errors and some success cases */
430 if (value >= 0 && cs_on)
431 return value;
432 done:
433 if (value < 0)
434 cmd->error = value;
435 mmc_cs_off(host);
436 return value;
437 }
438
439 /* Issue command and read its response.
440 * Returns zero on success, negative for error.
441 *
442 * On error, caller must cope with mmc core retry mechanism. That
443 * means immediate low-level resubmit, which affects the bus lock...
444 */
445 static int
446 mmc_spi_command_send(struct mmc_spi_host *host,
447 struct mmc_request *mrq,
448 struct mmc_command *cmd, int cs_on)
449 {
450 struct scratch *data = host->data;
451 u8 *cp = data->status;
452 u32 arg = cmd->arg;
453 int status;
454 struct spi_transfer *t;
455
456 /* We can handle most commands (except block reads) in one full
457 * duplex I/O operation before either starting the next transfer
458 * (data block or command) or else deselecting the card.
459 *
460 * First, write 7 bytes:
461 * - an all-ones byte to ensure the card is ready
462 * - opcode byte (plus start and transmission bits)
463 * - four bytes of big-endian argument
464 * - crc7 (plus end bit) ... always computed, it's cheap
465 *
466 * We init the whole buffer to all-ones, which is what we need
467 * to write while we're reading (later) response data.
468 */
469 memset(cp++, 0xff, sizeof(data->status));
470
471 *cp++ = 0x40 | cmd->opcode;
472 *cp++ = (u8)(arg >> 24);
473 *cp++ = (u8)(arg >> 16);
474 *cp++ = (u8)(arg >> 8);
475 *cp++ = (u8)arg;
476 *cp++ = (crc7(0, &data->status[1], 5) << 1) | 0x01;
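/* Editor's note (not in the original source): for CMD0 with a zero
 * argument these six bytes are 0x40 0x00 0x00 0x00 0x00 0x95, the
 * canonical reset frame, since crc7() yields 0x4a and
 * (0x4a << 1) | 0x01 == 0x95. */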
477
478 /* Then, read up to 13 bytes (while writing all-ones):
479 * - N(CR) (== 1..8) bytes of all-ones
480 * - status byte (for all response types)
481 * - the rest of the response, either:
482 * + nothing, for R1 or R1B responses
483 * + second status byte, for R2 responses
484 * + four data bytes, for R3 and R7 responses
485 *
486 * Finally, read some more bytes ... in the nice cases we know in
487 * advance how many, and reading 1 more is always OK:
488 * - N(EC) (== 0..N) bytes of all-ones, before deselect/finish
489 * - N(RC) (== 1..N) bytes of all-ones, before next command
490 * - N(WR) (== 1..N) bytes of all-ones, before data write
491 *
492 * So in those cases one full duplex I/O of at most 21 bytes will
493 * handle the whole command, leaving the card ready to receive a
494 * data block or new command. We do that whenever we can, shaving
495 * CPU and IRQ costs (especially when using DMA or FIFOs).
496 *
497 * There are two other cases, where it's not generally practical
498 * to rely on a single I/O:
499 *
500 * - R1B responses need at least N(EC) bytes of all-zeroes.
501 *
502 * In this case we can *try* to fit it into one I/O, then
503 * maybe read more data later.
504 *
505 * - Data block reads are more troublesome, since a variable
506 * number of padding bytes precede the token and data.
507 * + N(CX) (== 0..8) bytes of all-ones, before CSD or CID
508 * + N(AC) (== 1..many) bytes of all-ones
509 *
510 * In this case we currently only have minimal speedups here:
511 * when N(CR) == 1 we can avoid I/O in response_get().
512 */
513 if (cs_on && (mrq->data->flags & MMC_DATA_READ)) {
514 cp += 2; /* min(N(CR)) + status */
515 /* R1 */
516 } else {
517 cp += 10; /* max(N(CR)) + status + min(N(RC),N(WR)) */
518 if (cmd->flags & MMC_RSP_SPI_S2) /* R2/R5 */
519 cp++;
520 else if (cmd->flags & MMC_RSP_SPI_B4) /* R3/R4/R7 */
521 cp += 4;
522 else if (cmd->flags & MMC_RSP_BUSY) /* R1B */
523 cp = data->status + sizeof(data->status);
524 /* else: R1 (most commands) */
525 }
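/* Editor's note (not in the original source): given the seven command
 * bytes written above, t->len below works out to 9 for the data-read
 * case, 17 for R1, 18 for R2/R5, 21 for R3/R4/R7, and the full
 * 29-byte status buffer for R1B. */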
526
527 dev_dbg(&host->spi->dev, " mmc_spi: CMD%d, resp %s\n",
528 cmd->opcode, maptype(cmd));
529
530 /* send command, leaving chipselect active */
531 spi_message_init(&host->m);
532
533 t = &host->t;
534 memset(t, 0, sizeof(*t));
535 t->tx_buf = t->rx_buf = data->status;
536 t->tx_dma = t->rx_dma = host->data_dma;
537 t->len = cp - data->status;
538 t->cs_change = 1;
539 spi_message_add_tail(t, &host->m);
540
541 if (host->dma_dev) {
542 host->m.is_dma_mapped = 1;
543 dma_sync_single_for_device(host->dma_dev,
544 host->data_dma, sizeof(*host->data),
545 DMA_BIDIRECTIONAL);
546 }
547 status = spi_sync_locked(host->spi, &host->m);
548
549 if (host->dma_dev)
550 dma_sync_single_for_cpu(host->dma_dev,
551 host->data_dma, sizeof(*host->data),
552 DMA_BIDIRECTIONAL);
553 if (status < 0) {
554 dev_dbg(&host->spi->dev, " ... write returned %d\n", status);
555 cmd->error = status;
556 return status;
557 }
558
559 /* after no-data commands and STOP_TRANSMISSION, chipselect off */
560 return mmc_spi_response_get(host, cmd, cs_on);
561 }
562
563 /* Build data message with up to four separate transfers. For TX, we
564 * start by writing the data token. And in most cases, we finish with
565 * a status transfer.
566 *
567 * We always provide TX data for data and CRC. The MMC/SD protocol
568 * requires us to write ones; but Linux defaults to writing zeroes;
569 * so we explicitly initialize it to all ones on RX paths.
570 *
571 * We also handle DMA mapping, so the underlying SPI controller does
572 * not need to (re)do it for each message.
573 */
574 static void
575 mmc_spi_setup_data_message(
576 struct mmc_spi_host *host,
577 int multiple,
578 enum dma_data_direction direction)
579 {
580 struct spi_transfer *t;
581 struct scratch *scratch = host->data;
582 dma_addr_t dma = host->data_dma;
583
584 spi_message_init(&host->m);
585 if (dma)
586 host->m.is_dma_mapped = 1;
587
588 /* for reads, readblock() skips 0xff bytes before finding
589 * the token; for writes, this transfer issues that token.
590 */
591 if (direction == DMA_TO_DEVICE) {
592 t = &host->token;
593 memset(t, 0, sizeof(*t));
594 t->len = 1;
595 if (multiple)
596 scratch->data_token = SPI_TOKEN_MULTI_WRITE;
597 else
598 scratch->data_token = SPI_TOKEN_SINGLE;
599 t->tx_buf = &scratch->data_token;
600 if (dma)
601 t->tx_dma = dma + offsetof(struct scratch, data_token);
602 spi_message_add_tail(t, &host->m);
603 }
604
605 /* Body of transfer is buffer, then CRC ...
606 * either TX-only, or RX with TX-ones.
607 */
608 t = &host->t;
609 memset(t, 0, sizeof(*t));
610 t->tx_buf = host->ones;
611 t->tx_dma = host->ones_dma;
612 /* length and actual buffer info are written later */
613 spi_message_add_tail(t, &host->m);
614
615 t = &host->crc;
616 memset(t, 0, sizeof(*t));
617 t->len = 2;
618 if (direction == DMA_TO_DEVICE) {
619 /* the actual CRC may get written later */
620 t->tx_buf = &scratch->crc_val;
621 if (dma)
622 t->tx_dma = dma + offsetof(struct scratch, crc_val);
623 } else {
624 t->tx_buf = host->ones;
625 t->tx_dma = host->ones_dma;
626 t->rx_buf = &scratch->crc_val;
627 if (dma)
628 t->rx_dma = dma + offsetof(struct scratch, crc_val);
629 }
630 spi_message_add_tail(t, &host->m);
631
632 /*
633 * A single block read is followed by N(EC) [0+] all-ones bytes
634 * before deselect ... don't bother.
635 *
636 * Multiblock reads are followed by N(AC) [1+] all-ones bytes before
637 * the next block is read, or a STOP_TRANSMISSION is issued. We'll
638 * collect that single byte, so readblock() doesn't need to.
639 *
640 * For a write, the one-byte data response follows immediately, then
641 * come zero or more busy bytes, then N(WR) [1+] all-ones bytes.
642 * Then single block writes may deselect, and multiblock ones issue
643 * the next token (next data block, or STOP_TRAN). We can try to
644 * minimize I/O ops by using a single read to collect end-of-busy.
645 */
646 if (multiple || direction == DMA_TO_DEVICE) {
647 t = &host->early_status;
648 memset(t, 0, sizeof(*t));
649 t->len = (direction == DMA_TO_DEVICE)
650 ? sizeof(scratch->status)
651 : 1;
652 t->tx_buf = host->ones;
653 t->tx_dma = host->ones_dma;
654 t->rx_buf = scratch->status;
655 if (dma)
656 t->rx_dma = dma + offsetof(struct scratch, status);
657 t->cs_change = 1;
658 spi_message_add_tail(t, &host->m);
659 }
660 }
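
/* For reference, the message built above ends up as (byte counts):
 *
 *	write:           token(1) + data(n) + crc(2)
 *	                 + status(sizeof scratch->status, cs_change)
 *	single read:     data(n, tx = ones) + crc(2)
 *	multiblock read: data(n) + crc(2) + status(1, cs_change)
 *
 * where n is filled in per block by mmc_spi_data_do().
 */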
661
662 /*
663 * Write one block:
664 * - caller handled preceding N(WR) [1+] all-ones bytes
665 * - data block
666 * + token
667 * + data bytes
668 * + crc16
669 * - an all-ones byte ... card writes a data-response byte
670 * - followed by N(EC) [0+] all-ones bytes, card writes zero/'busy'
671 *
672 * Return negative errno, else success.
673 */
674 static int
675 mmc_spi_writeblock(struct mmc_spi_host *host, struct spi_transfer *t,
676 unsigned long timeout)
677 {
678 struct spi_device *spi = host->spi;
679 int status, i;
680 struct scratch *scratch = host->data;
681 u32 pattern;
682
683 if (host->mmc->use_spi_crc)
684 scratch->crc_val = cpu_to_be16(
685 crc_itu_t(0, t->tx_buf, t->len));
686 if (host->dma_dev)
687 dma_sync_single_for_device(host->dma_dev,
688 host->data_dma, sizeof(*scratch),
689 DMA_BIDIRECTIONAL);
690
691 status = spi_sync_locked(spi, &host->m);
692
693 if (status != 0) {
694 dev_dbg(&spi->dev, "write error (%d)\n", status);
695 return status;
696 }
697
698 if (host->dma_dev)
699 dma_sync_single_for_cpu(host->dma_dev,
700 host->data_dma, sizeof(*scratch),
701 DMA_BIDIRECTIONAL);
702
703 /*
704 * Get the transmission data-response reply. It must follow
705 * immediately after the data block we transferred. This reply
706 * doesn't necessarily tell whether the write operation succeeded;
707 * it just says if the transmission was ok and whether *earlier*
708 * writes succeeded; see the standard.
709 *
710 * In practice, there are (even modern SDHC-)cards which are late
711 * in sending the response, and miss the time frame by a few bits,
712 * so we have to cope with this situation and check the response
713 * bit-by-bit. Arggh!!!
714 */
715 pattern = scratch->status[0] << 24;
716 pattern |= scratch->status[1] << 16;
717 pattern |= scratch->status[2] << 8;
718 pattern |= scratch->status[3];
719
720 /* First 3 bits of the pattern are undefined */
721 pattern |= 0xE0000000;
722
723 /* left-adjust to leading 0 bit */
724 while (pattern & 0x80000000)
725 pattern <<= 1;
726 /* right-adjust for pattern matching. Code is in bits 4..0 now. */
727 pattern >>= 27;
728
729 switch (pattern) {
730 case SPI_RESPONSE_ACCEPTED:
731 status = 0;
732 break;
733 case SPI_RESPONSE_CRC_ERR:
734 /* host shall then issue MMC_STOP_TRANSMISSION */
735 status = -EILSEQ;
736 break;
737 case SPI_RESPONSE_WRITE_ERR:
738 /* host shall then issue MMC_STOP_TRANSMISSION,
739 * and should MMC_SEND_STATUS to sort it out
740 */
741 status = -EIO;
742 break;
743 default:
744 status = -EPROTO;
745 break;
746 }
747 if (status != 0) {
748 dev_dbg(&spi->dev, "write error %02x (%d)\n",
749 scratch->status[0], status);
750 return status;
751 }
752
753 t->tx_buf += t->len;
754 if (host->dma_dev)
755 t->tx_dma += t->len;
756
757 /* Return when not busy. If we didn't collect that status yet,
758 * we'll need some more I/O.
759 */
760 for (i = 4; i < sizeof(scratch->status); i++) {
761 /* card is non-busy if the most recent bit is 1 */
762 if (scratch->status[i] & 0x01)
763 return 0;
764 }
765 return mmc_spi_wait_unbusy(host, timeout);
766 }
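
/*
 * A self-contained sketch (user-space C, made-up sample bytes) of the
 * bit-by-bit data-response recovery above: a card answering one bit
 * late turns an aligned 0xe5 ("accepted") response byte into the stream
 * f2 ff ff ff, and the shifting still extracts code 0x05.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t status[4] = { 0xf2, 0xff, 0xff, 0xff };
	uint32_t pattern;

	pattern  = (uint32_t)status[0] << 24;
	pattern |= (uint32_t)status[1] << 16;
	pattern |= (uint32_t)status[2] << 8;
	pattern |= status[3];

	pattern |= 0xE0000000;		/* first 3 bits are undefined */
	while (pattern & 0x80000000)	/* left-adjust to the leading 0 bit */
		pattern <<= 1;
	pattern >>= 27;			/* code now in bits 4..0 */

	printf("response code 0x%02x\n", (unsigned)pattern); /* 0x05, accepted */
	return 0;
}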
767
768 /*
769 * Read one block:
770 * - skip leading all-ones bytes ... either
771 * + N(AC) [1..f(clock,CSD)] usually, else
772 * + N(CX) [0..8] when reading CSD or CID
773 * - data block
774 * + token ... if error token, no data or crc
775 * + data bytes
776 * + crc16
777 *
778 * After single block reads, we're done; N(EC) [0+] all-ones bytes follow
779 * before dropping chipselect.
780 *
781 * For multiblock reads, caller either reads the next block or issues a
782 * STOP_TRANSMISSION command.
783 */
784 static int
785 mmc_spi_readblock(struct mmc_spi_host *host, struct spi_transfer *t,
786 unsigned long timeout)
787 {
788 struct spi_device *spi = host->spi;
789 int status;
790 struct scratch *scratch = host->data;
791 unsigned int bitshift;
792 u8 leftover;
793
794 /* At least one SD card sends an all-zeroes byte when N(CX)
795 * applies, before the all-ones bytes ... just cope with that.
796 */
797 status = mmc_spi_readbytes(host, 1);
798 if (status < 0)
799 return status;
800 status = scratch->status[0];
801 if (status == 0xff || status == 0)
802 status = mmc_spi_readtoken(host, timeout);
803
804 if (status < 0) {
805 dev_dbg(&spi->dev, "read error %02x (%d)\n", status, status);
806 return status;
807 }
808
809 /* The token may be bit-shifted...
810 * the first 0-bit precedes the data stream.
811 */
812 bitshift = 7;
813 while (status & 0x80) {
814 status <<= 1;
815 bitshift--;
816 }
817 leftover = status << 1;
818
819 if (host->dma_dev) {
820 dma_sync_single_for_device(host->dma_dev,
821 host->data_dma, sizeof(*scratch),
822 DMA_BIDIRECTIONAL);
823 dma_sync_single_for_device(host->dma_dev,
824 t->rx_dma, t->len,
825 DMA_FROM_DEVICE);
826 }
827
828 status = spi_sync_locked(spi, &host->m);
829
830 if (host->dma_dev) {
831 dma_sync_single_for_cpu(host->dma_dev,
832 host->data_dma, sizeof(*scratch),
833 DMA_BIDIRECTIONAL);
834 dma_sync_single_for_cpu(host->dma_dev,
835 t->rx_dma, t->len,
836 DMA_FROM_DEVICE);
837 }
838
839 if (bitshift) {
840 /* Walk through the data and the crc and do
841 * all the magic to get byte-aligned data.
842 */
843 u8 *cp = t->rx_buf;
844 unsigned int len;
845 unsigned int bitright = 8 - bitshift;
846 u8 temp;
847 for (len = t->len; len; len--) {
848 temp = *cp;
849 *cp++ = leftover | (temp >> bitshift);
850 leftover = temp << bitright;
851 }
852 cp = (u8 *) &scratch->crc_val;
853 temp = *cp;
854 *cp++ = leftover | (temp >> bitshift);
855 leftover = temp << bitright;
856 temp = *cp;
857 *cp = leftover | (temp >> bitshift);
858 }
859
860 if (host->mmc->use_spi_crc) {
861 u16 crc = crc_itu_t(0, t->rx_buf, t->len);
862
863 be16_to_cpus(&scratch->crc_val);
864 if (scratch->crc_val != crc) {
865 dev_dbg(&spi->dev, "read - crc error: crc_val=0x%04x, "
866 "computed=0x%04x len=%d\n",
867 scratch->crc_val, crc, t->len);
868 return -EILSEQ;
869 }
870 }
871
872 t->rx_buf += t->len;
873 if (host->dma_dev)
874 t->rx_dma += t->len;
875
876 return 0;
877 }
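
/*
 * A self-contained sketch (user-space C, made-up sample bytes) of the
 * byte-realignment loop above. With bitshift == 3, raw bytes f5 6f plus
 * three leftover bits 110 rescued from the token byte realign to de ad.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t raw[2] = { 0xf5, 0x6f };
	unsigned int bitshift = 3;
	unsigned int bitright = 8 - bitshift;
	uint8_t leftover = 0xc0;	/* the three rescued bits, left-aligned */
	unsigned int i;

	for (i = 0; i < sizeof(raw); i++) {
		uint8_t temp = raw[i];

		raw[i] = leftover | (temp >> bitshift);
		leftover = temp << bitright;
	}
	printf("%02x %02x\n", raw[0], raw[1]);	/* prints: de ad */
	return 0;
}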
878
879 /*
880 * An MMC/SD data stage includes one or more blocks, optional CRCs,
881 * and inline handshaking. That handshaking makes it unlike most
882 * other SPI protocol stacks.
883 */
884 static void
885 mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
886 struct mmc_data *data, u32 blk_size)
887 {
888 struct spi_device *spi = host->spi;
889 struct device *dma_dev = host->dma_dev;
890 struct spi_transfer *t;
891 enum dma_data_direction direction;
892 struct scatterlist *sg;
893 unsigned n_sg;
894 int multiple = (data->blocks > 1);
895 u32 clock_rate;
896 unsigned long timeout;
897
898 if (data->flags & MMC_DATA_READ)
899 direction = DMA_FROM_DEVICE;
900 else
901 direction = DMA_TO_DEVICE;
902 mmc_spi_setup_data_message(host, multiple, direction);
903 t = &host->t;
904
905 if (t->speed_hz)
906 clock_rate = t->speed_hz;
907 else
908 clock_rate = spi->max_speed_hz;
909
910 timeout = data->timeout_ns +
911 data->timeout_clks * 1000000 / clock_rate;
912 timeout = usecs_to_jiffies((unsigned int)(timeout / 1000)) + 1;
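	/* e.g. a card asking for timeout_ns = 300000000 and timeout_clks = 0
	 * yields 300000000 / 1000 = 300000 usec, i.e. 30 jiffies at HZ=100,
	 * plus one tick of rounding slack.
	 */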
913
914 /* Handle scatterlist segments one at a time, with a sync for
915 * each 512-byte block
916 */
917 for (sg = data->sg, n_sg = data->sg_len; n_sg; n_sg--, sg++) {
918 int status = 0;
919 dma_addr_t dma_addr = 0;
920 void *kmap_addr;
921 unsigned length = sg->length;
922 enum dma_data_direction dir = direction;
923
924 /* set up dma mapping for controller drivers that might
925 * use DMA ... though they may fall back to PIO
926 */
927 if (dma_dev) {
928 /* never invalidate whole *shared* pages ... */
929 if ((sg->offset != 0 || length != PAGE_SIZE)
930 && dir == DMA_FROM_DEVICE)
931 dir = DMA_BIDIRECTIONAL;
932
933 dma_addr = dma_map_page(dma_dev, sg_page(sg), 0,
934 PAGE_SIZE, dir);
935 if (direction == DMA_TO_DEVICE)
936 t->tx_dma = dma_addr + sg->offset;
937 else
938 t->rx_dma = dma_addr + sg->offset;
939 }
940
941 /* allow pio too; we don't allow highmem */
942 kmap_addr = kmap(sg_page(sg));
943 if (direction == DMA_TO_DEVICE)
944 t->tx_buf = kmap_addr + sg->offset;
945 else
946 t->rx_buf = kmap_addr + sg->offset;
947
948 /* transfer each block, and update request status */
949 while (length) {
950 t->len = min(length, blk_size);
951
952 dev_dbg(&host->spi->dev,
953 " mmc_spi: %s block, %d bytes\n",
954 (direction == DMA_TO_DEVICE)
955 ? "write"
956 : "read",
957 t->len);
958
959 if (direction == DMA_TO_DEVICE)
960 status = mmc_spi_writeblock(host, t, timeout);
961 else
962 status = mmc_spi_readblock(host, t, timeout);
963 if (status < 0)
964 break;
965
966 data->bytes_xfered += t->len;
967 length -= t->len;
968
969 if (!multiple)
970 break;
971 }
972
973 /* discard mappings */
974 if (direction == DMA_FROM_DEVICE)
975 flush_kernel_dcache_page(sg_page(sg));
976 kunmap(sg_page(sg));
977 if (dma_dev)
978 dma_unmap_page(dma_dev, dma_addr, PAGE_SIZE, dir);
979
980 if (status < 0) {
981 data->error = status;
982 dev_dbg(&spi->dev, "%s status %d\n",
983 (direction == DMA_TO_DEVICE)
984 ? "write" : "read",
985 status);
986 break;
987 }
988 }
989
990 /* NOTE some docs describe an MMC-only SET_BLOCK_COUNT (CMD23) that
991 * can be issued before multiblock writes. Unlike its more widely
992 * documented analogue for SD cards (SET_WR_BLK_ERASE_COUNT, ACMD23),
993 * that can affect the STOP_TRAN logic. Complete (and current)
994 * MMC specs should sort that out before Linux starts using CMD23.
995 */
996 if (direction == DMA_TO_DEVICE && multiple) {
997 struct scratch *scratch = host->data;
998 int tmp;
999 const unsigned statlen = sizeof(scratch->status);
1000
1001 dev_dbg(&spi->dev, " mmc_spi: STOP_TRAN\n");
1002
1003 /* Tweak the per-block message we set up earlier by morphing
1004 * it to hold a single buffer with the token followed by some
1005 * all-ones bytes ... skip N(BR) (0..1), scan the rest for
1006 * "not busy any longer" status, and leave chip selected.
1007 */
1008 INIT_LIST_HEAD(&host->m.transfers);
1009 list_add(&host->early_status.transfer_list,
1010 &host->m.transfers);
1011
1012 memset(scratch->status, 0xff, statlen);
1013 scratch->status[0] = SPI_TOKEN_STOP_TRAN;
1014
1015 host->early_status.tx_buf = host->early_status.rx_buf;
1016 host->early_status.tx_dma = host->early_status.rx_dma;
1017 host->early_status.len = statlen;
1018
1019 if (host->dma_dev)
1020 dma_sync_single_for_device(host->dma_dev,
1021 host->data_dma, sizeof(*scratch),
1022 DMA_BIDIRECTIONAL);
1023
1024 tmp = spi_sync_locked(spi, &host->m);
1025
1026 if (host->dma_dev)
1027 dma_sync_single_for_cpu(host->dma_dev,
1028 host->data_dma, sizeof(*scratch),
1029 DMA_BIDIRECTIONAL);
1030
1031 if (tmp < 0) {
1032 if (!data->error)
1033 data->error = tmp;
1034 return;
1035 }
1036
1037 /* Ideally we collected "not busy" status with one I/O,
1038 * avoiding wasteful byte-at-a-time scanning... but more
1039 * I/O is often needed.
1040 */
1041 for (tmp = 2; tmp < statlen; tmp++) {
1042 if (scratch->status[tmp] != 0)
1043 return;
1044 }
1045 tmp = mmc_spi_wait_unbusy(host, timeout);
1046 if (tmp < 0 && !data->error)
1047 data->error = tmp;
1048 }
1049 }
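
/* For reference: after the morphed STOP_TRAN message above, any nonzero
 * byte in scratch->status[] from index 2 onward means the end-of-busy
 * edge was already captured within that single I/O; an all-zero tail
 * means the card is still busy, so mmc_spi_wait_unbusy() keeps polling.
 */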
1050
1051 /****************************************************************************/
1052
1053 /*
1054 * MMC driver implementation -- the interface to the MMC stack
1055 */
1056
1057 static void mmc_spi_request(struct mmc_host *mmc, struct mmc_request *mrq)
1058 {
1059 struct mmc_spi_host *host = mmc_priv(mmc);
1060 int status = -EINVAL;
1061 int crc_retry = 5;
1062 struct mmc_command stop;
1063
1064 #ifdef DEBUG
1065 /* MMC core and layered drivers *MUST* issue SPI-aware commands */
1066 {
1067 struct mmc_command *cmd;
1068 int invalid = 0;
1069
1070 cmd = mrq->cmd;
1071 if (!mmc_spi_resp_type(cmd)) {
1072 dev_dbg(&host->spi->dev, "bogus command\n");
1073 cmd->error = -EINVAL;
1074 invalid = 1;
1075 }
1076
1077 cmd = mrq->stop;
1078 if (cmd && !mmc_spi_resp_type(cmd)) {
1079 dev_dbg(&host->spi->dev, "bogus STOP command\n");
1080 cmd->error = -EINVAL;
1081 invalid = 1;
1082 }
1083
1084 if (invalid) {
1085 dump_stack();
1086 mmc_request_done(host->mmc, mrq);
1087 return;
1088 }
1089 }
1090 #endif
1091
1092 /* request exclusive bus access */
1093 spi_bus_lock(host->spi->master);
1094
1095 crc_recover:
1096 /* issue command; then optionally data and stop */
1097 status = mmc_spi_command_send(host, mrq, mrq->cmd, mrq->data != NULL);
1098 if (status == 0 && mrq->data) {
1099 mmc_spi_data_do(host, mrq->cmd, mrq->data, mrq->data->blksz);
1100
1101 /*
1102 * The SPI bus is not always reliable for large data transfers.
1103 * If an occasional crc error is reported by the SD device with
1104 * data read/write over SPI, it may be recovered by repeating
1105 * the last SD command. The retry count is set to 5 so the
1106 * driver reliably passes stress tests.
1107 */
1108 if (mrq->data->error == -EILSEQ && crc_retry) {
1109 stop.opcode = MMC_STOP_TRANSMISSION;
1110 stop.arg = 0;
1111 stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
1112 status = mmc_spi_command_send(host, mrq, &stop, 0);
1113 crc_retry--;
1114 mrq->data->error = 0;
1115 goto crc_recover;
1116 }
1117
1118 if (mrq->stop)
1119 status = mmc_spi_command_send(host, mrq, mrq->stop, 0);
1120 else
1121 mmc_cs_off(host);
1122 }
1123
1124 /* release the bus */
1125 spi_bus_unlock(host->spi->master);
1126
1127 mmc_request_done(host->mmc, mrq);
1128 }
1129
1130 /* See Section 6.4.1, in SD "Simplified Physical Layer Specification 2.0"
1131 *
1132 * NOTE that here we can't know that the card has just been powered up;
1133 * not all MMC/SD sockets support power switching.
1134 *
1135 * FIXME when the card is still in SPI mode, e.g. from a previous kernel,
1136 * this doesn't seem to do the right thing at all...
1137 */
1138 static void mmc_spi_initsequence(struct mmc_spi_host *host)
1139 {
1140 /* Try to be very sure any previous command has completed;
1141 * wait till not-busy, skip debris from any old commands.
1142 */
1143 mmc_spi_wait_unbusy(host, r1b_timeout);
1144 mmc_spi_readbytes(host, 10);
1145
1146 /*
1147 * Do a burst with chipselect active-high. We need to do this to
1148 * meet the requirement of 74 clock cycles with both chipselect
1149 * and CMD (MOSI) high before CMD0 ... after the card has been
1150 * powered up to Vdd(min), and so is ready to take commands.
1151 *
1152 * Some cards are particularly needy of this (e.g. Viking "SD256")
1153 * while most others don't seem to care.
1154 *
1155 * Note that this is one of the places MMC/SD plays games with the
1156 * SPI protocol. Another is that when chipselect is released while
1157 * the card returns BUSY status, the clock must issue several cycles
1158 * with chipselect high before the card will stop driving its output.
1159 */
1160 host->spi->mode |= SPI_CS_HIGH;
1161 if (spi_setup(host->spi) != 0) {
1162 /* Just warn; most cards work without it. */
1163 dev_warn(&host->spi->dev,
1164 "can't change chip-select polarity\n");
1165 host->spi->mode &= ~SPI_CS_HIGH;
1166 } else {
1167 mmc_spi_readbytes(host, 18);
1168
1169 host->spi->mode &= ~SPI_CS_HIGH;
1170 if (spi_setup(host->spi) != 0) {
1171 /* Wot, we can't get the same setup we had before? */
1172 dev_err(&host->spi->dev,
1173 "can't restore chip-select polarity\n");
1174 }
1175 }
1176 }
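
/* For reference: the 18-byte burst above clocks 18 * 8 = 144 cycles with
 * the chipselect line held high (the card sees itself deselected, since
 * SPI_CS_HIGH marks the line active-high) and MOSI all-ones, comfortably
 * more than the 74 cycles required before CMD0.
 */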
1177
1178 static char *mmc_powerstring(u8 power_mode)
1179 {
1180 switch (power_mode) {
1181 case MMC_POWER_OFF: return "off";
1182 case MMC_POWER_UP: return "up";
1183 case MMC_POWER_ON: return "on";
1184 }
1185 return "?";
1186 }
1187
1188 static void mmc_spi_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1189 {
1190 struct mmc_spi_host *host = mmc_priv(mmc);
1191
1192 if (host->power_mode != ios->power_mode) {
1193 int canpower;
1194
1195 canpower = host->pdata && host->pdata->setpower;
1196
1197 dev_dbg(&host->spi->dev, "mmc_spi: power %s (%d)%s\n",
1198 mmc_powerstring(ios->power_mode),
1199 ios->vdd,
1200 canpower ? ", can switch" : "");
1201
1202 /* switch power on/off if possible, accounting for
1203 * max 250msec powerup time if needed.
1204 */
1205 if (canpower) {
1206 switch (ios->power_mode) {
1207 case MMC_POWER_OFF:
1208 case MMC_POWER_UP:
1209 host->pdata->setpower(&host->spi->dev,
1210 ios->vdd);
1211 if (ios->power_mode == MMC_POWER_UP)
1212 msleep(host->powerup_msecs);
1213 }
1214 }
1215
1216 /* See 6.4.1 in the simplified SD card physical spec 2.0 */
1217 if (ios->power_mode == MMC_POWER_ON)
1218 mmc_spi_initsequence(host);
1219
1220 /* If powering down, ground all card inputs to avoid power
1221 * delivery from data lines! On a shared SPI bus, this
1222 * will probably be temporary; 6.4.2 of the simplified SD
1223 * spec says this must last at least 1msec.
1224 *
1225 * - Clock low means CPOL 0, e.g. mode 0
1226 * - MOSI low comes from writing zero
1227 * - Chipselect is usually active low...
1228 */
1229 if (canpower && ios->power_mode == MMC_POWER_OFF) {
1230 int mres;
1231 u8 nullbyte = 0;
1232
1233 host->spi->mode &= ~(SPI_CPOL|SPI_CPHA);
1234 mres = spi_setup(host->spi);
1235 if (mres < 0)
1236 dev_dbg(&host->spi->dev,
1237 "switch to SPI mode 0 failed\n");
1238
1239 if (spi_write(host->spi, &nullbyte, 1) < 0)
1240 dev_dbg(&host->spi->dev,
1241 "put spi signals to low failed\n");
1242
1243 /*
1244 * Now clock should be low due to spi mode 0;
1245 * MOSI should be low because of written 0x00;
1246 * chipselect should be low (it is active low)
1247 * power supply is off, so now MMC is off too!
1248 *
1249 * FIXME no, chipselect can be high since the
1250 * device is inactive and SPI_CS_HIGH is clear...
1251 */
1252 msleep(10);
1253 if (mres == 0) {
1254 host->spi->mode |= (SPI_CPOL|SPI_CPHA);
1255 mres = spi_setup(host->spi);
1256 if (mres < 0)
1257 dev_dbg(&host->spi->dev,
1258 "switch back to SPI mode 3"
1259 " failed\n");
1260 }
1261 }
1262
1263 host->power_mode = ios->power_mode;
1264 }
1265
1266 if (host->spi->max_speed_hz != ios->clock && ios->clock != 0) {
1267 int status;
1268
1269 host->spi->max_speed_hz = ios->clock;
1270 status = spi_setup(host->spi);
1271 dev_dbg(&host->spi->dev,
1272 "mmc_spi: clock to %d Hz, %d\n",
1273 host->spi->max_speed_hz, status);
1274 }
1275 }
1276
1277 static const struct mmc_host_ops mmc_spi_ops = {
1278 .request = mmc_spi_request,
1279 .set_ios = mmc_spi_set_ios,
1280 .get_ro = mmc_gpio_get_ro,
1281 .get_cd = mmc_gpio_get_cd,
1282 };
1283
1284
1285 /****************************************************************************/
1286
1287 /*
1288 * SPI driver implementation
1289 */
1290
1291 static irqreturn_t
1292 mmc_spi_detect_irq(int irq, void *mmc)
1293 {
1294 struct mmc_spi_host *host = mmc_priv(mmc);
1295 u16 delay_msec = max(host->pdata->detect_delay, (u16)100);
1296
1297 mmc_detect_change(mmc, msecs_to_jiffies(delay_msec));
1298 return IRQ_HANDLED;
1299 }
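
/* Note: the max() above enforces at least 100 msec of debounce before
 * mmc_detect_change() rescans the slot, even when platform data asks
 * for a shorter detect_delay.
 */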
1300
1301 static int mmc_spi_probe(struct spi_device *spi)
1302 {
1303 void *ones;
1304 struct mmc_host *mmc;
1305 struct mmc_spi_host *host;
1306 int status;
1307 bool has_ro = false;
1308
1309 /* We rely on full duplex transfers, mostly to reduce
1310 * per-transfer overheads (by making fewer transfers).
1311 */
1312 if (spi->master->flags & SPI_MASTER_HALF_DUPLEX)
1313 return -EINVAL;
1314
1315 /* MMC and SD specs only seem to care that sampling is on the
1316 * rising edge ... meaning SPI modes 0 or 3. So either SPI mode
1317 * should be legit. We'll use mode 0 since the steady state is 0,
1318 * which is appropriate for hotplugging, unless the platform data
1319 * specifies mode 3 (when the hardware is not compatible with mode 0).
1320 */
1321 if (spi->mode != SPI_MODE_3)
1322 spi->mode = SPI_MODE_0;
1323 spi->bits_per_word = 8;
1324
1325 status = spi_setup(spi);
1326 if (status < 0) {
1327 dev_dbg(&spi->dev, "needs SPI mode %02x, %d KHz; %d\n",
1328 spi->mode, spi->max_speed_hz / 1000,
1329 status);
1330 return status;
1331 }
1332
1333 /* We need a supply of ones to transmit. This is the only time
1334 * the CPU touches these, so cache coherency isn't a concern.
1335 *
1336 * NOTE if many systems use more than one MMC-over-SPI connector
1337 * it'd save some memory to share this. That's evidently rare.
1338 */
1339 status = -ENOMEM;
1340 ones = kmalloc(MMC_SPI_BLOCKSIZE, GFP_KERNEL);
1341 if (!ones)
1342 goto nomem;
1343 memset(ones, 0xff, MMC_SPI_BLOCKSIZE);
1344
1345 mmc = mmc_alloc_host(sizeof(*host), &spi->dev);
1346 if (!mmc)
1347 goto nomem;
1348
1349 mmc->ops = &mmc_spi_ops;
1350 mmc->max_blk_size = MMC_SPI_BLOCKSIZE;
1351 mmc->max_segs = MMC_SPI_BLOCKSATONCE;
1352 mmc->max_req_size = MMC_SPI_BLOCKSATONCE * MMC_SPI_BLOCKSIZE;
1353 mmc->max_blk_count = MMC_SPI_BLOCKSATONCE;
1354
1355 mmc->caps = MMC_CAP_SPI;
1356
1357 /* SPI doesn't need the lowspeed device identification thing for
1358 * MMC or SD cards, since it never comes up in open drain mode.
1359 * That's good; some SPI masters can't handle very low speeds!
1360 *
1361 * However, low speed SDIO cards need not handle over 400 KHz;
1362 * that's the only reason not to use a few MHz for f_min (until
1363 * the upper layer reads the target frequency from the CSD).
1364 */
1365 mmc->f_min = 400000;
1366 mmc->f_max = spi->max_speed_hz;
1367
1368 host = mmc_priv(mmc);
1369 host->mmc = mmc;
1370 host->spi = spi;
1371
1372 host->ones = ones;
1373
1374 /* Platform data is used to hook up things like card sensing
1375 * and power switching gpios.
1376 */
1377 host->pdata = mmc_spi_get_pdata(spi);
1378 if (host->pdata)
1379 mmc->ocr_avail = host->pdata->ocr_mask;
1380 if (!mmc->ocr_avail) {
1381 dev_warn(&spi->dev, "ASSUMING 3.2-3.4 V slot power\n");
1382 mmc->ocr_avail = MMC_VDD_32_33|MMC_VDD_33_34;
1383 }
1384 if (host->pdata && host->pdata->setpower) {
1385 host->powerup_msecs = host->pdata->powerup_msecs;
1386 if (!host->powerup_msecs || host->powerup_msecs > 250)
1387 host->powerup_msecs = 250;
1388 }
1389
1390 dev_set_drvdata(&spi->dev, mmc);
1391
1392 /* preallocate dma buffers */
1393 host->data = kmalloc(sizeof(*host->data), GFP_KERNEL);
1394 if (!host->data)
1395 goto fail_nobuf1;
1396
1397 if (spi->master->dev.parent->dma_mask) {
1398 struct device *dev = spi->master->dev.parent;
1399
1400 host->dma_dev = dev;
1401 host->ones_dma = dma_map_single(dev, ones,
1402 MMC_SPI_BLOCKSIZE, DMA_TO_DEVICE);
1403 host->data_dma = dma_map_single(dev, host->data,
1404 sizeof(*host->data), DMA_BIDIRECTIONAL);
1405
1406 /* REVISIT in theory those map operations can fail... */
1407
1408 dma_sync_single_for_cpu(host->dma_dev,
1409 host->data_dma, sizeof(*host->data),
1410 DMA_BIDIRECTIONAL);
1411 }
1412
1413 /* setup message for status/busy readback */
1414 spi_message_init(&host->readback);
1415 host->readback.is_dma_mapped = (host->dma_dev != NULL);
1416
1417 spi_message_add_tail(&host->status, &host->readback);
1418 host->status.tx_buf = host->ones;
1419 host->status.tx_dma = host->ones_dma;
1420 host->status.rx_buf = &host->data->status;
1421 host->status.rx_dma = host->data_dma + offsetof(struct scratch, status);
1422 host->status.cs_change = 1;
1423
1424 /* register card detect irq */
1425 if (host->pdata && host->pdata->init) {
1426 status = host->pdata->init(&spi->dev, mmc_spi_detect_irq, mmc);
1427 if (status != 0)
1428 goto fail_glue_init;
1429 }
1430
1431 /* pass platform capabilities, if any */
1432 if (host->pdata) {
1433 mmc->caps |= host->pdata->caps;
1434 mmc->caps2 |= host->pdata->caps2;
1435 }
1436
1437 status = mmc_add_host(mmc);
1438 if (status != 0)
1439 goto fail_add_host;
1440
1441 if (host->pdata && host->pdata->flags & MMC_SPI_USE_CD_GPIO) {
1442 status = mmc_gpio_request_cd(mmc, host->pdata->cd_gpio,
1443 host->pdata->cd_debounce);
1444 if (status != 0)
1445 goto fail_add_host;
1446 }
1447
1448 if (host->pdata && host->pdata->flags & MMC_SPI_USE_RO_GPIO) {
1449 has_ro = true;
1450 status = mmc_gpio_request_ro(mmc, host->pdata->ro_gpio);
1451 if (status != 0)
1452 goto fail_add_host;
1453 }
1454
1455 dev_info(&spi->dev, "SD/MMC host %s%s%s%s%s\n",
1456 dev_name(&mmc->class_dev),
1457 host->dma_dev ? "" : ", no DMA",
1458 has_ro ? "" : ", no WP",
1459 (host->pdata && host->pdata->setpower)
1460 ? "" : ", no poweroff",
1461 (mmc->caps & MMC_CAP_NEEDS_POLL)
1462 ? ", cd polling" : "");
1463 return 0;
1464
1465 fail_add_host:
1466 mmc_remove_host (mmc);
1467 fail_glue_init:
1468 if (host->dma_dev)
1469 dma_unmap_single(host->dma_dev, host->data_dma,
1470 sizeof(*host->data), DMA_BIDIRECTIONAL);
1471 kfree(host->data);
1472
1473 fail_nobuf1:
1474 mmc_free_host(mmc);
1475 mmc_spi_put_pdata(spi);
1476 dev_set_drvdata(&spi->dev, NULL);
1477
1478 nomem:
1479 kfree(ones);
1480 return status;
1481 }
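
/*
 * A hedged sketch (board-file style; the ocr_mask value and the SPI bus
 * geometry are illustrative, not from this source) of how a platform
 * could bind a card slot to this driver through the "mmc_spi" modalias:
 */
#if 0	/* example only, not part of the driver */
static struct mmc_spi_platform_data example_mmc_pdata = {
	.ocr_mask	= MMC_VDD_32_33 | MMC_VDD_33_34,
};

static struct spi_board_info example_spi_board_info[] __initdata = {
	{
		.modalias	= "mmc_spi",
		.max_speed_hz	= 25000000,
		.bus_num	= 0,
		.chip_select	= 1,
		.mode		= SPI_MODE_0,
		.platform_data	= &example_mmc_pdata,
	},
};
/* registered from board init code via:
 *	spi_register_board_info(example_spi_board_info,
 *				ARRAY_SIZE(example_spi_board_info));
 */
#endif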
1482
1483
1484 static int mmc_spi_remove(struct spi_device *spi)
1485 {
1486 struct mmc_host *mmc = dev_get_drvdata(&spi->dev);
1487 struct mmc_spi_host *host;
1488
1489 if (mmc) {
1490 host = mmc_priv(mmc);
1491
1492 /* prevent new mmc_detect_change() calls */
1493 if (host->pdata && host->pdata->exit)
1494 host->pdata->exit(&spi->dev, mmc);
1495
1496 mmc_remove_host(mmc);
1497
1498 if (host->dma_dev) {
1499 dma_unmap_single(host->dma_dev, host->ones_dma,
1500 MMC_SPI_BLOCKSIZE, DMA_TO_DEVICE);
1501 dma_unmap_single(host->dma_dev, host->data_dma,
1502 sizeof(*host->data), DMA_BIDIRECTIONAL);
1503 }
1504
1505 kfree(host->data);
1506 kfree(host->ones);
1507
1508 spi->max_speed_hz = mmc->f_max;
1509 mmc_free_host(mmc);
1510 mmc_spi_put_pdata(spi);
1511 dev_set_drvdata(&spi->dev, NULL);
1512 }
1513 return 0;
1514 }
1515
1516 static struct of_device_id mmc_spi_of_match_table[] = {
1517 { .compatible = "mmc-spi-slot", },
1518 {},
1519 };
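
/* For device-tree platforms, the table above matches a node such as the
 * following (a sketch after the "mmc-spi-slot" binding document; property
 * values are illustrative):
 *
 *	mmc-slot@0 {
 *		compatible = "mmc-spi-slot";
 *		reg = <0>;
 *		spi-max-frequency = <25000000>;
 *		voltage-ranges = <3300 3300>;
 *	};
 */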
1520
1521 static struct spi_driver mmc_spi_driver = {
1522 .driver = {
1523 .name = "mmc_spi",
1524 .owner = THIS_MODULE,
1525 .of_match_table = mmc_spi_of_match_table,
1526 },
1527 .probe = mmc_spi_probe,
1528 .remove = mmc_spi_remove,
1529 };
1530
1531 module_spi_driver(mmc_spi_driver);
1532
1533 MODULE_AUTHOR("Mike Lavender, David Brownell, "
1534 "Hans-Peter Nilsson, Jan Nikitenko");
1535 MODULE_DESCRIPTION("SPI SD/MMC host driver");
1536 MODULE_LICENSE("GPL");
1537 MODULE_ALIAS("spi:mmc_spi");
1538
1539
1540
1541
1542
1543 /* LDV_COMMENT_BEGIN_MAIN */
1544 #ifdef LDV_MAIN0_sequence_infinite_withcheck_stateful
1545
1546 /*###########################################################################*/
1547
1548 /*############## Driver Environment Generator 0.2 output ####################*/
1549
1550 /*###########################################################################*/
1551
1552
1553
1554 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Tests whether all kernel resources are correctly released by the driver before it is unloaded. */
1555 void ldv_check_final_state(void);
1556
1557 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result. */
1558 void ldv_check_return_value(int res);
1559
1560 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result of probe() function. */
1561 void ldv_check_return_value_probe(int res);
1562
1563 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Initializes the model. */
1564 void ldv_initialize(void);
1565
1566 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Reinitializes the model between distinct model function calls. */
1567 void ldv_handler_precall(void);
1568
1569 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Returns an arbitrary integer value. */
1570 int nondet_int(void);
1571
1572 /* LDV_COMMENT_VAR_DECLARE_LDV Special variable for LDV verifier. */
1573 int LDV_IN_INTERRUPT;
1574
1575 /* LDV_COMMENT_FUNCTION_MAIN Main function for LDV verifier. */
1576 void ldv_main0_sequence_infinite_withcheck_stateful(void) {
1577
1578
1579
1580 /* LDV_COMMENT_BEGIN_VARIABLE_DECLARATION_PART */
1581 /*============================= VARIABLE DECLARATION PART =============================*/
1582 /** STRUCT: struct type: mmc_host_ops, struct name: mmc_spi_ops **/
1583 /* content: static void mmc_spi_request(struct mmc_host *mmc, struct mmc_request *mrq)*/
1584 /* LDV_COMMENT_BEGIN_PREP */
1585 #define SPI_MMC_RESPONSE_CODE(x) ((x) & 0x1f)
1586 #define SPI_RESPONSE_ACCEPTED ((2 << 1)|1)
1587 #define SPI_RESPONSE_CRC_ERR ((5 << 1)|1)
1588 #define SPI_RESPONSE_WRITE_ERR ((6 << 1)|1)
1589 #define SPI_TOKEN_SINGLE 0xfe
1590 #define SPI_TOKEN_MULTI_WRITE 0xfc
1591 #define SPI_TOKEN_STOP_TRAN 0xfd
1592 #define MMC_SPI_BLOCKSIZE 512
1593 #define r1b_timeout (HZ * 3)
1594 #define MMC_SPI_BLOCKSATONCE 128
1595 /* LDV_COMMENT_END_PREP */
1596 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mmc_spi_request" */
1597 struct mmc_host * var_group1;
1598 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mmc_spi_request" */
1599 struct mmc_request * var_group2;
1600 /* content: static void mmc_spi_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)*/
1601 /* LDV_COMMENT_BEGIN_PREP */
1602 #define SPI_MMC_RESPONSE_CODE(x) ((x) & 0x1f)
1603 #define SPI_RESPONSE_ACCEPTED ((2 << 1)|1)
1604 #define SPI_RESPONSE_CRC_ERR ((5 << 1)|1)
1605 #define SPI_RESPONSE_WRITE_ERR ((6 << 1)|1)
1606 #define SPI_TOKEN_SINGLE 0xfe
1607 #define SPI_TOKEN_MULTI_WRITE 0xfc
1608 #define SPI_TOKEN_STOP_TRAN 0xfd
1609 #define MMC_SPI_BLOCKSIZE 512
1610 #define r1b_timeout (HZ * 3)
1611 #define MMC_SPI_BLOCKSATONCE 128
1612 #ifdef DEBUG
1613 #endif
1614 /* LDV_COMMENT_END_PREP */
1615 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mmc_spi_set_ios" */
1616 struct mmc_ios * var_group3;
1617
1618 /** STRUCT: struct type: spi_driver, struct name: mmc_spi_driver **/
1619 /* content: static int mmc_spi_probe(struct spi_device *spi)*/
1620 /* LDV_COMMENT_BEGIN_PREP */
1621 #define SPI_MMC_RESPONSE_CODE(x) ((x) & 0x1f)
1622 #define SPI_RESPONSE_ACCEPTED ((2 << 1)|1)
1623 #define SPI_RESPONSE_CRC_ERR ((5 << 1)|1)
1624 #define SPI_RESPONSE_WRITE_ERR ((6 << 1)|1)
1625 #define SPI_TOKEN_SINGLE 0xfe
1626 #define SPI_TOKEN_MULTI_WRITE 0xfc
1627 #define SPI_TOKEN_STOP_TRAN 0xfd
1628 #define MMC_SPI_BLOCKSIZE 512
1629 #define r1b_timeout (HZ * 3)
1630 #define MMC_SPI_BLOCKSATONCE 128
1631 #ifdef DEBUG
1632 #endif
1633 /* LDV_COMMENT_END_PREP */
1634 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mmc_spi_probe" */
1635 struct spi_device * var_group4;
1636 /* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "mmc_spi_probe" */
1637 static int res_mmc_spi_probe_17;
1638 /* content: static int mmc_spi_remove(struct spi_device *spi)*/
1639 /* LDV_COMMENT_BEGIN_PREP */
1640 #define SPI_MMC_RESPONSE_CODE(x) ((x) & 0x1f)
1641 #define SPI_RESPONSE_ACCEPTED ((2 << 1)|1)
1642 #define SPI_RESPONSE_CRC_ERR ((5 << 1)|1)
1643 #define SPI_RESPONSE_WRITE_ERR ((6 << 1)|1)
1644 #define SPI_TOKEN_SINGLE 0xfe
1645 #define SPI_TOKEN_MULTI_WRITE 0xfc
1646 #define SPI_TOKEN_STOP_TRAN 0xfd
1647 #define MMC_SPI_BLOCKSIZE 512
1648 #define r1b_timeout (HZ * 3)
1649 #define MMC_SPI_BLOCKSATONCE 128
1650 #ifdef DEBUG
1651 #endif
1652 /* LDV_COMMENT_END_PREP */
1653
1654 /** CALLBACK SECTION request_irq **/
1655 /* content: static irqreturn_t mmc_spi_detect_irq(int irq, void *mmc)*/
1656 /* LDV_COMMENT_BEGIN_PREP */
1657 #define SPI_MMC_RESPONSE_CODE(x) ((x) & 0x1f)
1658 #define SPI_RESPONSE_ACCEPTED ((2 << 1)|1)
1659 #define SPI_RESPONSE_CRC_ERR ((5 << 1)|1)
1660 #define SPI_RESPONSE_WRITE_ERR ((6 << 1)|1)
1661 #define SPI_TOKEN_SINGLE 0xfe
1662 #define SPI_TOKEN_MULTI_WRITE 0xfc
1663 #define SPI_TOKEN_STOP_TRAN 0xfd
1664 #define MMC_SPI_BLOCKSIZE 512
1665 #define r1b_timeout (HZ * 3)
1666 #define MMC_SPI_BLOCKSATONCE 128
1667 #ifdef DEBUG
1668 #endif
1669 /* LDV_COMMENT_END_PREP */
1670 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mmc_spi_detect_irq" */
1671 int var_mmc_spi_detect_irq_16_p0;
1672 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mmc_spi_detect_irq" */
1673 void * var_mmc_spi_detect_irq_16_p1;
1674
1675
1676
1677
1678 /* LDV_COMMENT_END_VARIABLE_DECLARATION_PART */
1679 /* LDV_COMMENT_BEGIN_VARIABLE_INITIALIZING_PART */
1680 /*============================= VARIABLE INITIALIZING PART =============================*/
1681 LDV_IN_INTERRUPT=1;
1682
1683
1684
1685
1686 /* LDV_COMMENT_END_VARIABLE_INITIALIZING_PART */
1687 /* LDV_COMMENT_BEGIN_FUNCTION_CALL_SECTION */
1688 /*============================= FUNCTION CALL SECTION =============================*/
1689 /* LDV_COMMENT_FUNCTION_CALL Initialize LDV model. */
1690 ldv_initialize();
1691
1692
1693 int ldv_s_mmc_spi_driver_spi_driver = 0;
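	/* Note: ldv_s_mmc_spi_driver_spi_driver encodes probe/remove order:
	 * 0 = not probed (only probe() may run), 1 = probed (only remove()
	 * may run); the loop below can only exit once it is back at 0.
	 */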
1694
1695
1696
1697
1698 while( nondet_int()
1699 || !(ldv_s_mmc_spi_driver_spi_driver == 0)
1700 ) {
1701
1702 switch(nondet_int()) {
1703
1704 case 0: {
1705
1706 /** STRUCT: struct type: mmc_host_ops, struct name: mmc_spi_ops **/
1707
1708
1709 /* content: static void mmc_spi_request(struct mmc_host *mmc, struct mmc_request *mrq)*/
1710 /* LDV_COMMENT_BEGIN_PREP */
1711 #define SPI_MMC_RESPONSE_CODE(x) ((x) & 0x1f)
1712 #define SPI_RESPONSE_ACCEPTED ((2 << 1)|1)
1713 #define SPI_RESPONSE_CRC_ERR ((5 << 1)|1)
1714 #define SPI_RESPONSE_WRITE_ERR ((6 << 1)|1)
1715 #define SPI_TOKEN_SINGLE 0xfe
1716 #define SPI_TOKEN_MULTI_WRITE 0xfc
1717 #define SPI_TOKEN_STOP_TRAN 0xfd
1718 #define MMC_SPI_BLOCKSIZE 512
1719 #define r1b_timeout (HZ * 3)
1720 #define MMC_SPI_BLOCKSATONCE 128
1721 /* LDV_COMMENT_END_PREP */
1722 /* LDV_COMMENT_FUNCTION_CALL Function from field "request" from driver structure with callbacks "mmc_spi_ops" */
1723 ldv_handler_precall();
1724 mmc_spi_request( var_group1, var_group2);
1725
1726
1727
1728
1729 }
1730
1731 break;
1732 case 1: {
1733
1734 /** STRUCT: struct type: mmc_host_ops, struct name: mmc_spi_ops **/
1735
1736
1737 /* content: static void mmc_spi_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)*/
1738 /* LDV_COMMENT_BEGIN_PREP */
1739 #define SPI_MMC_RESPONSE_CODE(x) ((x) & 0x1f)
1740 #define SPI_RESPONSE_ACCEPTED ((2 << 1)|1)
1741 #define SPI_RESPONSE_CRC_ERR ((5 << 1)|1)
1742 #define SPI_RESPONSE_WRITE_ERR ((6 << 1)|1)
1743 #define SPI_TOKEN_SINGLE 0xfe
1744 #define SPI_TOKEN_MULTI_WRITE 0xfc
1745 #define SPI_TOKEN_STOP_TRAN 0xfd
1746 #define MMC_SPI_BLOCKSIZE 512
1747 #define r1b_timeout (HZ * 3)
1748 #define MMC_SPI_BLOCKSATONCE 128
1749 #ifdef DEBUG
1750 #endif
1751 /* LDV_COMMENT_END_PREP */
1752 /* LDV_COMMENT_FUNCTION_CALL Function from field "set_ios" from driver structure with callbacks "mmc_spi_ops" */
1753 ldv_handler_precall();
1754 mmc_spi_set_ios( var_group1, var_group3);
1755
1756
1757
1758
1759 }
1760
1761 break;
1762 case 2: {
1763
1764 /** STRUCT: struct type: spi_driver, struct name: mmc_spi_driver **/
1765 if(ldv_s_mmc_spi_driver_spi_driver==0) {
1766
1767 /* content: static int mmc_spi_probe(struct spi_device *spi)*/
1768 /* LDV_COMMENT_BEGIN_PREP */
1769 #define SPI_MMC_RESPONSE_CODE(x) ((x) & 0x1f)
1770 #define SPI_RESPONSE_ACCEPTED ((2 << 1)|1)
1771 #define SPI_RESPONSE_CRC_ERR ((5 << 1)|1)
1772 #define SPI_RESPONSE_WRITE_ERR ((6 << 1)|1)
1773 #define SPI_TOKEN_SINGLE 0xfe
1774 #define SPI_TOKEN_MULTI_WRITE 0xfc
1775 #define SPI_TOKEN_STOP_TRAN 0xfd
1776 #define MMC_SPI_BLOCKSIZE 512
1777 #define r1b_timeout (HZ * 3)
1778 #define MMC_SPI_BLOCKSATONCE 128
1779 #ifdef DEBUG
1780 #endif
1781 /* LDV_COMMENT_END_PREP */
1782 /* LDV_COMMENT_FUNCTION_CALL Function from field "probe" from driver structure with callbacks "mmc_spi_driver". Standart function test for correct return result. */
1783 res_mmc_spi_probe_17 = mmc_spi_probe( var_group4);
1784 ldv_check_return_value(res_mmc_spi_probe_17);
1785 ldv_check_return_value_probe(res_mmc_spi_probe_17);
1786 if(res_mmc_spi_probe_17)
1787 goto ldv_module_exit;
1788 ldv_s_mmc_spi_driver_spi_driver++;
1789
1790 }
1791
1792 }
1793
1794 break;
1795 case 3: {
1796
1797 /** STRUCT: struct type: spi_driver, struct name: mmc_spi_driver **/
1798 if(ldv_s_mmc_spi_driver_spi_driver==1) {
1799
1800 /* content: static int mmc_spi_remove(struct spi_device *spi)*/
1801 /* LDV_COMMENT_BEGIN_PREP */
1802 #define SPI_MMC_RESPONSE_CODE(x) ((x) & 0x1f)
1803 #define SPI_RESPONSE_ACCEPTED ((2 << 1)|1)
1804 #define SPI_RESPONSE_CRC_ERR ((5 << 1)|1)
1805 #define SPI_RESPONSE_WRITE_ERR ((6 << 1)|1)
1806 #define SPI_TOKEN_SINGLE 0xfe
1807 #define SPI_TOKEN_MULTI_WRITE 0xfc
1808 #define SPI_TOKEN_STOP_TRAN 0xfd
1809 #define MMC_SPI_BLOCKSIZE 512
1810 #define r1b_timeout (HZ * 3)
1811 #define MMC_SPI_BLOCKSATONCE 128
1812 #ifdef DEBUG
1813 #endif
1814 /* LDV_COMMENT_END_PREP */
1815 /* LDV_COMMENT_FUNCTION_CALL Function from field "remove" from driver structure with callbacks "mmc_spi_driver" */
1816 ldv_handler_precall();
1817 mmc_spi_remove( var_group4);
1818 ldv_s_mmc_spi_driver_spi_driver=0;
1819
1820 }
1821
1822 }
1823
1824 break;
1825 case 4: {
1826
1827 /** CALLBACK SECTION request_irq **/
1828 LDV_IN_INTERRUPT=2;
1829
1830 /* content: static irqreturn_t mmc_spi_detect_irq(int irq, void *mmc)*/
1831 /* LDV_COMMENT_BEGIN_PREP */
1832 #define SPI_MMC_RESPONSE_CODE(x) ((x) & 0x1f)
1833 #define SPI_RESPONSE_ACCEPTED ((2 << 1)|1)
1834 #define SPI_RESPONSE_CRC_ERR ((5 << 1)|1)
1835 #define SPI_RESPONSE_WRITE_ERR ((6 << 1)|1)
1836 #define SPI_TOKEN_SINGLE 0xfe
1837 #define SPI_TOKEN_MULTI_WRITE 0xfc
1838 #define SPI_TOKEN_STOP_TRAN 0xfd
1839 #define MMC_SPI_BLOCKSIZE 512
1840 #define r1b_timeout (HZ * 3)
1841 #define MMC_SPI_BLOCKSATONCE 128
1842 #ifdef DEBUG
1843 #endif
1844 /* LDV_COMMENT_END_PREP */
1845 /* LDV_COMMENT_FUNCTION_CALL */
1846 ldv_handler_precall();
1847 mmc_spi_detect_irq( var_mmc_spi_detect_irq_16_p0, var_mmc_spi_detect_irq_16_p1);
1848 LDV_IN_INTERRUPT=1;
1849
1850
1851
1852 }
1853
1854 break;
1855 default: break;
1856
1857 }
1858
1859 }
1860
1861 ldv_module_exit:
1862
1863 /* LDV_COMMENT_FUNCTION_CALL Checks that all resources and locks are correctly released before the driver is unloaded. */
1864 ldv_final: ldv_check_final_state();
1865
1866 /* LDV_COMMENT_END_FUNCTION_CALL_SECTION */
1867 return;
1868
1869 }
1870 #endif
1871
1872 /* LDV_COMMENT_END_MAIN */
2 #include <linux/types.h>
3 #include <linux/dma-direction.h>
4
5 extern dma_addr_t ldv_dma_map_page(struct device *dev, struct page *page, size_t offset, size_t size, enum dma_data_direction dir);
6 extern dma_addr_t ldv_dma_map_single(struct device *dev, void *cpu_addr, size_t size, enum dma_data_direction dir);
7 extern dma_addr_t ldv_dma_map_single_attrs(struct device *dev, void *ptr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
8 extern int ldv_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
9 #line 1 "/home/druidos/temp/331_1a/work/current--X--drivers--X--defaultlinux-3.14.1.tar.xz--X--331_1a--X--cpachecker/linux-3.14.1.tar.xz/csd_deg_dscv/7740/dscv_tempdir/dscv/ri/331_1a/drivers/mmc/host/mmc_spi.c"
10
11 /*
12 * mmc_spi.c - Access SD/MMC cards through SPI master controllers
13 *
14 * (C) Copyright 2005, Intec Automation,
15 * Mike Lavender (mike@steroidmicros)
16 * (C) Copyright 2006-2007, David Brownell
17 * (C) Copyright 2007, Axis Communications,
18 * Hans-Peter Nilsson (hp@axis.com)
19 * (C) Copyright 2007, ATRON electronic GmbH,
20 * Jan Nikitenko <jan.nikitenko@gmail.com>
21 *
22 *
23 * This program is free software; you can redistribute it and/or modify
24 * it under the terms of the GNU General Public License as published by
25 * the Free Software Foundation; either version 2 of the License, or
26 * (at your option) any later version.
27 *
28 * This program is distributed in the hope that it will be useful,
29 * but WITHOUT ANY WARRANTY; without even the implied warranty of
30 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
31 * GNU General Public License for more details.
32 *
33 * You should have received a copy of the GNU General Public License
34 * along with this program; if not, write to the Free Software
35 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
36 */
37 #include <linux/sched.h>
38 #include <linux/delay.h>
39 #include <linux/slab.h>
40 #include <linux/module.h>
41 #include <linux/bio.h>
42 #include <linux/dma-mapping.h>
43 #include <linux/crc7.h>
44 #include <linux/crc-itu-t.h>
45 #include <linux/scatterlist.h>
46
47 #include <linux/mmc/host.h>
48 #include <linux/mmc/mmc.h> /* for R1_SPI_* bit values */
49 #include <linux/mmc/slot-gpio.h>
50
51 #include <linux/spi/spi.h>
52 #include <linux/spi/mmc_spi.h>
53
54 #include <asm/unaligned.h>
55
56
57 /* NOTES:
58 *
59 * - For now, we won't try to interoperate with a real mmc/sd/sdio
60 * controller, although some of them do have hardware support for
61 * SPI protocol. The main reason for such configs would be mmc-ish
62 * cards like DataFlash, which don't support that "native" protocol.
63 *
64 * We don't have a "DataFlash/MMC/SD/SDIO card slot" abstraction to
65 * switch between driver stacks, and in any case if "native" mode
66 * is available, it will be faster and hence preferable.
67 *
68 * - MMC depends on a different chipselect management policy than the
69 * SPI interface currently supports for shared bus segments: it needs
70 * to issue multiple spi_message requests with the chipselect active,
71 * using the results of one message to decide the next one to issue.
72 *
73 * Pending updates to the programming interface, this driver expects
74 * that it not share the bus with other drivers (precluding conflicts).
75 *
76 * - We tell the controller to keep the chipselect active from the
77 * beginning of an mmc_host_ops.request until the end. So beware
78 * of SPI controller drivers that mis-handle the cs_change flag!
79 *
80 * However, many cards seem OK with chipselect flapping up/down
81 * during that time ... at least on unshared bus segments.
82 */
83
84
85 /*
86 * Local protocol constants, internal to data block protocols.
87 */
88
89 /* Response tokens used to ack each block written: */
90 #define SPI_MMC_RESPONSE_CODE(x) ((x) & 0x1f)
91 #define SPI_RESPONSE_ACCEPTED ((2 << 1)|1)
92 #define SPI_RESPONSE_CRC_ERR ((5 << 1)|1)
93 #define SPI_RESPONSE_WRITE_ERR ((6 << 1)|1)
94
95 /* Read and write blocks start with these tokens and end with crc;
96 * on error, read tokens act like a subset of R2_SPI_* values.
97 */
98 #define SPI_TOKEN_SINGLE 0xfe /* single block r/w, multiblock read */
99 #define SPI_TOKEN_MULTI_WRITE 0xfc /* multiblock write */
100 #define SPI_TOKEN_STOP_TRAN 0xfd /* terminate multiblock write */
101
102 #define MMC_SPI_BLOCKSIZE 512
103
104
105 /* These fixed timeouts come from the latest SD specs, which say to ignore
106 * the CSD values. The R1B value is for card erase (e.g. the "I forgot the
107 * card's password" scenario); it's mostly applied to STOP_TRANSMISSION after
108 * reads which takes nowhere near that long. Older cards may be able to use
109 * shorter timeouts ... but why bother?
110 */
111 #define r1b_timeout (HZ * 3)
112
113 /* One of the critical speed parameters is the amount of data which may
114 * be transferred in one command. If this value is too low, the SD card
115 * controller has to do multiple partial block writes (argggh!). With
116 * today (2008) SD cards there is little speed gain if we transfer more
117 * than 64 KBytes at a time. So use this value until there is any indication
118 * that we should do more here.
119 */
120 #define MMC_SPI_BLOCKSATONCE 128
121
122 /****************************************************************************/
123
124 /*
125 * Local Data Structures
126 */
127
128 /* "scratch" is per-{command,block} data exchanged with the card */
129 struct scratch {
130 u8 status[29];
131 u8 data_token;
132 __be16 crc_val;
133 };
134
135 struct mmc_spi_host {
136 struct mmc_host *mmc;
137 struct spi_device *spi;
138
139 unsigned char power_mode;
140 u16 powerup_msecs;
141
142 struct mmc_spi_platform_data *pdata;
143
144 /* for bulk data transfers */
145 struct spi_transfer token, t, crc, early_status;
146 struct spi_message m;
147
148 /* for status readback */
149 struct spi_transfer status;
150 struct spi_message readback;
151
152 /* underlying DMA-aware controller, or null */
153 struct device *dma_dev;
154
155 /* buffer used for commands and for message "overhead" */
156 struct scratch *data;
157 dma_addr_t data_dma;
158
159 /* Specs say to write ones most of the time, even when the card
160 * has no need to read its input data; and many cards won't care.
161 * This is our source of those ones.
162 */
163 void *ones;
164 dma_addr_t ones_dma;
165 };
166
167
168 /****************************************************************************/
169
170 /*
171 * MMC-over-SPI protocol glue, used by the MMC stack interface
172 */
173
174 static inline int mmc_cs_off(struct mmc_spi_host *host)
175 {
176 /* chipselect will always be inactive after setup() */
177 return spi_setup(host->spi);
178 }
179
180 static int
181 mmc_spi_readbytes(struct mmc_spi_host *host, unsigned len)
182 {
183 int status;
184
185 if (len > sizeof(*host->data)) {
186 WARN_ON(1);
187 return -EIO;
188 }
189
190 host->status.len = len;
191
192 if (host->dma_dev)
193 dma_sync_single_for_device(host->dma_dev,
194 host->data_dma, sizeof(*host->data),
195 DMA_FROM_DEVICE);
196
197 status = spi_sync_locked(host->spi, &host->readback);
198
199 if (host->dma_dev)
200 dma_sync_single_for_cpu(host->dma_dev,
201 host->data_dma, sizeof(*host->data),
202 DMA_FROM_DEVICE);
203
204 return status;
205 }
206
207 static int mmc_spi_skip(struct mmc_spi_host *host, unsigned long timeout,
208 unsigned n, u8 byte)
209 {
210 u8 *cp = host->data->status;
211 unsigned long start = jiffies;
212
213 while (1) {
214 int status;
215 unsigned i;
216
217 status = mmc_spi_readbytes(host, n);
218 if (status < 0)
219 return status;
220
221 for (i = 0; i < n; i++) {
222 if (cp[i] != byte)
223 return cp[i];
224 }
225
226 if (time_is_before_jiffies(start + timeout))
227 break;
228
229 /* If we need long timeouts, we may release the CPU.
230 * We use jiffies here because we want to have a relation
231 * between elapsed time and the blocking of the scheduler.
232 */
233 if (time_is_before_jiffies(start+1))
234 schedule();
235 }
236 return -ETIMEDOUT;
237 }
238
239 static inline int
240 mmc_spi_wait_unbusy(struct mmc_spi_host *host, unsigned long timeout)
241 {
242 return mmc_spi_skip(host, timeout, sizeof(host->data->status), 0);
243 }
244
245 static int mmc_spi_readtoken(struct mmc_spi_host *host, unsigned long timeout)
246 {
247 return mmc_spi_skip(host, timeout, 1, 0xff);
248 }
249
250
251 /*
252 * Note that for SPI, cmd->resp[0] is not the same data as "native" protocol
253 * hosts return! The low byte holds R1_SPI bits. The next byte may hold
254 * R2_SPI bits ... for SEND_STATUS, or after data read errors.
255 *
256 * cmd->resp[1] holds any four-byte response, for R3 (READ_OCR) and on
257 * newer cards R7 (IF_COND).
258 */
259
260 static char *maptype(struct mmc_command *cmd)
261 {
262 switch (mmc_spi_resp_type(cmd)) {
263 case MMC_RSP_SPI_R1: return "R1";
264 case MMC_RSP_SPI_R1B: return "R1B";
265 case MMC_RSP_SPI_R2: return "R2/R5";
266 case MMC_RSP_SPI_R3: return "R3/R4/R7";
267 default: return "?";
268 }
269 }
270
271 /* return zero, else negative errno after setting cmd->error */
272 static int mmc_spi_response_get(struct mmc_spi_host *host,
273 struct mmc_command *cmd, int cs_on)
274 {
275 u8 *cp = host->data->status;
276 u8 *end = cp + host->t.len;
277 int value = 0;
278 int bitshift;
279 u8 leftover = 0;
280 unsigned short rotator;
281 int i;
282 char tag[32];
283
284 snprintf(tag, sizeof(tag), " ... CMD%d response SPI_%s",
285 cmd->opcode, maptype(cmd));
286
287 /* Except for data block reads, the whole response will already
288 * be stored in the scratch buffer. It's somewhere after the
289 * command and the first byte we read after it. We ignore that
290 * first byte. After STOP_TRANSMISSION command it may include
291 * two data bits, but otherwise it's all ones.
292 */
293 cp += 8;
294 while (cp < end && *cp == 0xff)
295 cp++;
296
297 /* Data block reads (R1 response types) may need more data... */
298 if (cp == end) {
299 cp = host->data->status;
300 end = cp+1;
301
302 /* Card sends N(CR) (== 1..8) bytes of all-ones then one
303 * status byte ... and we already scanned 2 bytes.
304 *
305 * REVISIT block read paths use nasty byte-at-a-time I/O
306 * so it can always DMA directly into the target buffer.
307 * It'd probably be better to memcpy() the first chunk and
308 * avoid extra i/o calls...
309 *
310 * Note we check for more than 8 bytes, because in practice,
311 * some SD cards are slow...
312 */
313 for (i = 2; i < 16; i++) {
314 value = mmc_spi_readbytes(host, 1);
315 if (value < 0)
316 goto done;
317 if (*cp != 0xff)
318 goto checkstatus;
319 }
320 value = -ETIMEDOUT;
321 goto done;
322 }
323
324 checkstatus:
325 bitshift = 0;
326 if (*cp & 0x80) {
327 /* Houston, we have an ugly card with a bit-shifted response */
328 rotator = *cp++ << 8;
329 /* read the next byte */
330 if (cp == end) {
331 value = mmc_spi_readbytes(host, 1);
332 if (value < 0)
333 goto done;
334 cp = host->data->status;
335 end = cp+1;
336 }
337 rotator |= *cp++;
338 while (rotator & 0x8000) {
339 bitshift++;
340 rotator <<= 1;
341 }
342 cmd->resp[0] = rotator >> 8;
343 leftover = rotator;
344 } else {
345 cmd->resp[0] = *cp++;
346 }
347 cmd->error = 0;
348
349 /* Status byte: the entire seven-bit R1 response. */
350 if (cmd->resp[0] != 0) {
351 if ((R1_SPI_PARAMETER | R1_SPI_ADDRESS)
352 & cmd->resp[0])
353 value = -EFAULT; /* Bad address */
354 else if (R1_SPI_ILLEGAL_COMMAND & cmd->resp[0])
355 value = -ENOSYS; /* Function not implemented */
356 else if (R1_SPI_COM_CRC & cmd->resp[0])
357 value = -EILSEQ; /* Illegal byte sequence */
358 else if ((R1_SPI_ERASE_SEQ | R1_SPI_ERASE_RESET)
359 & cmd->resp[0])
360 value = -EIO; /* I/O error */
361 /* else R1_SPI_IDLE, "it's resetting" */
362 }
363
364 switch (mmc_spi_resp_type(cmd)) {
365
366 /* SPI R1B == R1 + busy; STOP_TRANSMISSION (for multiblock reads)
367 * and less-common stuff like various erase operations.
368 */
369 case MMC_RSP_SPI_R1B:
370 /* maybe we read all the busy tokens already */
371 while (cp < end && *cp == 0)
372 cp++;
373 if (cp == end)
374 mmc_spi_wait_unbusy(host, r1b_timeout);
375 break;
376
377 /* SPI R2 == R1 + second status byte; SEND_STATUS
378 * SPI R5 == R1 + data byte; IO_RW_DIRECT
379 */
380 case MMC_RSP_SPI_R2:
381 /* read the next byte */
382 if (cp == end) {
383 value = mmc_spi_readbytes(host, 1);
384 if (value < 0)
385 goto done;
386 cp = host->data->status;
387 end = cp+1;
388 }
389 if (bitshift) {
390 rotator = leftover << 8;
391 rotator |= *cp << bitshift;
392 cmd->resp[0] |= (rotator & 0xFF00);
393 } else {
394 cmd->resp[0] |= *cp << 8;
395 }
396 break;
397
398 /* SPI R3, R4, or R7 == R1 + 4 bytes */
399 case MMC_RSP_SPI_R3:
400 rotator = leftover << 8;
401 cmd->resp[1] = 0;
402 for (i = 0; i < 4; i++) {
403 cmd->resp[1] <<= 8;
404 /* read the next byte */
405 if (cp == end) {
406 value = mmc_spi_readbytes(host, 1);
407 if (value < 0)
408 goto done;
409 cp = host->data->status;
410 end = cp+1;
411 }
412 if (bitshift) {
413 rotator |= *cp++ << bitshift;
414 cmd->resp[1] |= (rotator >> 8);
415 rotator <<= 8;
416 } else {
417 cmd->resp[1] |= *cp++;
418 }
419 }
420 break;
421
422 /* SPI R1 == just one status byte */
423 case MMC_RSP_SPI_R1:
424 break;
425
426 default:
427 dev_dbg(&host->spi->dev, "bad response type %04x\n",
428 mmc_spi_resp_type(cmd));
429 if (value >= 0)
430 value = -EINVAL;
431 goto done;
432 }
433
434 if (value < 0)
435 dev_dbg(&host->spi->dev, "%s: resp %04x %08x\n",
436 tag, cmd->resp[0], cmd->resp[1]);
437
438 /* disable chipselect on errors and some success cases */
439 if (value >= 0 && cs_on)
440 return value;
441 done:
442 if (value < 0)
443 cmd->error = value;
444 mmc_cs_off(host);
445 return value;
446 }
447
448 /* Issue command and read its response.
449 * Returns zero on success, negative for error.
450 *
451 * On error, caller must cope with mmc core retry mechanism. That
452 * means immediate low-level resubmit, which affects the bus lock...
453 */
454 static int
455 mmc_spi_command_send(struct mmc_spi_host *host,
456 struct mmc_request *mrq,
457 struct mmc_command *cmd, int cs_on)
458 {
459 struct scratch *data = host->data;
460 u8 *cp = data->status;
461 u32 arg = cmd->arg;
462 int status;
463 struct spi_transfer *t;
464
465 /* We can handle most commands (except block reads) in one full
466 * duplex I/O operation before either starting the next transfer
467 * (data block or command) or else deselecting the card.
468 *
469 * First, write 7 bytes:
470 * - an all-ones byte to ensure the card is ready
471 * - opcode byte (plus start and transmission bits)
472 * - four bytes of big-endian argument
473 * - crc7 (plus end bit) ... always computed, it's cheap
474 *
475 * We init the whole buffer to all-ones, which is what we need
476 * to write while we're reading (later) response data.
477 */
478 memset(cp++, 0xff, sizeof(data->status));
479
480 *cp++ = 0x40 | cmd->opcode;
481 *cp++ = (u8)(arg >> 24);
482 *cp++ = (u8)(arg >> 16);
483 *cp++ = (u8)(arg >> 8);
484 *cp++ = (u8)arg;
485 *cp++ = (crc7(0, &data->status[1], 5) << 1) | 0x01;
486
487 /* Then, read up to 13 bytes (while writing all-ones):
488 * - N(CR) (== 1..8) bytes of all-ones
489 * - status byte (for all response types)
490 * - the rest of the response, either:
491 * + nothing, for R1 or R1B responses
492 * + second status byte, for R2 responses
493 * + four data bytes, for R3 and R7 responses
494 *
495 * Finally, read some more bytes ... in the nice cases we know in
496 * advance how many, and reading 1 more is always OK:
497 * - N(EC) (== 0..N) bytes of all-ones, before deselect/finish
498 * - N(RC) (== 1..N) bytes of all-ones, before next command
499 * - N(WR) (== 1..N) bytes of all-ones, before data write
500 *
501 * So in those cases one full duplex I/O of at most 21 bytes will
502 * handle the whole command, leaving the card ready to receive a
503 * data block or new command. We do that whenever we can, shaving
504 * CPU and IRQ costs (especially when using DMA or FIFOs).
505 *
506 * There are two other cases, where it's not generally practical
507 * to rely on a single I/O:
508 *
509 * - R1B responses need at least N(EC) bytes of all-zeroes.
510 *
511 * In this case we can *try* to fit it into one I/O, then
512 * maybe read more data later.
513 *
514 * - Data block reads are more troublesome, since a variable
515 * number of padding bytes precede the token and data.
516 * + N(CX) (== 0..8) bytes of all-ones, before CSD or CID
517 * + N(AC) (== 1..many) bytes of all-ones
518 *
519 * In this case we currently only have minimal speedups here:
520 * when N(CR) == 1 we can avoid I/O in response_get().
521 */
522 if (cs_on && (mrq->data->flags & MMC_DATA_READ)) {
523 cp += 2; /* min(N(CR)) + status */
524 /* R1 */
525 } else {
526 cp += 10; /* max(N(CR)) + status + min(N(RC),N(WR)) */
527 if (cmd->flags & MMC_RSP_SPI_S2) /* R2/R5 */
528 cp++;
529 else if (cmd->flags & MMC_RSP_SPI_B4) /* R3/R4/R7 */
530 cp += 4;
531 else if (cmd->flags & MMC_RSP_BUSY) /* R1B */
532 cp = data->status + sizeof(data->status);
533 /* else: R1 (most commands) */
534 }
535
536 dev_dbg(&host->spi->dev, " mmc_spi: CMD%d, resp %s\n",
537 cmd->opcode, maptype(cmd));
538
539 /* send command, leaving chipselect active */
540 spi_message_init(&host->m);
541
542 t = &host->t;
543 memset(t, 0, sizeof(*t));
544 t->tx_buf = t->rx_buf = data->status;
545 t->tx_dma = t->rx_dma = host->data_dma;
546 t->len = cp - data->status;
547 t->cs_change = 1;
548 spi_message_add_tail(t, &host->m);
549
550 if (host->dma_dev) {
551 host->m.is_dma_mapped = 1;
552 dma_sync_single_for_device(host->dma_dev,
553 host->data_dma, sizeof(*host->data),
554 DMA_BIDIRECTIONAL);
555 }
556 status = spi_sync_locked(host->spi, &host->m);
557
558 if (host->dma_dev)
559 dma_sync_single_for_cpu(host->dma_dev,
560 host->data_dma, sizeof(*host->data),
561 DMA_BIDIRECTIONAL);
562 if (status < 0) {
563 dev_dbg(&host->spi->dev, " ... write returned %d\n", status);
564 cmd->error = status;
565 return status;
566 }
567
568 /* after no-data commands and STOP_TRANSMISSION, chipselect off */
569 return mmc_spi_response_get(host, cmd, cs_on);
570 }
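/*
 * Illustrative sketch (not part of the driver): the framing logic above,
 * applied to CMD0 (GO_IDLE_STATE, argument 0), yields the well-known
 * SPI-mode byte sequence 0x40 0x00 0x00 0x00 0x00 0x95. The helper name
 * is hypothetical; crc7() is the same <linux/crc7.h> routine the driver
 * uses.
 */
static void example_frame_cmd0(u8 frame[6])
{
	frame[0] = 0x40 | 0;				/* start + transmission bits, opcode 0 */
	frame[1] = frame[2] = frame[3] = frame[4] = 0;	/* big-endian 32-bit argument */
	frame[5] = (crc7(0, frame, 5) << 1) | 0x01;	/* crc7 + end bit == 0x95 */
}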
571
572 /* Build data message with up to four separate transfers. For TX, we
573 * start by writing the data token. And in most cases, we finish with
574 * a status transfer.
575 *
576 * We always provide TX data for data and CRC. The MMC/SD protocol
577 * requires us to write ones, but Linux defaults to writing zeroes,
578 * so we explicitly initialize the TX buffer to all ones on RX paths.
579 *
580 * We also handle DMA mapping, so the underlying SPI controller does
581 * not need to (re)do it for each message.
582 */
583 static void
584 mmc_spi_setup_data_message(
585 struct mmc_spi_host *host,
586 int multiple,
587 enum dma_data_direction direction)
588 {
589 struct spi_transfer *t;
590 struct scratch *scratch = host->data;
591 dma_addr_t dma = host->data_dma;
592
593 spi_message_init(&host->m);
594 if (dma)
595 host->m.is_dma_mapped = 1;
596
597 /* for reads, readblock() skips 0xff bytes before finding
598 * the token; for writes, this transfer issues that token.
599 */
600 if (direction == DMA_TO_DEVICE) {
601 t = &host->token;
602 memset(t, 0, sizeof(*t));
603 t->len = 1;
604 if (multiple)
605 scratch->data_token = SPI_TOKEN_MULTI_WRITE;
606 else
607 scratch->data_token = SPI_TOKEN_SINGLE;
608 t->tx_buf = &scratch->data_token;
609 if (dma)
610 t->tx_dma = dma + offsetof(struct scratch, data_token);
611 spi_message_add_tail(t, &host->m);
612 }
613
614 /* Body of transfer is buffer, then CRC ...
615 * either TX-only, or RX with TX-ones.
616 */
617 t = &host->t;
618 memset(t, 0, sizeof(*t));
619 t->tx_buf = host->ones;
620 t->tx_dma = host->ones_dma;
621 /* length and actual buffer info are written later */
622 spi_message_add_tail(t, &host->m);
623
624 t = &host->crc;
625 memset(t, 0, sizeof(*t));
626 t->len = 2;
627 if (direction == DMA_TO_DEVICE) {
628 /* the actual CRC may get written later */
629 t->tx_buf = &scratch->crc_val;
630 if (dma)
631 t->tx_dma = dma + offsetof(struct scratch, crc_val);
632 } else {
633 t->tx_buf = host->ones;
634 t->tx_dma = host->ones_dma;
635 t->rx_buf = &scratch->crc_val;
636 if (dma)
637 t->rx_dma = dma + offsetof(struct scratch, crc_val);
638 }
639 spi_message_add_tail(t, &host->m);
640
641 /*
642 * A single block read is followed by N(EC) [0+] all-ones bytes
643 * before deselect ... don't bother.
644 *
645 * Multiblock reads are followed by N(AC) [1+] all-ones bytes before
646 * the next block is read, or a STOP_TRANSMISSION is issued. We'll
647 * collect that single byte, so readblock() doesn't need to.
648 *
649 * For a write, the one-byte data response follows immediately, then
650 * come zero or more busy bytes, then N(WR) [1+] all-ones bytes.
651 * Then single block writes may deselect, and multiblock ones issue
652 * the next token (next data block, or STOP_TRAN). We can try to
653 * minimize I/O ops by using a single read to collect end-of-busy.
654 */
655 if (multiple || direction == DMA_TO_DEVICE) {
656 t = &host->early_status;
657 memset(t, 0, sizeof(*t));
658 t->len = (direction == DMA_TO_DEVICE)
659 ? sizeof(scratch->status)
660 : 1;
661 t->tx_buf = host->ones;
662 t->tx_dma = host->ones_dma;
663 t->rx_buf = scratch->status;
664 if (dma)
665 t->rx_dma = dma + offsetof(struct scratch, status);
666 t->cs_change = 1;
667 spi_message_add_tail(t, &host->m);
668 }
669 }
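/*
 * Illustrative sketch (hypothetical, not part of the driver): for a
 * single-block write the message built above chains token + data + crc
 * + early_status. A reduced standalone version of the same
 * spi_message/spi_transfer pattern, with made-up buffers:
 */
static void example_chain_transfers(struct spi_device *spi)
{
	static u8 token = SPI_TOKEN_SINGLE;
	static u8 crc[2];
	struct spi_transfer t_token = { .tx_buf = &token, .len = 1 };
	struct spi_transfer t_crc = { .tx_buf = crc, .len = 2 };
	struct spi_message m;

	spi_message_init(&m);
	spi_message_add_tail(&t_token, &m);
	/* ... the data transfer would be queued between these two ... */
	spi_message_add_tail(&t_crc, &m);
	spi_sync(spi, &m);	/* chipselect stays asserted across the chain */
}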
670
671 /*
672 * Write one block:
673 * - caller handled preceding N(WR) [1+] all-ones bytes
674 * - data block
675 * + token
676 * + data bytes
677 * + crc16
678 * - an all-ones byte ... card writes a data-response byte
679 * - followed by N(EC) [0+] all-ones bytes, card writes zero/'busy'
680 *
681 * Return negative errno, else success.
682 */
683 static int
684 mmc_spi_writeblock(struct mmc_spi_host *host, struct spi_transfer *t,
685 unsigned long timeout)
686 {
687 struct spi_device *spi = host->spi;
688 int status, i;
689 struct scratch *scratch = host->data;
690 u32 pattern;
691
692 if (host->mmc->use_spi_crc)
693 scratch->crc_val = cpu_to_be16(
694 crc_itu_t(0, t->tx_buf, t->len));
695 if (host->dma_dev)
696 dma_sync_single_for_device(host->dma_dev,
697 host->data_dma, sizeof(*scratch),
698 DMA_BIDIRECTIONAL);
699
700 status = spi_sync_locked(spi, &host->m);
701
702 if (status != 0) {
703 dev_dbg(&spi->dev, "write error (%d)\n", status);
704 return status;
705 }
706
707 if (host->dma_dev)
708 dma_sync_single_for_cpu(host->dma_dev,
709 host->data_dma, sizeof(*scratch),
710 DMA_BIDIRECTIONAL);
711
712 /*
713 * Get the transmission data-response reply. It must follow
714 * immediately after the data block we transferred. This reply
715 * doesn't necessarily tell whether the write operation succeeded;
716 * it just says if the transmission was ok and whether *earlier*
717 * writes succeeded; see the standard.
718 *
719 * In practice, there are (even modern SDHC-)cards which are late
720 * in sending the response, and miss the time frame by a few bits,
721 * so we have to cope with this situation and check the response
722 * bit-by-bit. Arggh!!!
723 */
724 pattern = scratch->status[0] << 24;
725 pattern |= scratch->status[1] << 16;
726 pattern |= scratch->status[2] << 8;
727 pattern |= scratch->status[3];
728
729 /* First 3 bits of the pattern are undefined */
730 pattern |= 0xE0000000;
731
732 /* left-adjust to leading 0 bit */
733 while (pattern & 0x80000000)
734 pattern <<= 1;
735 /* right-adjust for pattern matching. Code is in bits 4..0 now. */
736 pattern >>= 27;
737
738 switch (pattern) {
739 case SPI_RESPONSE_ACCEPTED:
740 status = 0;
741 break;
742 case SPI_RESPONSE_CRC_ERR:
743 /* host shall then issue MMC_STOP_TRANSMISSION */
744 status = -EILSEQ;
745 break;
746 case SPI_RESPONSE_WRITE_ERR:
747 /* host shall then issue MMC_STOP_TRANSMISSION,
748 * and should MMC_SEND_STATUS to sort it out
749 */
750 status = -EIO;
751 break;
752 default:
753 status = -EPROTO;
754 break;
755 }
756 if (status != 0) {
757 dev_dbg(&spi->dev, "write error %02x (%d)\n",
758 scratch->status[0], status);
759 return status;
760 }
761
762 t->tx_buf += t->len;
763 if (host->dma_dev)
764 t->tx_dma += t->len;
765
766 /* Return when not busy. If we didn't collect that status yet,
767 * we'll need some more I/O.
768 */
769 for (i = 4; i < sizeof(scratch->status); i++) {
770 /* card is non-busy if the most recent bit is 1 */
771 if (scratch->status[i] & 0x01)
772 return 0;
773 }
774 return mmc_spi_wait_unbusy(host, timeout);
775 }
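/*
 * Illustrative sketch (hypothetical helper mirroring the scan above):
 * with status bytes { 0xff, 0xe5, 0xff, 0xff } the data-response token
 * starts three bits into the second byte; masking the undefined bits,
 * skipping the leading ones, and right-adjusting still recovers
 * 0x05 == SPI_RESPONSE_ACCEPTED.
 */
static u8 example_extract_data_response(const u8 status[4])
{
	u32 pattern;

	pattern  = status[0] << 24;
	pattern |= status[1] << 16;
	pattern |= status[2] << 8;
	pattern |= status[3];
	pattern |= 0xE0000000;		/* first 3 bits are undefined */
	while (pattern & 0x80000000)	/* skip all-ones filler */
		pattern <<= 1;
	return pattern >> 27;		/* 5-bit response code */
}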
776
777 /*
778 * Read one block:
779 * - skip leading all-ones bytes ... either
780 * + N(AC) [1..f(clock,CSD)] usually, else
781 * + N(CX) [0..8] when reading CSD or CID
782 * - data block
783 * + token ... if error token, no data or crc
784 * + data bytes
785 * + crc16
786 *
787 * After single block reads, we're done; N(EC) [0+] all-ones bytes follow
788 * before dropping chipselect.
789 *
790 * For multiblock reads, caller either reads the next block or issues a
791 * STOP_TRANSMISSION command.
792 */
793 static int
794 mmc_spi_readblock(struct mmc_spi_host *host, struct spi_transfer *t,
795 unsigned long timeout)
796 {
797 struct spi_device *spi = host->spi;
798 int status;
799 struct scratch *scratch = host->data;
800 unsigned int bitshift;
801 u8 leftover;
802
803 /* At least one SD card sends an all-zeroes byte when N(CX)
804 * applies, before the all-ones bytes ... just cope with that.
805 */
806 status = mmc_spi_readbytes(host, 1);
807 if (status < 0)
808 return status;
809 status = scratch->status[0];
810 if (status == 0xff || status == 0)
811 status = mmc_spi_readtoken(host, timeout);
812
813 if (status < 0) {
814 dev_dbg(&spi->dev, "read error %02x (%d)\n", status, status);
815 return status;
816 }
817
818 /* The token may be bit-shifted...
819 * the first 0-bit precedes the data stream.
820 */
821 bitshift = 7;
822 while (status & 0x80) {
823 status <<= 1;
824 bitshift--;
825 }
826 leftover = status << 1;
827
828 if (host->dma_dev) {
829 dma_sync_single_for_device(host->dma_dev,
830 host->data_dma, sizeof(*scratch),
831 DMA_BIDIRECTIONAL);
832 dma_sync_single_for_device(host->dma_dev,
833 t->rx_dma, t->len,
834 DMA_FROM_DEVICE);
835 }
836
837 status = spi_sync_locked(spi, &host->m);
838
839 if (host->dma_dev) {
840 dma_sync_single_for_cpu(host->dma_dev,
841 host->data_dma, sizeof(*scratch),
842 DMA_BIDIRECTIONAL);
843 dma_sync_single_for_cpu(host->dma_dev,
844 t->rx_dma, t->len,
845 DMA_FROM_DEVICE);
846 }
847
848 if (bitshift) {
849 /* Walk through the data and the crc and do
850 * all the magic to get byte-aligned data.
851 */
852 u8 *cp = t->rx_buf;
853 unsigned int len;
854 unsigned int bitright = 8 - bitshift;
855 u8 temp;
856 for (len = t->len; len; len--) {
857 temp = *cp;
858 *cp++ = leftover | (temp >> bitshift);
859 leftover = temp << bitright;
860 }
861 cp = (u8 *) &scratch->crc_val;
862 temp = *cp;
863 *cp++ = leftover | (temp >> bitshift);
864 leftover = temp << bitright;
865 temp = *cp;
866 *cp = leftover | (temp >> bitshift);
867 }
868
869 if (host->mmc->use_spi_crc) {
870 u16 crc = crc_itu_t(0, t->rx_buf, t->len);
871
872 be16_to_cpus(&scratch->crc_val);
873 if (scratch->crc_val != crc) {
874 dev_dbg(&spi->dev, "read - crc error: crc_val=0x%04x, "
875 "computed=0x%04x len=%d\n",
876 scratch->crc_val, crc, t->len);
877 return -EILSEQ;
878 }
879 }
880
881 t->rx_buf += t->len;
882 if (host->dma_dev)
883 t->rx_dma += t->len;
884
885 return 0;
886 }
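/*
 * Illustrative sketch of the realignment loop above (the helper is
 * hypothetical): with bitshift == 4 and leftover == 0xa0, raw bytes
 * 0xbc 0xde become 0xab 0xcd -- each raw byte donates its high nibble
 * to the current output byte and its low nibble to the next one.
 */
static void example_realign(u8 *buf, unsigned int len,
			    unsigned int bitshift, u8 leftover)
{
	unsigned int bitright = 8 - bitshift;

	while (len--) {
		u8 temp = *buf;

		*buf++ = leftover | (temp >> bitshift);
		leftover = temp << bitright;
	}
}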
887
888 /*
889 * An MMC/SD data stage includes one or more blocks, optional CRCs,
890 * and inline handshaking. That handshaking makes it unlike most
891 * other SPI protocol stacks.
892 */
893 static void
894 mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
895 struct mmc_data *data, u32 blk_size)
896 {
897 struct spi_device *spi = host->spi;
898 struct device *dma_dev = host->dma_dev;
899 struct spi_transfer *t;
900 enum dma_data_direction direction;
901 struct scatterlist *sg;
902 unsigned n_sg;
903 int multiple = (data->blocks > 1);
904 u32 clock_rate;
905 unsigned long timeout;
906
907 if (data->flags & MMC_DATA_READ)
908 direction = DMA_FROM_DEVICE;
909 else
910 direction = DMA_TO_DEVICE;
911 mmc_spi_setup_data_message(host, multiple, direction);
912 t = &host->t;
913
914 if (t->speed_hz)
915 clock_rate = t->speed_hz;
916 else
917 clock_rate = spi->max_speed_hz;
918
919 timeout = data->timeout_ns +
920 data->timeout_clks * 1000000 / clock_rate;
921 timeout = usecs_to_jiffies((unsigned int)(timeout / 1000)) + 1;
922
923 /* Handle scatterlist segments one at a time, with synch for
924 * each 512-byte block
925 */
926 for (sg = data->sg, n_sg = data->sg_len; n_sg; n_sg--, sg++) {
927 int status = 0;
928 dma_addr_t dma_addr = 0;
929 void *kmap_addr;
930 unsigned length = sg->length;
931 enum dma_data_direction dir = direction;
932
933 /* set up dma mapping for controller drivers that might
934 * use DMA ... though they may fall back to PIO
935 */
936 if (dma_dev) {
937 /* never invalidate whole *shared* pages ... */
938 if ((sg->offset != 0 || length != PAGE_SIZE)
939 && dir == DMA_FROM_DEVICE)
940 dir = DMA_BIDIRECTIONAL;
941
942 dma_addr = dma_map_page(dma_dev, sg_page(sg), 0,
943 PAGE_SIZE, dir);
944 if (direction == DMA_TO_DEVICE)
945 t->tx_dma = dma_addr + sg->offset;
946 else
947 t->rx_dma = dma_addr + sg->offset;
948 }
949
950 /* allow pio too; we don't allow highmem */
951 kmap_addr = kmap(sg_page(sg));
952 if (direction == DMA_TO_DEVICE)
953 t->tx_buf = kmap_addr + sg->offset;
954 else
955 t->rx_buf = kmap_addr + sg->offset;
956
957 /* transfer each block, and update request status */
958 while (length) {
959 t->len = min(length, blk_size);
960
961 dev_dbg(&host->spi->dev,
962 " mmc_spi: %s block, %d bytes\n",
963 (direction == DMA_TO_DEVICE)
964 ? "write"
965 : "read",
966 t->len);
967
968 if (direction == DMA_TO_DEVICE)
969 status = mmc_spi_writeblock(host, t, timeout);
970 else
971 status = mmc_spi_readblock(host, t, timeout);
972 if (status < 0)
973 break;
974
975 data->bytes_xfered += t->len;
976 length -= t->len;
977
978 if (!multiple)
979 break;
980 }
981
982 /* discard mappings */
983 if (direction == DMA_FROM_DEVICE)
984 flush_kernel_dcache_page(sg_page(sg));
985 kunmap(sg_page(sg));
986 if (dma_dev)
987 dma_unmap_page(dma_dev, dma_addr, PAGE_SIZE, dir);
988
989 if (status < 0) {
990 data->error = status;
991 dev_dbg(&spi->dev, "%s status %d\n",
992 (direction == DMA_TO_DEVICE)
993 ? "write" : "read",
994 status);
995 break;
996 }
997 }
998
999 /* NOTE some docs describe an MMC-only SET_BLOCK_COUNT (CMD23) that
1000 * can be issued before multiblock writes. Unlike its more widely
1001 * documented analogue for SD cards (SET_WR_BLK_ERASE_COUNT, ACMD23),
1002 * that can affect the STOP_TRAN logic. Complete (and current)
1003 * MMC specs should sort that out before Linux starts using CMD23.
1004 */
1005 if (direction == DMA_TO_DEVICE && multiple) {
1006 struct scratch *scratch = host->data;
1007 int tmp;
1008 const unsigned statlen = sizeof(scratch->status);
1009
1010 dev_dbg(&spi->dev, " mmc_spi: STOP_TRAN\n");
1011
1012 /* Tweak the per-block message we set up earlier by morphing
1013 * it to hold a single buffer with the token followed by some
1014 * all-ones bytes ... skip N(BR) (0..1), scan the rest for
1015 * "not busy any longer" status, and leave chip selected.
1016 */
1017 INIT_LIST_HEAD(&host->m.transfers);
1018 list_add(&host->early_status.transfer_list,
1019 &host->m.transfers);
1020
1021 memset(scratch->status, 0xff, statlen);
1022 scratch->status[0] = SPI_TOKEN_STOP_TRAN;
1023
1024 host->early_status.tx_buf = host->early_status.rx_buf;
1025 host->early_status.tx_dma = host->early_status.rx_dma;
1026 host->early_status.len = statlen;
1027
1028 if (host->dma_dev)
1029 dma_sync_single_for_device(host->dma_dev,
1030 host->data_dma, sizeof(*scratch),
1031 DMA_BIDIRECTIONAL);
1032
1033 tmp = spi_sync_locked(spi, &host->m);
1034
1035 if (host->dma_dev)
1036 dma_sync_single_for_cpu(host->dma_dev,
1037 host->data_dma, sizeof(*scratch),
1038 DMA_BIDIRECTIONAL);
1039
1040 if (tmp < 0) {
1041 if (!data->error)
1042 data->error = tmp;
1043 return;
1044 }
1045
1046 /* Ideally we collected "not busy" status with one I/O,
1047 * avoiding wasteful byte-at-a-time scanning... but more
1048 * I/O is often needed.
1049 */
1050 for (tmp = 2; tmp < statlen; tmp++) {
1051 if (scratch->status[tmp] != 0)
1052 return;
1053 }
1054 tmp = mmc_spi_wait_unbusy(host, timeout);
1055 if (tmp < 0 && !data->error)
1056 data->error = tmp;
1057 }
1058 }
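/*
 * Illustrative sketch (hypothetical helper) of the timeout computed
 * near the top of mmc_spi_data_do(): e.g. with timeout_ns == 300000000
 * (300 ms), timeout_clks == 0, and any nonzero clock_rate, the sum is
 * 300000000; timeout / 1000 gives 300000 us, so roughly 300 ms worth
 * of jiffies plus one.
 */
static unsigned long example_data_timeout(u32 timeout_ns, u32 timeout_clks,
					  u32 clock_rate)
{
	unsigned long timeout = timeout_ns +
				timeout_clks * 1000000 / clock_rate;

	return usecs_to_jiffies((unsigned int)(timeout / 1000)) + 1;
}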
1059
1060 /****************************************************************************/
1061
1062 /*
1063 * MMC driver implementation -- the interface to the MMC stack
1064 */
1065
1066 static void mmc_spi_request(struct mmc_host *mmc, struct mmc_request *mrq)
1067 {
1068 struct mmc_spi_host *host = mmc_priv(mmc);
1069 int status = -EINVAL;
1070 int crc_retry = 5;
1071 struct mmc_command stop;
1072
1073 #ifdef DEBUG
1074 /* MMC core and layered drivers *MUST* issue SPI-aware commands */
1075 {
1076 struct mmc_command *cmd;
1077 int invalid = 0;
1078
1079 cmd = mrq->cmd;
1080 if (!mmc_spi_resp_type(cmd)) {
1081 dev_dbg(&host->spi->dev, "bogus command\n");
1082 cmd->error = -EINVAL;
1083 invalid = 1;
1084 }
1085
1086 cmd = mrq->stop;
1087 if (cmd && !mmc_spi_resp_type(cmd)) {
1088 dev_dbg(&host->spi->dev, "bogus STOP command\n");
1089 cmd->error = -EINVAL;
1090 invalid = 1;
1091 }
1092
1093 if (invalid) {
1094 dump_stack();
1095 mmc_request_done(host->mmc, mrq);
1096 return;
1097 }
1098 }
1099 #endif
1100
1101 /* request exclusive bus access */
1102 spi_bus_lock(host->spi->master);
1103
1104 crc_recover:
1105 /* issue command; then optionally data and stop */
1106 status = mmc_spi_command_send(host, mrq, mrq->cmd, mrq->data != NULL);
1107 if (status == 0 && mrq->data) {
1108 mmc_spi_data_do(host, mrq->cmd, mrq->data, mrq->data->blksz);
1109
1110 /*
1111 * The SPI bus is not always reliable for large data transfers.
1112 * If an occasional crc error is reported by the SD device with
1113 * data read/write over SPI, it may be recovered by repeating
1114 * the last SD command. The retry count is set to 5 to
1115 * ensure the driver passes stress tests.
1116 */
1117 if (mrq->data->error == -EILSEQ && crc_retry) {
1118 stop.opcode = MMC_STOP_TRANSMISSION;
1119 stop.arg = 0;
1120 stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
1121 status = mmc_spi_command_send(host, mrq, &stop, 0);
1122 crc_retry--;
1123 mrq->data->error = 0;
1124 goto crc_recover;
1125 }
1126
1127 if (mrq->stop)
1128 status = mmc_spi_command_send(host, mrq, mrq->stop, 0);
1129 else
1130 mmc_cs_off(host);
1131 }
1132
1133 /* release the bus */
1134 spi_bus_unlock(host->spi->master);
1135
1136 mmc_request_done(host->mmc, mrq);
1137 }
1138
1139 /* See Section 6.4.1 in the SD "Simplified Physical Layer Specification 2.0"
1140 *
1141 * NOTE that here we can't know that the card has just been powered up;
1142 * not all MMC/SD sockets support power switching.
1143 *
1144 * FIXME when the card is still in SPI mode, e.g. from a previous kernel,
1145 * this doesn't seem to do the right thing at all...
1146 */
1147 static void mmc_spi_initsequence(struct mmc_spi_host *host)
1148 {
1149 /* Try to be very sure any previous command has completed;
1150 * wait till not-busy, skip debris from any old commands.
1151 */
1152 mmc_spi_wait_unbusy(host, r1b_timeout);
1153 mmc_spi_readbytes(host, 10);
1154
1155 /*
1156 * Do a burst with chipselect active-high. We need to do this to
1157 * meet the requirement of 74 clock cycles with both chipselect
1158 * and CMD (MOSI) high before CMD0 ... after the card has been
1159 * powered up to Vdd(min), and so is ready to take commands.
1160 *
1161 * Some cards are particularly needy of this (e.g. Viking "SD256")
1162 * while most others don't seem to care.
1163 *
1164 * Note that this is one of the places MMC/SD plays games with the
1165 * SPI protocol. Another is that when chipselect is released while
1166 * the card returns BUSY status, the clock must issue several cycles
1167 * with chipselect high before the card will stop driving its output.
1168 */
1169 host->spi->mode |= SPI_CS_HIGH;
1170 if (spi_setup(host->spi) != 0) {
1171 /* Just warn; most cards work without it. */
1172 dev_warn(&host->spi->dev,
1173 "can't change chip-select polarity\n");
1174 host->spi->mode &= ~SPI_CS_HIGH;
1175 } else {
1176 mmc_spi_readbytes(host, 18);
1177
1178 host->spi->mode &= ~SPI_CS_HIGH;
1179 if (spi_setup(host->spi) != 0) {
1180 /* Wot, we can't get the same setup we had before? */
1181 dev_err(&host->spi->dev,
1182 "can't restore chip-select polarity\n");
1183 }
1184 }
1185 }
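/*
 * Illustrative arithmetic for the burst above: 74 clock cycles need
 * DIV_ROUND_UP(74, 8) == 10 bytes on an 8-bit bus; reading 18 bytes
 * clocks out 144 cycles, a comfortable margin over the minimum.
 */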
1186
1187 static char *mmc_powerstring(u8 power_mode)
1188 {
1189 switch (power_mode) {
1190 case MMC_POWER_OFF: return "off";
1191 case MMC_POWER_UP: return "up";
1192 case MMC_POWER_ON: return "on";
1193 }
1194 return "?";
1195 }
1196
1197 static void mmc_spi_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1198 {
1199 struct mmc_spi_host *host = mmc_priv(mmc);
1200
1201 if (host->power_mode != ios->power_mode) {
1202 int canpower;
1203
1204 canpower = host->pdata && host->pdata->setpower;
1205
1206 dev_dbg(&host->spi->dev, "mmc_spi: power %s (%d)%s\n",
1207 mmc_powerstring(ios->power_mode),
1208 ios->vdd,
1209 canpower ? ", can switch" : "");
1210
1211 /* switch power on/off if possible, accounting for
1212 * max 250msec powerup time if needed.
1213 */
1214 if (canpower) {
1215 switch (ios->power_mode) {
1216 case MMC_POWER_OFF:
1217 case MMC_POWER_UP:
1218 host->pdata->setpower(&host->spi->dev,
1219 ios->vdd);
1220 if (ios->power_mode == MMC_POWER_UP)
1221 msleep(host->powerup_msecs);
1222 }
1223 }
1224
1225 /* See 6.4.1 in the simplified SD card physical spec 2.0 */
1226 if (ios->power_mode == MMC_POWER_ON)
1227 mmc_spi_initsequence(host);
1228
1229 /* If powering down, ground all card inputs to avoid power
1230 * delivery from data lines! On a shared SPI bus, this
1231 * will probably be temporary; 6.4.2 of the simplified SD
1232 * spec says this must last at least 1msec.
1233 *
1234 * - Clock low means CPOL 0, e.g. mode 0
1235 * - MOSI low comes from writing zero
1236 * - Chipselect is usually active low...
1237 */
1238 if (canpower && ios->power_mode == MMC_POWER_OFF) {
1239 int mres;
1240 u8 nullbyte = 0;
1241
1242 host->spi->mode &= ~(SPI_CPOL|SPI_CPHA);
1243 mres = spi_setup(host->spi);
1244 if (mres < 0)
1245 dev_dbg(&host->spi->dev,
1246 "switch to SPI mode 0 failed\n");
1247
1248 if (spi_write(host->spi, &nullbyte, 1) < 0)
1249 dev_dbg(&host->spi->dev,
1250 "put spi signals to low failed\n");
1251
1252 /*
1253 * Now clock should be low due to spi mode 0;
1254 * MOSI should be low because of written 0x00;
1255 * chipselect should be low (it is active low);
1256 * power supply is off, so now MMC is off too!
1257 *
1258 * FIXME no, chipselect can be high since the
1259 * device is inactive and SPI_CS_HIGH is clear...
1260 */
1261 msleep(10);
1262 if (mres == 0) {
1263 host->spi->mode |= (SPI_CPOL|SPI_CPHA);
1264 mres = spi_setup(host->spi);
1265 if (mres < 0)
1266 dev_dbg(&host->spi->dev,
1267 "switch back to SPI mode 3"
1268 " failed\n");
1269 }
1270 }
1271
1272 host->power_mode = ios->power_mode;
1273 }
1274
1275 if (host->spi->max_speed_hz != ios->clock && ios->clock != 0) {
1276 int status;
1277
1278 host->spi->max_speed_hz = ios->clock;
1279 status = spi_setup(host->spi);
1280 dev_dbg(&host->spi->dev,
1281 "mmc_spi: clock to %d Hz, %d\n",
1282 host->spi->max_speed_hz, status);
1283 }
1284 }
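/*
 * Side note on the mode juggling in mmc_spi_set_ios() above: in
 * <linux/spi/spi.h>, SPI_MODE_0 is (0|0) and SPI_MODE_3 is
 * (SPI_CPOL|SPI_CPHA), so clearing both bits selects mode 0 and
 * setting both selects mode 3.
 */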
1285
1286 static const struct mmc_host_ops mmc_spi_ops = {
1287 .request = mmc_spi_request,
1288 .set_ios = mmc_spi_set_ios,
1289 .get_ro = mmc_gpio_get_ro,
1290 .get_cd = mmc_gpio_get_cd,
1291 };
1292
1293
1294 /****************************************************************************/
1295
1296 /*
1297 * SPI driver implementation
1298 */
1299
1300 static irqreturn_t
1301 mmc_spi_detect_irq(int irq, void *mmc)
1302 {
1303 struct mmc_spi_host *host = mmc_priv(mmc);
1304 u16 delay_msec = max(host->pdata->detect_delay, (u16)100);
1305
1306 mmc_detect_change(mmc, msecs_to_jiffies(delay_msec));
1307 return IRQ_HANDLED;
1308 }
1309
1310 static int mmc_spi_probe(struct spi_device *spi)
1311 {
1312 void *ones;
1313 struct mmc_host *mmc;
1314 struct mmc_spi_host *host;
1315 int status;
1316 bool has_ro = false;
1317
1318 /* We rely on full duplex transfers, mostly to reduce
1319 * per-transfer overheads (by making fewer transfers).
1320 */
1321 if (spi->master->flags & SPI_MASTER_HALF_DUPLEX)
1322 return -EINVAL;
1323
1324 /* MMC and SD specs only seem to care that sampling is on the
1325 * rising edge ... meaning SPI modes 0 or 3. So either SPI mode
1326 * should be legit. We'll use mode 0 since the steady state is 0,
1327 * which is appropriate for hotplugging, unless the platform data
1328 * specify mode 3 (if the hardware is not compatible with mode 0).
1329 */
1330 if (spi->mode != SPI_MODE_3)
1331 spi->mode = SPI_MODE_0;
1332 spi->bits_per_word = 8;
1333
1334 status = spi_setup(spi);
1335 if (status < 0) {
1336 dev_dbg(&spi->dev, "needs SPI mode %02x, %d KHz; %d\n",
1337 spi->mode, spi->max_speed_hz / 1000,
1338 status);
1339 return status;
1340 }
1341
1342 /* We need a supply of ones to transmit. This is the only time
1343 * the CPU touches these, so cache coherency isn't a concern.
1344 *
1345 * NOTE if many systems use more than one MMC-over-SPI connector
1346 * it'd save some memory to share this. That's evidently rare.
1347 */
1348 status = -ENOMEM;
1349 ones = kmalloc(MMC_SPI_BLOCKSIZE, GFP_KERNEL);
1350 if (!ones)
1351 goto nomem;
1352 memset(ones, 0xff, MMC_SPI_BLOCKSIZE);
1353
1354 mmc = mmc_alloc_host(sizeof(*host), &spi->dev);
1355 if (!mmc)
1356 goto nomem;
1357
1358 mmc->ops = &mmc_spi_ops;
1359 mmc->max_blk_size = MMC_SPI_BLOCKSIZE;
1360 mmc->max_segs = MMC_SPI_BLOCKSATONCE;
1361 mmc->max_req_size = MMC_SPI_BLOCKSATONCE * MMC_SPI_BLOCKSIZE;
1362 mmc->max_blk_count = MMC_SPI_BLOCKSATONCE;
1363
1364 mmc->caps = MMC_CAP_SPI;
1365
1366 /* SPI doesn't need the lowspeed device identification thing for
1367 * MMC or SD cards, since it never comes up in open drain mode.
1368 * That's good; some SPI masters can't handle very low speeds!
1369 *
1370 * However, low speed SDIO cards need not handle over 400 KHz;
1371 * that's the only reason not to use a few MHz for f_min (until
1372 * the upper layer reads the target frequency from the CSD).
1373 */
1374 mmc->f_min = 400000;
1375 mmc->f_max = spi->max_speed_hz;
1376
1377 host = mmc_priv(mmc);
1378 host->mmc = mmc;
1379 host->spi = spi;
1380
1381 host->ones = ones;
1382
1383 /* Platform data is used to hook up things like card sensing
1384 * and power switching gpios.
1385 */
1386 host->pdata = mmc_spi_get_pdata(spi);
1387 if (host->pdata)
1388 mmc->ocr_avail = host->pdata->ocr_mask;
1389 if (!mmc->ocr_avail) {
1390 dev_warn(&spi->dev, "ASSUMING 3.2-3.4 V slot power\n");
1391 mmc->ocr_avail = MMC_VDD_32_33|MMC_VDD_33_34;
1392 }
1393 if (host->pdata && host->pdata->setpower) {
1394 host->powerup_msecs = host->pdata->powerup_msecs;
1395 if (!host->powerup_msecs || host->powerup_msecs > 250)
1396 host->powerup_msecs = 250;
1397 }
1398
1399 dev_set_drvdata(&spi->dev, mmc);
1400
1401 /* preallocate dma buffers */
1402 host->data = kmalloc(sizeof(*host->data), GFP_KERNEL);
1403 if (!host->data)
1404 goto fail_nobuf1;
1405
1406 if (spi->master->dev.parent->dma_mask) {
1407 struct device *dev = spi->master->dev.parent;
1408
1409 host->dma_dev = dev;
1410 host->ones_dma = dma_map_single(dev, ones,
1411 MMC_SPI_BLOCKSIZE, DMA_TO_DEVICE);
1412 host->data_dma = dma_map_single(dev, host->data,
1413 sizeof(*host->data), DMA_BIDIRECTIONAL);
1414
1415 /* REVISIT in theory those map operations can fail... */
1416
1417 dma_sync_single_for_cpu(host->dma_dev,
1418 host->data_dma, sizeof(*host->data),
1419 DMA_BIDIRECTIONAL);
1420 }
1421
1422 /* setup message for status/busy readback */
1423 spi_message_init(&host->readback);
1424 host->readback.is_dma_mapped = (host->dma_dev != NULL);
1425
1426 spi_message_add_tail(&host->status, &host->readback);
1427 host->status.tx_buf = host->ones;
1428 host->status.tx_dma = host->ones_dma;
1429 host->status.rx_buf = &host->data->status;
1430 host->status.rx_dma = host->data_dma + offsetof(struct scratch, status);
1431 host->status.cs_change = 1;
1432
1433 /* register card detect irq */
1434 if (host->pdata && host->pdata->init) {
1435 status = host->pdata->init(&spi->dev, mmc_spi_detect_irq, mmc);
1436 if (status != 0)
1437 goto fail_glue_init;
1438 }
1439
1440 /* pass platform capabilities, if any */
1441 if (host->pdata) {
1442 mmc->caps |= host->pdata->caps;
1443 mmc->caps2 |= host->pdata->caps2;
1444 }
1445
1446 status = mmc_add_host(mmc);
1447 if (status != 0)
1448 goto fail_add_host;
1449
1450 if (host->pdata && host->pdata->flags & MMC_SPI_USE_CD_GPIO) {
1451 status = mmc_gpio_request_cd(mmc, host->pdata->cd_gpio,
1452 host->pdata->cd_debounce);
1453 if (status != 0)
1454 goto fail_add_host;
1455 }
1456
1457 if (host->pdata && host->pdata->flags & MMC_SPI_USE_RO_GPIO) {
1458 has_ro = true;
1459 status = mmc_gpio_request_ro(mmc, host->pdata->ro_gpio);
1460 if (status != 0)
1461 goto fail_add_host;
1462 }
1463
1464 dev_info(&spi->dev, "SD/MMC host %s%s%s%s%s\n",
1465 dev_name(&mmc->class_dev),
1466 host->dma_dev ? "" : ", no DMA",
1467 has_ro ? "" : ", no WP",
1468 (host->pdata && host->pdata->setpower)
1469 ? "" : ", no poweroff",
1470 (mmc->caps & MMC_CAP_NEEDS_POLL)
1471 ? ", cd polling" : "");
1472 return 0;
1473
1474 fail_add_host:
1475 mmc_remove_host (mmc);
1476 fail_glue_init:
1477 if (host->dma_dev)
1478 dma_unmap_single(host->dma_dev, host->data_dma,
1479 sizeof(*host->data), DMA_BIDIRECTIONAL);
1480 kfree(host->data);
1481
1482 fail_nobuf1:
1483 mmc_free_host(mmc);
1484 mmc_spi_put_pdata(spi);
1485 dev_set_drvdata(&spi->dev, NULL);
1486
1487 nomem:
1488 kfree(ones);
1489 return status;
1490 }
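/*
 * Illustrative sketch for the REVISIT in probe above: the two
 * dma_map_single() calls are never checked. A hypothetical check
 * would use dma_mapping_error():
 */
static int example_check_mapping(struct device *dev, dma_addr_t addr)
{
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;	/* mapping failed; unwind and bail out */
	return 0;
}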
1491
1492
1493 static int mmc_spi_remove(struct spi_device *spi)
1494 {
1495 struct mmc_host *mmc = dev_get_drvdata(&spi->dev);
1496 struct mmc_spi_host *host;
1497
1498 if (mmc) {
1499 host = mmc_priv(mmc);
1500
1501 /* prevent new mmc_detect_change() calls */
1502 if (host->pdata && host->pdata->exit)
1503 host->pdata->exit(&spi->dev, mmc);
1504
1505 mmc_remove_host(mmc);
1506
1507 if (host->dma_dev) {
1508 dma_unmap_single(host->dma_dev, host->ones_dma,
1509 MMC_SPI_BLOCKSIZE, DMA_TO_DEVICE);
1510 dma_unmap_single(host->dma_dev, host->data_dma,
1511 sizeof(*host->data), DMA_BIDIRECTIONAL);
1512 }
1513
1514 kfree(host->data);
1515 kfree(host->ones);
1516
1517 spi->max_speed_hz = mmc->f_max;
1518 mmc_free_host(mmc);
1519 mmc_spi_put_pdata(spi);
1520 dev_set_drvdata(&spi->dev, NULL);
1521 }
1522 return 0;
1523 }
1524
1525 static struct of_device_id mmc_spi_of_match_table[] = {
1526 { .compatible = "mmc-spi-slot", },
1527 {},
1528 };
1529
1530 static struct spi_driver mmc_spi_driver = {
1531 .driver = {
1532 .name = "mmc_spi",
1533 .owner = THIS_MODULE,
1534 .of_match_table = mmc_spi_of_match_table,
1535 },
1536 .probe = mmc_spi_probe,
1537 .remove = mmc_spi_remove,
1538 };
1539
1540 module_spi_driver(mmc_spi_driver);
1541
1542 MODULE_AUTHOR("Mike Lavender, David Brownell, "
1543 "Hans-Peter Nilsson, Jan Nikitenko");
1544 MODULE_DESCRIPTION("SPI SD/MMC host driver");
1545 MODULE_LICENSE("GPL");
1546 MODULE_ALIAS("spi:mmc_spi");
1547
1548
1549
1550
1551
1552 /* LDV_COMMENT_BEGIN_MAIN */
1553 #ifdef LDV_MAIN0_sequence_infinite_withcheck_stateful
1554
1555 /*###########################################################################*/
1556
1557 /*############## Driver Environment Generator 0.2 output ####################*/
1558
1559 /*###########################################################################*/
1560
1561
1562
1563 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test if all kernel resources are correctly released by driver before driver will be unloaded. */
1564 void ldv_check_final_state(void);
1565
1566 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result. */
1567 void ldv_check_return_value(int res);
1568
1569 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result of probe() function. */
1570 void ldv_check_return_value_probe(int res);
1571
1572 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Initializes the model. */
1573 void ldv_initialize(void);
1574
1575 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Reinitializes the model between distinct model function calls. */
1576 void ldv_handler_precall(void);
1577
1578 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Returns an arbitrary integer value. */
1579 int nondet_int(void);
1580
1581 /* LDV_COMMENT_VAR_DECLARE_LDV Special variable for LDV verifier. */
1582 int LDV_IN_INTERRUPT;
1583
1584 /* LDV_COMMENT_FUNCTION_MAIN Main function for LDV verifier. */
1585 void ldv_main0_sequence_infinite_withcheck_stateful(void) {
1586
1587
1588
1589 /* LDV_COMMENT_BEGIN_VARIABLE_DECLARATION_PART */
1590 /*============================= VARIABLE DECLARATION PART =============================*/
1591 /** STRUCT: struct type: mmc_host_ops, struct name: mmc_spi_ops **/
1592 /* content: static void mmc_spi_request(struct mmc_host *mmc, struct mmc_request *mrq)*/
1593 /* LDV_COMMENT_BEGIN_PREP */
1594 #define SPI_MMC_RESPONSE_CODE(x) ((x) & 0x1f)
1595 #define SPI_RESPONSE_ACCEPTED ((2 << 1)|1)
1596 #define SPI_RESPONSE_CRC_ERR ((5 << 1)|1)
1597 #define SPI_RESPONSE_WRITE_ERR ((6 << 1)|1)
1598 #define SPI_TOKEN_SINGLE 0xfe
1599 #define SPI_TOKEN_MULTI_WRITE 0xfc
1600 #define SPI_TOKEN_STOP_TRAN 0xfd
1601 #define MMC_SPI_BLOCKSIZE 512
1602 #define r1b_timeout (HZ * 3)
1603 #define MMC_SPI_BLOCKSATONCE 128
1604 /* LDV_COMMENT_END_PREP */
1605 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mmc_spi_request" */
1606 struct mmc_host * var_group1;
1607 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mmc_spi_request" */
1608 struct mmc_request * var_group2;
1609 /* content: static void mmc_spi_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)*/
1610 /* LDV_COMMENT_BEGIN_PREP */
1611 #define SPI_MMC_RESPONSE_CODE(x) ((x) & 0x1f)
1612 #define SPI_RESPONSE_ACCEPTED ((2 << 1)|1)
1613 #define SPI_RESPONSE_CRC_ERR ((5 << 1)|1)
1614 #define SPI_RESPONSE_WRITE_ERR ((6 << 1)|1)
1615 #define SPI_TOKEN_SINGLE 0xfe
1616 #define SPI_TOKEN_MULTI_WRITE 0xfc
1617 #define SPI_TOKEN_STOP_TRAN 0xfd
1618 #define MMC_SPI_BLOCKSIZE 512
1619 #define r1b_timeout (HZ * 3)
1620 #define MMC_SPI_BLOCKSATONCE 128
1621 #ifdef DEBUG
1622 #endif
1623 /* LDV_COMMENT_END_PREP */
1624 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mmc_spi_set_ios" */
1625 struct mmc_ios * var_group3;
1626
1627 /** STRUCT: struct type: spi_driver, struct name: mmc_spi_driver **/
1628 /* content: static int mmc_spi_probe(struct spi_device *spi)*/
1629 /* LDV_COMMENT_BEGIN_PREP */
1630 #define SPI_MMC_RESPONSE_CODE(x) ((x) & 0x1f)
1631 #define SPI_RESPONSE_ACCEPTED ((2 << 1)|1)
1632 #define SPI_RESPONSE_CRC_ERR ((5 << 1)|1)
1633 #define SPI_RESPONSE_WRITE_ERR ((6 << 1)|1)
1634 #define SPI_TOKEN_SINGLE 0xfe
1635 #define SPI_TOKEN_MULTI_WRITE 0xfc
1636 #define SPI_TOKEN_STOP_TRAN 0xfd
1637 #define MMC_SPI_BLOCKSIZE 512
1638 #define r1b_timeout (HZ * 3)
1639 #define MMC_SPI_BLOCKSATONCE 128
1640 #ifdef DEBUG
1641 #endif
1642 /* LDV_COMMENT_END_PREP */
1643 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mmc_spi_probe" */
1644 struct spi_device * var_group4;
1645 /* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "mmc_spi_probe" */
1646 static int res_mmc_spi_probe_17;
1647 /* content: static int mmc_spi_remove(struct spi_device *spi)*/
1648 /* LDV_COMMENT_BEGIN_PREP */
1649 #define SPI_MMC_RESPONSE_CODE(x) ((x) & 0x1f)
1650 #define SPI_RESPONSE_ACCEPTED ((2 << 1)|1)
1651 #define SPI_RESPONSE_CRC_ERR ((5 << 1)|1)
1652 #define SPI_RESPONSE_WRITE_ERR ((6 << 1)|1)
1653 #define SPI_TOKEN_SINGLE 0xfe
1654 #define SPI_TOKEN_MULTI_WRITE 0xfc
1655 #define SPI_TOKEN_STOP_TRAN 0xfd
1656 #define MMC_SPI_BLOCKSIZE 512
1657 #define r1b_timeout (HZ * 3)
1658 #define MMC_SPI_BLOCKSATONCE 128
1659 #ifdef DEBUG
1660 #endif
1661 /* LDV_COMMENT_END_PREP */
1662
1663 /** CALLBACK SECTION request_irq **/
1664 /* content: static irqreturn_t mmc_spi_detect_irq(int irq, void *mmc)*/
1665 /* LDV_COMMENT_BEGIN_PREP */
1666 #define SPI_MMC_RESPONSE_CODE(x) ((x) & 0x1f)
1667 #define SPI_RESPONSE_ACCEPTED ((2 << 1)|1)
1668 #define SPI_RESPONSE_CRC_ERR ((5 << 1)|1)
1669 #define SPI_RESPONSE_WRITE_ERR ((6 << 1)|1)
1670 #define SPI_TOKEN_SINGLE 0xfe
1671 #define SPI_TOKEN_MULTI_WRITE 0xfc
1672 #define SPI_TOKEN_STOP_TRAN 0xfd
1673 #define MMC_SPI_BLOCKSIZE 512
1674 #define r1b_timeout (HZ * 3)
1675 #define MMC_SPI_BLOCKSATONCE 128
1676 #ifdef DEBUG
1677 #endif
1678 /* LDV_COMMENT_END_PREP */
1679 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mmc_spi_detect_irq" */
1680 int var_mmc_spi_detect_irq_16_p0;
1681 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mmc_spi_detect_irq" */
1682 void * var_mmc_spi_detect_irq_16_p1;
1683
1684
1685
1686
1687 /* LDV_COMMENT_END_VARIABLE_DECLARATION_PART */
1688 /* LDV_COMMENT_BEGIN_VARIABLE_INITIALIZING_PART */
1689 /*============================= VARIABLE INITIALIZING PART =============================*/
1690 LDV_IN_INTERRUPT=1;
1691
1692
1693
1694
1695 /* LDV_COMMENT_END_VARIABLE_INITIALIZING_PART */
1696 /* LDV_COMMENT_BEGIN_FUNCTION_CALL_SECTION */
1697 /*============================= FUNCTION CALL SECTION =============================*/
1698 /* LDV_COMMENT_FUNCTION_CALL Initialize LDV model. */
1699 ldv_initialize();
1700
1701
1702 int ldv_s_mmc_spi_driver_spi_driver = 0;
1703
1704
1705
1706
1707 while( nondet_int()
1708 || !(ldv_s_mmc_spi_driver_spi_driver == 0)
1709 ) {
1710
1711 switch(nondet_int()) {
1712
1713 case 0: {
1714
1715 /** STRUCT: struct type: mmc_host_ops, struct name: mmc_spi_ops **/
1716
1717
1718 /* content: static void mmc_spi_request(struct mmc_host *mmc, struct mmc_request *mrq)*/
1719 /* LDV_COMMENT_BEGIN_PREP */
1720 #define SPI_MMC_RESPONSE_CODE(x) ((x) & 0x1f)
1721 #define SPI_RESPONSE_ACCEPTED ((2 << 1)|1)
1722 #define SPI_RESPONSE_CRC_ERR ((5 << 1)|1)
1723 #define SPI_RESPONSE_WRITE_ERR ((6 << 1)|1)
1724 #define SPI_TOKEN_SINGLE 0xfe
1725 #define SPI_TOKEN_MULTI_WRITE 0xfc
1726 #define SPI_TOKEN_STOP_TRAN 0xfd
1727 #define MMC_SPI_BLOCKSIZE 512
1728 #define r1b_timeout (HZ * 3)
1729 #define MMC_SPI_BLOCKSATONCE 128
1730 /* LDV_COMMENT_END_PREP */
1731 /* LDV_COMMENT_FUNCTION_CALL Function from field "request" from driver structure with callbacks "mmc_spi_ops" */
1732 ldv_handler_precall();
1733 mmc_spi_request( var_group1, var_group2);
1734
1735
1736
1737
1738 }
1739
1740 break;
1741 case 1: {
1742
1743 /** STRUCT: struct type: mmc_host_ops, struct name: mmc_spi_ops **/
1744
1745
1746 /* content: static void mmc_spi_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)*/
1747 /* LDV_COMMENT_BEGIN_PREP */
1748 #define SPI_MMC_RESPONSE_CODE(x) ((x) & 0x1f)
1749 #define SPI_RESPONSE_ACCEPTED ((2 << 1)|1)
1750 #define SPI_RESPONSE_CRC_ERR ((5 << 1)|1)
1751 #define SPI_RESPONSE_WRITE_ERR ((6 << 1)|1)
1752 #define SPI_TOKEN_SINGLE 0xfe
1753 #define SPI_TOKEN_MULTI_WRITE 0xfc
1754 #define SPI_TOKEN_STOP_TRAN 0xfd
1755 #define MMC_SPI_BLOCKSIZE 512
1756 #define r1b_timeout (HZ * 3)
1757 #define MMC_SPI_BLOCKSATONCE 128
1758 #ifdef DEBUG
1759 #endif
1760 /* LDV_COMMENT_END_PREP */
1761 /* LDV_COMMENT_FUNCTION_CALL Function from field "set_ios" from driver structure with callbacks "mmc_spi_ops" */
1762 ldv_handler_precall();
1763 mmc_spi_set_ios( var_group1, var_group3);
1764
1765
1766
1767
1768 }
1769
1770 break;
1771 case 2: {
1772
1773 /** STRUCT: struct type: spi_driver, struct name: mmc_spi_driver **/
1774 if(ldv_s_mmc_spi_driver_spi_driver==0) {
1775
1776 /* content: static int mmc_spi_probe(struct spi_device *spi)*/
1777 /* LDV_COMMENT_BEGIN_PREP */
1778 #define SPI_MMC_RESPONSE_CODE(x) ((x) & 0x1f)
1779 #define SPI_RESPONSE_ACCEPTED ((2 << 1)|1)
1780 #define SPI_RESPONSE_CRC_ERR ((5 << 1)|1)
1781 #define SPI_RESPONSE_WRITE_ERR ((6 << 1)|1)
1782 #define SPI_TOKEN_SINGLE 0xfe
1783 #define SPI_TOKEN_MULTI_WRITE 0xfc
1784 #define SPI_TOKEN_STOP_TRAN 0xfd
1785 #define MMC_SPI_BLOCKSIZE 512
1786 #define r1b_timeout (HZ * 3)
1787 #define MMC_SPI_BLOCKSATONCE 128
1788 #ifdef DEBUG
1789 #endif
1790 /* LDV_COMMENT_END_PREP */
1791 /* LDV_COMMENT_FUNCTION_CALL Function from field "probe" from driver structure with callbacks "mmc_spi_driver". Standard function test for correct return result. */
1792 res_mmc_spi_probe_17 = mmc_spi_probe( var_group4);
1793 ldv_check_return_value(res_mmc_spi_probe_17);
1794 ldv_check_return_value_probe(res_mmc_spi_probe_17);
1795 if(res_mmc_spi_probe_17)
1796 goto ldv_module_exit;
1797 ldv_s_mmc_spi_driver_spi_driver++;
1798
1799 }
1800
1801 }
1802
1803 break;
1804 case 3: {
1805
1806 /** STRUCT: struct type: spi_driver, struct name: mmc_spi_driver **/
1807 if(ldv_s_mmc_spi_driver_spi_driver==1) {
1808
1809 /* content: static int mmc_spi_remove(struct spi_device *spi)*/
1810 /* LDV_COMMENT_BEGIN_PREP */
1811 #define SPI_MMC_RESPONSE_CODE(x) ((x) & 0x1f)
1812 #define SPI_RESPONSE_ACCEPTED ((2 << 1)|1)
1813 #define SPI_RESPONSE_CRC_ERR ((5 << 1)|1)
1814 #define SPI_RESPONSE_WRITE_ERR ((6 << 1)|1)
1815 #define SPI_TOKEN_SINGLE 0xfe
1816 #define SPI_TOKEN_MULTI_WRITE 0xfc
1817 #define SPI_TOKEN_STOP_TRAN 0xfd
1818 #define MMC_SPI_BLOCKSIZE 512
1819 #define r1b_timeout (HZ * 3)
1820 #define MMC_SPI_BLOCKSATONCE 128
1821 #ifdef DEBUG
1822 #endif
1823 /* LDV_COMMENT_END_PREP */
1824 /* LDV_COMMENT_FUNCTION_CALL Function from field "remove" from driver structure with callbacks "mmc_spi_driver" */
1825 ldv_handler_precall();
1826 mmc_spi_remove( var_group4);
1827 ldv_s_mmc_spi_driver_spi_driver=0;
1828
1829 }
1830
1831 }
1832
1833 break;
1834 case 4: {
1835
1836 /** CALLBACK SECTION request_irq **/
1837 LDV_IN_INTERRUPT=2;
1838
1839 /* content: static irqreturn_t mmc_spi_detect_irq(int irq, void *mmc)*/
1840 /* LDV_COMMENT_BEGIN_PREP */
1841 #define SPI_MMC_RESPONSE_CODE(x) ((x) & 0x1f)
1842 #define SPI_RESPONSE_ACCEPTED ((2 << 1)|1)
1843 #define SPI_RESPONSE_CRC_ERR ((5 << 1)|1)
1844 #define SPI_RESPONSE_WRITE_ERR ((6 << 1)|1)
1845 #define SPI_TOKEN_SINGLE 0xfe
1846 #define SPI_TOKEN_MULTI_WRITE 0xfc
1847 #define SPI_TOKEN_STOP_TRAN 0xfd
1848 #define MMC_SPI_BLOCKSIZE 512
1849 #define r1b_timeout (HZ * 3)
1850 #define MMC_SPI_BLOCKSATONCE 128
1851 #ifdef DEBUG
1852 #endif
1853 /* LDV_COMMENT_END_PREP */
1854 /* LDV_COMMENT_FUNCTION_CALL */
1855 ldv_handler_precall();
1856 mmc_spi_detect_irq( var_mmc_spi_detect_irq_16_p0, var_mmc_spi_detect_irq_16_p1);
1857 LDV_IN_INTERRUPT=1;
1858
1859
1860
1861 }
1862
1863 break;
1864 default: break;
1865
1866 }
1867
1868 }
1869
1870 ldv_module_exit:
1871
1872 /* LDV_COMMENT_FUNCTION_CALL Checks that all resources and locks are correctly released before the driver will be unloaded. */
1873 ldv_final: ldv_check_final_state();
1874
1875 /* LDV_COMMENT_END_FUNCTION_CALL_SECTION */
1876 return;
1877
1878 }
1879 #endif
1880
1881 /* LDV_COMMENT_END_MAIN */
1882
1883 #line 9 "/home/druidos/temp/331_1a/work/current--X--drivers--X--defaultlinux-3.14.1.tar.xz--X--331_1a--X--cpachecker/linux-3.14.1.tar.xz/csd_deg_dscv/7740/dscv_tempdir/dscv/ri/331_1a/drivers/mmc/host/mmc_spi.o.c.prepared" 1
2 #include <linux/types.h>
3 #include <linux/dma-direction.h>
4 #include <verifier/rcv.h>
5 #include <verifier/set.h>
6 #include <verifier/map.h>
7
8 Set LDV_DMA_MAP_CALLS;
9
10 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_dma_map_page') maps page */
11 dma_addr_t ldv_dma_map_page(struct device *dev, struct page *page, size_t offset, size_t size, enum dma_data_direction dir) {
12 dma_addr_t nonedetermined;
13
14 nonedetermined = ldv_undef_ptr();
15
16 /* LDV_COMMENT_ASSERT Check that the previous dma_mapping call was checked */
17 ldv_assert(ldv_set_is_empty(LDV_DMA_MAP_CALLS));
18
19 ldv_set_add(LDV_DMA_MAP_CALLS, nonedetermined);
20
21 return nonedetermined;
22 }
23
24 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_dma_mapping_error') unmaps page */
25 int ldv_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) {
26
27 /* LDV_COMMENT_ASSERT Check that this dma_addr was produced by a previous dma_mapping call */
28 ldv_assert(ldv_set_contains(LDV_DMA_MAP_CALLS, dma_addr));
29 ldv_set_remove(LDV_DMA_MAP_CALLS, dma_addr);
30
31 int nonedetermined;
32
33 nonedetermined = ldv_undef_int();
34
35 return nonedetermined;
36 }
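/*
 * Illustrative sketch of the usage pattern these models enforce: each
 * mapping must be checked with dma_mapping_error() before another
 * mapping is created. Hypothetical driver-side code:
 */
static int example_checked_map(struct device *dev, void *buf, size_t len,
			       dma_addr_t *out)
{
	*out = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *out))
		return -ENOMEM;
	return 0;
}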
37
38
39
40 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_dma_map_single') maps pci_dma */
41 dma_addr_t ldv_dma_map_single(struct device *dev, void *cpu_addr, size_t size, enum dma_data_direction dir) {
42 dma_addr_t nonedetermined;
43
44 nonedetermined = ldv_undef_ptr();
45
46 /* LDV_COMMENT_ASSERT Check that the previous dma_mapping call was checked */
47 ldv_assert(ldv_set_is_empty(LDV_DMA_MAP_CALLS));
48
49 ldv_set_add(LDV_DMA_MAP_CALLS, nonedetermined);
50
51 return nonedetermined;
52 }
53
54 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_dma_map_single_attrs') maps pci_dma */
55 dma_addr_t ldv_dma_map_single_attrs(struct device *dev, void *ptr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs) {
56 dma_addr_t nonedetermined;
57
58 nonedetermined = ldv_undef_ptr();
59
60 /* LDV_COMMENT_ASSERT Check that the previous dma_mapping call was checked */
61 ldv_assert(ldv_set_is_empty(LDV_DMA_MAP_CALLS));
62
63 ldv_set_add(LDV_DMA_MAP_CALLS, nonedetermined);
64
65 return nonedetermined;
66 }
67
68 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_initialize') Initialize all module reference counters at the beginning */
69 void ldv_initialize(void) {
70 /* LDV_COMMENT_CHANGE_STATE All module reference counters have some initial value at the beginning */
71 ldv_set_init(LDV_DMA_MAP_CALLS);
72 }
73
74 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_check_final_state') Check that all module reference counters have their initial values at the end */
75 void ldv_check_final_state(void) {
76 /* LDV_COMMENT_ASSERT All incremented module reference counters should be decremented before module unloading */
77 ldv_assert(ldv_set_is_empty(LDV_DMA_MAP_CALLS));
78 }
1 /*
2 * linux/include/linux/mmc/host.h
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * Host driver specific definitions.
9 */
10 #ifndef LINUX_MMC_HOST_H
11 #define LINUX_MMC_HOST_H
12
13 #include <linux/leds.h>
14 #include <linux/mutex.h>
15 #include <linux/sched.h>
16 #include <linux/device.h>
17 #include <linux/fault-inject.h>
18
19 #include <linux/mmc/core.h>
20 #include <linux/mmc/pm.h>
21
22 struct mmc_ios {
23 unsigned int clock; /* clock rate */
24 unsigned short vdd;
25
26 /* vdd stores the bit number of the selected voltage range from below. */
27
28 unsigned char bus_mode; /* command output mode */
29
30 #define MMC_BUSMODE_OPENDRAIN 1
31 #define MMC_BUSMODE_PUSHPULL 2
32
33 unsigned char chip_select; /* SPI chip select */
34
35 #define MMC_CS_DONTCARE 0
36 #define MMC_CS_HIGH 1
37 #define MMC_CS_LOW 2
38
39 unsigned char power_mode; /* power supply mode */
40
41 #define MMC_POWER_OFF 0
42 #define MMC_POWER_UP 1
43 #define MMC_POWER_ON 2
44
45 unsigned char bus_width; /* data bus width */
46
47 #define MMC_BUS_WIDTH_1 0
48 #define MMC_BUS_WIDTH_4 2
49 #define MMC_BUS_WIDTH_8 3
50
51 unsigned char timing; /* timing specification used */
52
53 #define MMC_TIMING_LEGACY 0
54 #define MMC_TIMING_MMC_HS 1
55 #define MMC_TIMING_SD_HS 2
56 #define MMC_TIMING_UHS_SDR12 3
57 #define MMC_TIMING_UHS_SDR25 4
58 #define MMC_TIMING_UHS_SDR50 5
59 #define MMC_TIMING_UHS_SDR104 6
60 #define MMC_TIMING_UHS_DDR50 7
61 #define MMC_TIMING_MMC_HS200 8
62
63 #define MMC_SDR_MODE 0
64 #define MMC_1_2V_DDR_MODE 1
65 #define MMC_1_8V_DDR_MODE 2
66 #define MMC_1_2V_SDR_MODE 3
67 #define MMC_1_8V_SDR_MODE 4
68
69 unsigned char signal_voltage; /* signalling voltage (1.8V or 3.3V) */
70
71 #define MMC_SIGNAL_VOLTAGE_330 0
72 #define MMC_SIGNAL_VOLTAGE_180 1
73 #define MMC_SIGNAL_VOLTAGE_120 2
74
75 unsigned char drv_type; /* driver type (A, B, C, D) */
76
77 #define MMC_SET_DRIVER_TYPE_B 0
78 #define MMC_SET_DRIVER_TYPE_A 1
79 #define MMC_SET_DRIVER_TYPE_C 2
80 #define MMC_SET_DRIVER_TYPE_D 3
81 };
82
83 struct mmc_host_ops {
84 /*
85 * 'enable' is called when the host is claimed and 'disable' is called
86 * when the host is released. 'enable' and 'disable' are deprecated.
87 */
88 int (*enable)(struct mmc_host *host);
89 int (*disable)(struct mmc_host *host);
90 /*
91 * It is optional for the host to implement pre_req and post_req in
92 * order to support double buffering of requests (prepare one
93 * request while another request is active).
94 * pre_req() must always be followed by a post_req().
95 * To undo a call made to pre_req(), call post_req() with
96 * a nonzero err condition.
97 */
98 void (*post_req)(struct mmc_host *host, struct mmc_request *req,
99 int err);
100 void (*pre_req)(struct mmc_host *host, struct mmc_request *req,
101 bool is_first_req);
102 void (*request)(struct mmc_host *host, struct mmc_request *req);
103 /*
104 * Avoid calling these three functions too often or in a "fast path",
105 * since the underlying controller might implement them in an expensive
106 * and/or slow way.
107 *
108 * Also note that these functions might sleep, so don't call them
109 * in the atomic contexts!
110 *
111 * Return values for the get_ro callback should be:
112 * 0 for a read/write card
113 * 1 for a read-only card
114 * -ENOSYS when not supported (equal to NULL callback)
115 * or a negative errno value when something bad happened
116 *
117 * Return values for the get_cd callback should be:
118 * 0 for an absent card
119 * 1 for a present card
120 * -ENOSYS when not supported (equal to NULL callback)
121 * or a negative errno value when something bad happened
122 */
123 void (*set_ios)(struct mmc_host *host, struct mmc_ios *ios);
124 int (*get_ro)(struct mmc_host *host);
125 int (*get_cd)(struct mmc_host *host);
126
127 void (*enable_sdio_irq)(struct mmc_host *host, int enable);
128
129 /* optional callback for HC quirks */
130 void (*init_card)(struct mmc_host *host, struct mmc_card *card);
131
132 int (*start_signal_voltage_switch)(struct mmc_host *host, struct mmc_ios *ios);
133
134 /* Check if the card is pulling dat[0:3] low */
135 int (*card_busy)(struct mmc_host *host);
136
137 /* The tuning command opcode value is different for SD and eMMC cards */
138 int (*execute_tuning)(struct mmc_host *host, u32 opcode);
139 int (*select_drive_strength)(unsigned int max_dtr, int host_drv, int card_drv);
140 void (*hw_reset)(struct mmc_host *host);
141 void (*card_event)(struct mmc_host *host);
142 };
143
144 struct mmc_card;
145 struct device;
146
147 struct mmc_async_req {
148 /* active mmc request */
149 struct mmc_request *mrq;
150 /*
151 * Check error status of completed mmc request.
152 * Returns 0 on success, otherwise nonzero.
153 */
154 int (*err_check) (struct mmc_card *, struct mmc_async_req *);
155 };
156
157 /**
158 * struct mmc_slot - MMC slot functions
159 *
160 * @cd_irq: MMC/SD-card slot hotplug detection IRQ or -EINVAL
161 * @lock: protect the @handler_priv pointer
162 * @handler_priv: MMC/SD-card slot context
163 *
164 * Some MMC/SD host controllers implement slot-functions like card and
165 * write-protect detection natively. However, a large number of controllers
166 * leave these functions to the CPU. This struct provides a hook to attach
167 * such slot-function drivers.
168 */
169 struct mmc_slot {
170 int cd_irq;
171 struct mutex lock;
172 void *handler_priv;
173 };
174
175 /**
176 * mmc_context_info - synchronization details for mmc context
177 * @is_done_rcv wake up reason was done request
178 * @is_new_req wake up reason was new request
179 * @is_waiting_last_req mmc context waiting for single running request
180 * @wait wait queue
181 * @lock lock to protect data fields
182 */
183 struct mmc_context_info {
184 bool is_done_rcv;
185 bool is_new_req;
186 bool is_waiting_last_req;
187 wait_queue_head_t wait;
188 spinlock_t lock;
189 };
190
191 struct regulator;
192
193 struct mmc_supply {
194 struct regulator *vmmc; /* Card power supply */
195 struct regulator *vqmmc; /* Optional Vccq supply */
196 };
197
198 struct mmc_host {
199 struct device *parent;
200 struct device class_dev;
201 int index;
202 const struct mmc_host_ops *ops;
203 unsigned int f_min;
204 unsigned int f_max;
205 unsigned int f_init;
206 u32 ocr_avail;
207 u32 ocr_avail_sdio; /* SDIO-specific OCR */
208 u32 ocr_avail_sd; /* SD-specific OCR */
209 u32 ocr_avail_mmc; /* MMC-specific OCR */
210 struct notifier_block pm_notify;
211 u32 max_current_330;
212 u32 max_current_300;
213 u32 max_current_180;
214
215 #define MMC_VDD_165_195 0x00000080 /* VDD voltage 1.65 - 1.95 */
216 #define MMC_VDD_20_21 0x00000100 /* VDD voltage 2.0 ~ 2.1 */
217 #define MMC_VDD_21_22 0x00000200 /* VDD voltage 2.1 ~ 2.2 */
218 #define MMC_VDD_22_23 0x00000400 /* VDD voltage 2.2 ~ 2.3 */
219 #define MMC_VDD_23_24 0x00000800 /* VDD voltage 2.3 ~ 2.4 */
220 #define MMC_VDD_24_25 0x00001000 /* VDD voltage 2.4 ~ 2.5 */
221 #define MMC_VDD_25_26 0x00002000 /* VDD voltage 2.5 ~ 2.6 */
222 #define MMC_VDD_26_27 0x00004000 /* VDD voltage 2.6 ~ 2.7 */
223 #define MMC_VDD_27_28 0x00008000 /* VDD voltage 2.7 ~ 2.8 */
224 #define MMC_VDD_28_29 0x00010000 /* VDD voltage 2.8 ~ 2.9 */
225 #define MMC_VDD_29_30 0x00020000 /* VDD voltage 2.9 ~ 3.0 */
226 #define MMC_VDD_30_31 0x00040000 /* VDD voltage 3.0 ~ 3.1 */
227 #define MMC_VDD_31_32 0x00080000 /* VDD voltage 3.1 ~ 3.2 */
228 #define MMC_VDD_32_33 0x00100000 /* VDD voltage 3.2 ~ 3.3 */
229 #define MMC_VDD_33_34 0x00200000 /* VDD voltage 3.3 ~ 3.4 */
230 #define MMC_VDD_34_35 0x00400000 /* VDD voltage 3.4 ~ 3.5 */
231 #define MMC_VDD_35_36 0x00800000 /* VDD voltage 3.5 ~ 3.6 */
232
233 u32 caps; /* Host capabilities */
234
235 #define MMC_CAP_4_BIT_DATA (1 << 0) /* Can the host do 4 bit transfers */
236 #define MMC_CAP_MMC_HIGHSPEED (1 << 1) /* Can do MMC high-speed timing */
237 #define MMC_CAP_SD_HIGHSPEED (1 << 2) /* Can do SD high-speed timing */
238 #define MMC_CAP_SDIO_IRQ (1 << 3) /* Can signal pending SDIO IRQs */
239 #define MMC_CAP_SPI (1 << 4) /* Talks only SPI protocols */
240 #define MMC_CAP_NEEDS_POLL (1 << 5) /* Needs polling for card-detection */
241 #define MMC_CAP_8_BIT_DATA (1 << 6) /* Can the host do 8 bit transfers */
242 #define MMC_CAP_AGGRESSIVE_PM (1 << 7) /* Suspend (e)MMC/SD at idle */
243 #define MMC_CAP_NONREMOVABLE (1 << 8) /* Nonremovable e.g. eMMC */
244 #define MMC_CAP_WAIT_WHILE_BUSY (1 << 9) /* Waits while card is busy */
245 #define MMC_CAP_ERASE (1 << 10) /* Allow erase/trim commands */
246 #define MMC_CAP_1_8V_DDR (1 << 11) /* can support */
247 /* DDR mode at 1.8V */
248 #define MMC_CAP_1_2V_DDR (1 << 12) /* can support */
249 /* DDR mode at 1.2V */
250 #define MMC_CAP_POWER_OFF_CARD (1 << 13) /* Can power off after boot */
251 #define MMC_CAP_BUS_WIDTH_TEST (1 << 14) /* CMD14/CMD19 bus width ok */
252 #define MMC_CAP_UHS_SDR12 (1 << 15) /* Host supports UHS SDR12 mode */
253 #define MMC_CAP_UHS_SDR25 (1 << 16) /* Host supports UHS SDR25 mode */
254 #define MMC_CAP_UHS_SDR50 (1 << 17) /* Host supports UHS SDR50 mode */
255 #define MMC_CAP_UHS_SDR104 (1 << 18) /* Host supports UHS SDR104 mode */
256 #define MMC_CAP_UHS_DDR50 (1 << 19) /* Host supports UHS DDR50 mode */
257 #define MMC_CAP_RUNTIME_RESUME (1 << 20) /* Resume at runtime_resume. */
258 #define MMC_CAP_DRIVER_TYPE_A (1 << 23) /* Host supports Driver Type A */
259 #define MMC_CAP_DRIVER_TYPE_C (1 << 24) /* Host supports Driver Type C */
260 #define MMC_CAP_DRIVER_TYPE_D (1 << 25) /* Host supports Driver Type D */
261 #define MMC_CAP_CMD23 (1 << 30) /* CMD23 supported. */
262 #define MMC_CAP_HW_RESET (1 << 31) /* Hardware reset */
263
264 u32 caps2; /* More host capabilities */
265
266 #define MMC_CAP2_BOOTPART_NOACC (1 << 0) /* Boot partition no access */
267 #define MMC_CAP2_CACHE_CTRL (1 << 1) /* Allow cache control */
268 #define MMC_CAP2_FULL_PWR_CYCLE (1 << 2) /* Can do full power cycle */
269 #define MMC_CAP2_NO_MULTI_READ (1 << 3) /* Multiblock reads don't work */
270 #define MMC_CAP2_NO_SLEEP_CMD (1 << 4) /* Don't allow sleep command */
271 #define MMC_CAP2_HS200_1_8V_SDR (1 << 5) /* can support */
272 #define MMC_CAP2_HS200_1_2V_SDR (1 << 6) /* can support */
273 #define MMC_CAP2_HS200 (MMC_CAP2_HS200_1_8V_SDR | \
274 MMC_CAP2_HS200_1_2V_SDR)
275 #define MMC_CAP2_BROKEN_VOLTAGE (1 << 7) /* Use the broken voltage */
276 #define MMC_CAP2_HC_ERASE_SZ (1 << 9) /* High-capacity erase size */
277 #define MMC_CAP2_CD_ACTIVE_HIGH (1 << 10) /* Card-detect signal active high */
278 #define MMC_CAP2_RO_ACTIVE_HIGH (1 << 11) /* Write-protect signal active high */
279 #define MMC_CAP2_PACKED_RD (1 << 12) /* Allow packed read */
280 #define MMC_CAP2_PACKED_WR (1 << 13) /* Allow packed write */
281 #define MMC_CAP2_PACKED_CMD (MMC_CAP2_PACKED_RD | \
282 MMC_CAP2_PACKED_WR)
283 #define MMC_CAP2_NO_PRESCAN_POWERUP (1 << 14) /* Don't power up before scan */
284 #define MMC_CAP2_SANITIZE (1 << 15) /* Support Sanitize */
285
286 mmc_pm_flag_t pm_caps; /* supported pm features */
287
288 #ifdef CONFIG_MMC_CLKGATE
289 int clk_requests; /* internal reference counter */
290 unsigned int clk_delay; /* number of MCI clk hold cycles */
291 bool clk_gated; /* clock gated */
292 struct delayed_work clk_gate_work; /* delayed clock gate */
293 unsigned int clk_old; /* old clock value cache */
294 spinlock_t clk_lock; /* lock for clk fields */
295 struct mutex clk_gate_mutex; /* mutex for clock gating */
296 struct device_attribute clkgate_delay_attr;
297 unsigned long clkgate_delay;
298 #endif
299
300 /* host specific block data */
301 unsigned int max_seg_size; /* see blk_queue_max_segment_size */
302 unsigned short max_segs; /* see blk_queue_max_segments */
303 unsigned short unused;
304 unsigned int max_req_size; /* maximum number of bytes in one req */
305 unsigned int max_blk_size; /* maximum size of one mmc block */
306 unsigned int max_blk_count; /* maximum number of blocks in one req */
307 unsigned int max_discard_to; /* max. discard timeout in ms */
308
309 /* private data */
310 spinlock_t lock; /* lock for claim and bus ops */
311
312 struct mmc_ios ios; /* current io bus settings */
313
314 /* group bitfields together to minimize padding */
315 unsigned int use_spi_crc:1;
316 unsigned int claimed:1; /* host exclusively claimed */
317 unsigned int bus_dead:1; /* bus has been released */
318 #ifdef CONFIG_MMC_DEBUG
319 unsigned int removed:1; /* host is being removed */
320 #endif
321
322 int rescan_disable; /* disable card detection */
323 int rescan_entered; /* used with nonremovable devices */
324
325 struct mmc_card *card; /* device attached to this host */
326
327 wait_queue_head_t wq;
328 struct task_struct *claimer; /* task that has host claimed */
329 int claim_cnt; /* "claim" nesting count */
330
331 struct delayed_work detect;
332 int detect_change; /* card detect flag */
333 struct mmc_slot slot;
334
335 const struct mmc_bus_ops *bus_ops; /* current bus driver */
336 unsigned int bus_refs; /* reference counter */
337
338 unsigned int sdio_irqs;
339 struct task_struct *sdio_irq_thread;
340 bool sdio_irq_pending;
341 atomic_t sdio_irq_thread_abort;
342
343 mmc_pm_flag_t pm_flags; /* requested pm features */
344
345 struct led_trigger *led; /* activity led */
346
347 #ifdef CONFIG_REGULATOR
348 bool regulator_enabled; /* regulator state */
349 #endif
350 struct mmc_supply supply;
351
352 struct dentry *debugfs_root;
353
354 struct mmc_async_req *areq; /* active async req */
355 struct mmc_context_info context_info; /* async synchronization info */
356
357 #ifdef CONFIG_FAIL_MMC_REQUEST
358 struct fault_attr fail_mmc_request;
359 #endif
360
361 unsigned int actual_clock; /* Actual HC clock rate */
362
363 unsigned int slotno; /* used for sdio acpi binding */
364
365 unsigned long private[0] ____cacheline_aligned;
366 };
367
368 struct mmc_host *mmc_alloc_host(int extra, struct device *);
369 int mmc_add_host(struct mmc_host *);
370 void mmc_remove_host(struct mmc_host *);
371 void mmc_free_host(struct mmc_host *);
372 int mmc_of_parse(struct mmc_host *host);
373
374 static inline void *mmc_priv(struct mmc_host *host)
375 {
376 return (void *)host->private;
377 }
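/*
 * Editor's note: an illustrative sketch (not part of this header) of how a
 * host controller driver typically uses mmc_alloc_host()/mmc_priv() above.
 * "struct foo_host" and foo_probe() are hypothetical names.
 */
#if 0	/* example only */
static int foo_probe(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	struct foo_host *priv;

	/* The "extra" argument reserves room in host->private[]. */
	mmc = mmc_alloc_host(sizeof(struct foo_host), &pdev->dev);
	if (!mmc)
		return -ENOMEM;

	priv = mmc_priv(mmc);		/* points into mmc->private[] */

	mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SD_HIGHSPEED;
	mmc->max_segs = 128;
	mmc->max_blk_size = 512;

	return mmc_add_host(mmc);	/* make the host visible to the core */
}
#endif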
378
379 #define mmc_host_is_spi(host) ((host)->caps & MMC_CAP_SPI)
380
381 #define mmc_dev(x) ((x)->parent)
382 #define mmc_classdev(x) (&(x)->class_dev)
383 #define mmc_hostname(x) (dev_name(&(x)->class_dev))
384
385 int mmc_power_save_host(struct mmc_host *host);
386 int mmc_power_restore_host(struct mmc_host *host);
387
388 void mmc_detect_change(struct mmc_host *, unsigned long delay);
389 void mmc_request_done(struct mmc_host *, struct mmc_request *);
390
391 int mmc_cache_ctrl(struct mmc_host *, u8);
392
393 static inline void mmc_signal_sdio_irq(struct mmc_host *host)
394 {
395 host->ops->enable_sdio_irq(host, 0);
396 host->sdio_irq_pending = true;
397 wake_up_process(host->sdio_irq_thread);
398 }
399
400 #ifdef CONFIG_REGULATOR
401 int mmc_regulator_get_ocrmask(struct regulator *supply);
402 int mmc_regulator_set_ocr(struct mmc_host *mmc,
403 struct regulator *supply,
404 unsigned short vdd_bit);
405 int mmc_regulator_get_supply(struct mmc_host *mmc);
406 #else
407 static inline int mmc_regulator_get_ocrmask(struct regulator *supply)
408 {
409 return 0;
410 }
411
412 static inline int mmc_regulator_set_ocr(struct mmc_host *mmc,
413 struct regulator *supply,
414 unsigned short vdd_bit)
415 {
416 return 0;
417 }
418
419 static inline int mmc_regulator_get_supply(struct mmc_host *mmc)
420 {
421 return 0;
422 }
423 #endif
424
425 int mmc_pm_notify(struct notifier_block *notify_block, unsigned long, void *);
426
427 /* Module parameter */
428 extern bool mmc_assume_removable;
429
430 static inline int mmc_card_is_removable(struct mmc_host *host)
431 {
432 return !(host->caps & MMC_CAP_NONREMOVABLE) && mmc_assume_removable;
433 }
434
435 static inline int mmc_card_keep_power(struct mmc_host *host)
436 {
437 return host->pm_flags & MMC_PM_KEEP_POWER;
438 }
439
440 static inline int mmc_card_wake_sdio_irq(struct mmc_host *host)
441 {
442 return host->pm_flags & MMC_PM_WAKE_SDIO_IRQ;
443 }
444
445 static inline int mmc_host_cmd23(struct mmc_host *host)
446 {
447 return host->caps & MMC_CAP_CMD23;
448 }
449
450 static inline int mmc_boot_partition_access(struct mmc_host *host)
451 {
452 return !(host->caps2 & MMC_CAP2_BOOTPART_NOACC);
453 }
454
455 static inline int mmc_host_uhs(struct mmc_host *host)
456 {
457 return host->caps &
458 (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
459 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
460 MMC_CAP_UHS_DDR50);
461 }
462
463 static inline int mmc_host_packed_wr(struct mmc_host *host)
464 {
465 return host->caps2 & MMC_CAP2_PACKED_WR;
466 }
467
468 #ifdef CONFIG_MMC_CLKGATE
469 void mmc_host_clk_hold(struct mmc_host *host);
470 void mmc_host_clk_release(struct mmc_host *host);
471 unsigned int mmc_host_clk_rate(struct mmc_host *host);
472
473 #else
474 static inline void mmc_host_clk_hold(struct mmc_host *host)
475 {
476 }
477
478 static inline void mmc_host_clk_release(struct mmc_host *host)
479 {
480 }
481
482 static inline unsigned int mmc_host_clk_rate(struct mmc_host *host)
483 {
484 return host->ios.clock;
485 }
486 #endif
487 #endif /* LINUX_MMC_HOST_H */
1 /*
2 * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
3 *
4 * (C) SGI 2006, Christoph Lameter
5 * Cleaned up and restructured to ease the addition of alternative
6 * implementations of SLAB allocators.
7 * (C) Linux Foundation 2008-2013
8 * Unified interface for all slab allocators
9 */
10
11 #ifndef _LINUX_SLAB_H
12 #define _LINUX_SLAB_H
13
14 #include <linux/gfp.h>
15 #include <linux/types.h>
16 #include <linux/workqueue.h>
17
18
19 /*
20 * Flags to pass to kmem_cache_create().
21 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
22 */
23 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
24 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
25 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
26 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
27 #define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */
28 #define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */
29 #define SLAB_PANIC 0x00040000UL /* Panic if kmem_cache_create() fails */
30 /*
31 * SLAB_DESTROY_BY_RCU - **WARNING** READ THIS!
32 *
33 * This delays freeing the SLAB page by a grace period, it does _NOT_
34 * delay object freeing. This means that if you do kmem_cache_free()
35 * that memory location is free to be reused at any time. Thus it may
36 * be possible to see another object there in the same RCU grace period.
37 *
38 * This feature only ensures the memory location backing the object
39 * stays valid, the trick to using this is relying on an independent
40 * object validation pass. Something like:
41 *
42 * rcu_read_lock()
43 * again:
44 * obj = lockless_lookup(key);
45 * if (obj) {
46 * if (!try_get_ref(obj)) // might fail for free objects
47 * goto again;
48 *
49 * if (obj->key != key) { // not the object we expected
50 * put_ref(obj);
51 * goto again;
52 * }
53 * }
54 * rcu_read_unlock();
55 *
56 * This is useful if we need to approach a kernel structure obliquely,
57 * from its address obtained without the usual locking. We can lock
58 * the structure to stabilize it and check that it's still at the given address,
59 * but only if we can be sure that the memory has not meanwhile been reused
60 * for some other kind of object (which our subsystem's lock might corrupt).
61 *
62 * rcu_read_lock before reading the address, then rcu_read_unlock after
63 * taking the spinlock within the structure expected at that address.
64 */
65 #define SLAB_DESTROY_BY_RCU 0x00080000UL /* Defer freeing slabs to RCU */
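/*
 * Editor's note: an illustrative sketch (not part of this header) of the
 * validation pass described above, for a cache created with
 * SLAB_DESTROY_BY_RCU. "struct obj", obj_hash_lookup() and obj_put() are
 * hypothetical.
 */
#if 0	/* example only */
static struct obj *obj_lookup(unsigned long key)
{
	struct obj *o;

	rcu_read_lock();
again:
	o = obj_hash_lookup(key);			/* lockless lookup */
	if (o) {
		if (!atomic_inc_not_zero(&o->refcnt))	/* may fail for freed objects */
			goto again;
		if (o->key != key) {			/* memory was reused: retry */
			obj_put(o);
			goto again;
		}
	}
	rcu_read_unlock();
	return o;
}
#endif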
66 #define SLAB_MEM_SPREAD 0x00100000UL /* Spread some memory over cpuset */
67 #define SLAB_TRACE 0x00200000UL /* Trace allocations and frees */
68
69 /* Flag to prevent checks on free */
70 #ifdef CONFIG_DEBUG_OBJECTS
71 # define SLAB_DEBUG_OBJECTS 0x00400000UL
72 #else
73 # define SLAB_DEBUG_OBJECTS 0x00000000UL
74 #endif
75
76 #define SLAB_NOLEAKTRACE 0x00800000UL /* Avoid kmemleak tracing */
77
78 /* Don't track use of uninitialized memory */
79 #ifdef CONFIG_KMEMCHECK
80 # define SLAB_NOTRACK 0x01000000UL
81 #else
82 # define SLAB_NOTRACK 0x00000000UL
83 #endif
84 #ifdef CONFIG_FAILSLAB
85 # define SLAB_FAILSLAB 0x02000000UL /* Fault injection mark */
86 #else
87 # define SLAB_FAILSLAB 0x00000000UL
88 #endif
89
90 /* The following flags affect the page allocator grouping pages by mobility */
91 #define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */
92 #define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */
93 /*
94 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
95 *
96 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
97 *
98 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
99 * Both make kfree a no-op.
100 */
101 #define ZERO_SIZE_PTR ((void *)16)
102
103 #define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
104 (unsigned long)ZERO_SIZE_PTR)
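/*
 * Editor's note: an illustrative sketch (not part of this header) of the
 * ZERO_SIZE_PTR behaviour described above.
 */
#if 0	/* example only */
	void *p = kmalloc(0, GFP_KERNEL);	/* returns ZERO_SIZE_PTR, not NULL */

	if (ZERO_OR_NULL_PTR(p))
		/* either a failed or a zero-sized allocation: not dereferenceable */;
	kfree(p);				/* no-op, just like kfree(NULL) */
#endif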
105
106 #include <linux/kmemleak.h>
107
108 struct mem_cgroup;
109 /*
110 * struct kmem_cache related prototypes
111 */
112 void __init kmem_cache_init(void);
113 int slab_is_available(void);
114
115 struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
116 unsigned long,
117 void (*)(void *));
118 struct kmem_cache *
119 kmem_cache_create_memcg(struct mem_cgroup *, const char *, size_t, size_t,
120 unsigned long, void (*)(void *), struct kmem_cache *);
121 void kmem_cache_destroy(struct kmem_cache *);
122 int kmem_cache_shrink(struct kmem_cache *);
123 void kmem_cache_free(struct kmem_cache *, void *);
124
125 /*
126 * Please use this macro to create slab caches. Simply specify the
127 * name of the structure and maybe some flags that are listed above.
128 *
129 * The alignment of the struct determines object alignment. If you
130 * f.e. add ____cacheline_aligned_in_smp to the struct declaration
131 * then the objects will be properly aligned in SMP configurations.
132 */
133 #define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\
134 sizeof(struct __struct), __alignof__(struct __struct),\
135 (__flags), NULL)
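/*
 * Editor's note: an illustrative sketch (not part of this header) of the
 * KMEM_CACHE() helper described above. "struct foo_item" is hypothetical.
 */
#if 0	/* example only */
struct foo_item {
	struct list_head list;
	int value;
} ____cacheline_aligned_in_smp;	/* object alignment follows the struct */

static struct kmem_cache *foo_cache;

static int __init foo_init(void)
{
	/* Expands to kmem_cache_create("foo_item", sizeof, __alignof__, ...). */
	foo_cache = KMEM_CACHE(foo_item, SLAB_PANIC);
	return 0;
}
#endif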
136
137 /*
138 * Common kmalloc functions provided by all allocators
139 */
140 void * __must_check __krealloc(const void *, size_t, gfp_t);
141 void * __must_check krealloc(const void *, size_t, gfp_t);
142 void kfree(const void *);
143 void kzfree(const void *);
144 size_t ksize(const void *);
145
146 /*
147 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
148 * alignment larger than the alignment of a 64-bit integer.
149 * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that.
150 */
151 #if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
152 #define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
153 #define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
154 #define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
155 #else
156 #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
157 #endif
158
159 #ifdef CONFIG_SLOB
160 /*
161 * Common fields provided in kmem_cache by all slab allocators
162 * This struct is either used directly by the allocator (SLOB)
163 * or the allocator must include definitions for all fields
164 * provided in kmem_cache_common in their definition of kmem_cache.
165 *
166 * Once we can do anonymous structs (C11 standard) we could put an
167 * anonymous struct definition in these allocators so that the
168 * separate allocations in the kmem_cache structure of SLAB and
169 * SLUB are no longer needed.
170 */
171 struct kmem_cache {
172 unsigned int object_size;/* The original size of the object */
173 unsigned int size; /* The aligned/padded/added on size */
174 unsigned int align; /* Alignment as calculated */
175 unsigned long flags; /* Active flags on the slab */
176 const char *name; /* Slab name for sysfs */
177 int refcount; /* Use counter */
178 void (*ctor)(void *); /* Called on object slot creation */
179 struct list_head list; /* List of all slab caches on the system */
180 };
181
182 #endif /* CONFIG_SLOB */
183
184 /*
185 * Kmalloc array related definitions
186 */
187
188 #ifdef CONFIG_SLAB
189 /*
190 * The largest kmalloc size supported by the SLAB allocators is
191 * 32 megabytes (2^25), or the maximum allocatable page order if that is
192 * less than 32 MB.
193 *
194 * WARNING: It's not easy to increase this value since the allocators have
195 * to do various tricks to work around compiler limitations in order to
196 * ensure proper constant folding.
197 */
198 #define KMALLOC_SHIFT_HIGH ((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
199 (MAX_ORDER + PAGE_SHIFT - 1) : 25)
200 #define KMALLOC_SHIFT_MAX KMALLOC_SHIFT_HIGH
201 #ifndef KMALLOC_SHIFT_LOW
202 #define KMALLOC_SHIFT_LOW 5
203 #endif
204 #endif
205
206 #ifdef CONFIG_SLUB
207 /*
208 * SLUB directly allocates requests fitting into an order-1 page
209 * (PAGE_SIZE*2). Larger requests are passed to the page allocator.
210 */
211 #define KMALLOC_SHIFT_HIGH (PAGE_SHIFT + 1)
212 #define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT)
213 #ifndef KMALLOC_SHIFT_LOW
214 #define KMALLOC_SHIFT_LOW 3
215 #endif
216 #endif
217
218 #ifdef CONFIG_SLOB
219 /*
220 * SLOB passes all requests larger than one page to the page allocator.
221 * No kmalloc array is necessary since objects of different sizes can
222 * be allocated from the same page.
223 */
224 #define KMALLOC_SHIFT_HIGH PAGE_SHIFT
225 #define KMALLOC_SHIFT_MAX 30
226 #ifndef KMALLOC_SHIFT_LOW
227 #define KMALLOC_SHIFT_LOW 3
228 #endif
229 #endif
230
231 /* Maximum allocatable size */
232 #define KMALLOC_MAX_SIZE (1UL << KMALLOC_SHIFT_MAX)
233 /* Maximum size for which we actually use a slab cache */
234 #define KMALLOC_MAX_CACHE_SIZE (1UL << KMALLOC_SHIFT_HIGH)
235 /* Maximum order allocatable via the slab allocator */
236 #define KMALLOC_MAX_ORDER (KMALLOC_SHIFT_MAX - PAGE_SHIFT)
237
238 /*
239 * Kmalloc subsystem.
240 */
241 #ifndef KMALLOC_MIN_SIZE
242 #define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
243 #endif
244
245 #ifndef CONFIG_SLOB
246 extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
247 #ifdef CONFIG_ZONE_DMA
248 extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
249 #endif
250
251 /*
252 * Figure out which kmalloc slab an allocation of a certain size
253 * belongs to.
254 * 0 = zero alloc
255 * 1 = 65 .. 96 bytes
256 * 2 = 129 .. 192 bytes
257 * n = 2^(n-1) .. 2^n -1
258 */
259 static __always_inline int kmalloc_index(size_t size)
260 {
261 if (!size)
262 return 0;
263
264 if (size <= KMALLOC_MIN_SIZE)
265 return KMALLOC_SHIFT_LOW;
266
267 if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
268 return 1;
269 if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
270 return 2;
271 if (size <= 8) return 3;
272 if (size <= 16) return 4;
273 if (size <= 32) return 5;
274 if (size <= 64) return 6;
275 if (size <= 128) return 7;
276 if (size <= 256) return 8;
277 if (size <= 512) return 9;
278 if (size <= 1024) return 10;
279 if (size <= 2 * 1024) return 11;
280 if (size <= 4 * 1024) return 12;
281 if (size <= 8 * 1024) return 13;
282 if (size <= 16 * 1024) return 14;
283 if (size <= 32 * 1024) return 15;
284 if (size <= 64 * 1024) return 16;
285 if (size <= 128 * 1024) return 17;
286 if (size <= 256 * 1024) return 18;
287 if (size <= 512 * 1024) return 19;
288 if (size <= 1024 * 1024) return 20;
289 if (size <= 2 * 1024 * 1024) return 21;
290 if (size <= 4 * 1024 * 1024) return 22;
291 if (size <= 8 * 1024 * 1024) return 23;
292 if (size <= 16 * 1024 * 1024) return 24;
293 if (size <= 32 * 1024 * 1024) return 25;
294 if (size <= 64 * 1024 * 1024) return 26;
295 BUG();
296
297 /* Will never be reached. Needed because the compiler may complain */
298 return -1;
299 }
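/*
 * Editor's note: illustrative worked values (not part of this header),
 * assuming the default KMALLOC_MIN_SIZE of 8:
 *
 *	kmalloc_index(0)    == 0	zero-sized allocation
 *	kmalloc_index(80)   == 1	the 96-byte cache
 *	kmalloc_index(100)  == 7	the 128-byte cache
 *	kmalloc_index(5000) == 13	the 8192-byte cache
 */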
300 #endif /* !CONFIG_SLOB */
301
302 void *__kmalloc(size_t size, gfp_t flags);
303 void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
304
305 #ifdef CONFIG_NUMA
306 void *__kmalloc_node(size_t size, gfp_t flags, int node);
307 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
308 #else
309 static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
310 {
311 return __kmalloc(size, flags);
312 }
313
314 static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
315 {
316 return kmem_cache_alloc(s, flags);
317 }
318 #endif
319
320 #ifdef CONFIG_TRACING
321 extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
322
323 #ifdef CONFIG_NUMA
324 extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
325 gfp_t gfpflags,
326 int node, size_t size);
327 #else
328 static __always_inline void *
329 kmem_cache_alloc_node_trace(struct kmem_cache *s,
330 gfp_t gfpflags,
331 int node, size_t size)
332 {
333 return kmem_cache_alloc_trace(s, gfpflags, size);
334 }
335 #endif /* CONFIG_NUMA */
336
337 #else /* CONFIG_TRACING */
338 static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
339 gfp_t flags, size_t size)
340 {
341 return kmem_cache_alloc(s, flags);
342 }
343
344 static __always_inline void *
345 kmem_cache_alloc_node_trace(struct kmem_cache *s,
346 gfp_t gfpflags,
347 int node, size_t size)
348 {
349 return kmem_cache_alloc_node(s, gfpflags, node);
350 }
351 #endif /* CONFIG_TRACING */
352
353 #ifdef CONFIG_SLAB
354 #include <linux/slab_def.h>
355 #endif
356
357 #ifdef CONFIG_SLUB
358 #include <linux/slub_def.h>
359 #endif
360
361 static __always_inline void *
362 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
363 {
364 void *ret;
365
366 flags |= (__GFP_COMP | __GFP_KMEMCG);
367 ret = (void *) __get_free_pages(flags, order);
368 kmemleak_alloc(ret, size, 1, flags);
369 return ret;
370 }
371
372 #ifdef CONFIG_TRACING
373 extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
374 #else
375 static __always_inline void *
376 kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
377 {
378 return kmalloc_order(size, flags, order);
379 }
380 #endif
381
382 static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
383 {
384 unsigned int order = get_order(size);
385 return kmalloc_order_trace(size, flags, order);
386 }
387
388 /**
389 * kmalloc - allocate memory
390 * @size: how many bytes of memory are required.
391 * @flags: the type of memory to allocate.
392 *
393 * kmalloc is the normal method of allocating memory
394 * for objects smaller than page size in the kernel.
395 *
396 * The @flags argument may be one of:
397 *
398 * %GFP_USER - Allocate memory on behalf of user. May sleep.
399 *
400 * %GFP_KERNEL - Allocate normal kernel ram. May sleep.
401 *
402 * %GFP_ATOMIC - Allocation will not sleep. May use emergency pools.
403 * For example, use this inside interrupt handlers.
404 *
405 * %GFP_HIGHUSER - Allocate pages from high memory.
406 *
407 * %GFP_NOIO - Do not do any I/O at all while trying to get memory.
408 *
409 * %GFP_NOFS - Do not make any fs calls while trying to get memory.
410 *
411 * %GFP_NOWAIT - Allocation will not sleep.
412 *
413 * %__GFP_THISNODE - Allocate node-local memory only.
414 *
415 * %GFP_DMA - Allocation suitable for DMA.
416 * Should only be used for kmalloc() caches. Otherwise, use a
417 * slab created with SLAB_CACHE_DMA.
418 *
419 * It is also possible to set different flags by OR'ing
420 * in one or more of the following additional @flags:
421 *
422 * %__GFP_COLD - Request cache-cold pages instead of
423 * trying to return cache-warm pages.
424 *
425 * %__GFP_HIGH - This allocation has high priority and may use emergency pools.
426 *
427 * %__GFP_NOFAIL - Indicate that this allocation is in no way allowed to fail
428 * (think twice before using).
429 *
430 * %__GFP_NORETRY - If memory is not immediately available,
431 * then give up at once.
432 *
433 * %__GFP_NOWARN - If allocation fails, don't issue any warnings.
434 *
435 * %__GFP_REPEAT - If allocation fails initially, try once more before failing.
436 *
437 * There are other flags available as well, but these are not intended
438 * for general use, and so are not documented here. For a full list of
439 * potential flags, always refer to linux/gfp.h.
440 */
441 static __always_inline void *kmalloc(size_t size, gfp_t flags)
442 {
443 if (__builtin_constant_p(size)) {
444 if (size > KMALLOC_MAX_CACHE_SIZE)
445 return kmalloc_large(size, flags);
446 #ifndef CONFIG_SLOB
447 if (!(flags & GFP_DMA)) {
448 int index = kmalloc_index(size);
449
450 if (!index)
451 return ZERO_SIZE_PTR;
452
453 return kmem_cache_alloc_trace(kmalloc_caches[index],
454 flags, size);
455 }
456 #endif
457 }
458 return __kmalloc(size, flags);
459 }
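/*
 * Editor's note: an illustrative sketch (not part of this header) of
 * typical @flags choices for the kmalloc() call documented above.
 * "struct foo" is hypothetical.
 */
#if 0	/* example only */
	struct foo *f;

	f = kmalloc(sizeof(*f), GFP_KERNEL);	/* process context, may sleep */

	f = kmalloc(sizeof(*f), GFP_ATOMIC);	/* interrupt context, never sleeps */
	if (!f)
		return -ENOMEM;			/* atomic allocations fail more often */
#endif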
460
461 /*
462 * Determine the size used for the nth kmalloc cache.
463 * Return the size, or 0 if a kmalloc cache for that
464 * size does not exist.
465 */
466 static __always_inline int kmalloc_size(int n)
467 {
468 #ifndef CONFIG_SLOB
469 if (n > 2)
470 return 1 << n;
471
472 if (n == 1 && KMALLOC_MIN_SIZE <= 32)
473 return 96;
474
475 if (n == 2 && KMALLOC_MIN_SIZE <= 64)
476 return 192;
477 #endif
478 return 0;
479 }
480
481 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
482 {
483 #ifndef CONFIG_SLOB
484 if (__builtin_constant_p(size) &&
485 size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) {
486 int i = kmalloc_index(size);
487
488 if (!i)
489 return ZERO_SIZE_PTR;
490
491 return kmem_cache_alloc_node_trace(kmalloc_caches[i],
492 flags, node, size);
493 }
494 #endif
495 return __kmalloc_node(size, flags, node);
496 }
497
498 /*
499 * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
500 * Intended for arches that get misalignment faults even for 64 bit integer
501 * aligned buffers.
502 */
503 #ifndef ARCH_SLAB_MINALIGN
504 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
505 #endif
506 /*
507 * This is the main placeholder for memcg-related information in kmem caches.
508 * struct kmem_cache will hold a pointer to it, so the memory cost while
509 * disabled is 1 pointer. The runtime cost while enabled is bigger than it
510 * would otherwise be if this were bundled into kmem_cache: we'll need an
511 * extra pointer chase. But the trade-off clearly lies in favor of not
512 * penalizing non-users.
513 *
514 * Both the root cache and the child caches will have it. For the root cache,
515 * this will hold a dynamically allocated array large enough to hold
516 * information about the currently limited memcgs in the system. To allow the
517 * array to be accessed without taking any locks, on relocation we free the old
518 * version only after a grace period.
519 *
520 * Child caches will hold extra metadata needed for their operation. Fields are:
521 *
522 * @memcg: pointer to the memcg this cache belongs to
523 * @list: list_head for the list of all caches in this memcg
524 * @root_cache: pointer to the global, root cache, this cache was derived from
525 * @dead: set to true after the memcg dies; the cache may still be around.
526 * @nr_pages: number of pages that belong to this cache.
527 * @destroy: worker to be called whenever we are ready, or believe we may be
528 * ready, to destroy this cache.
529 */
530 struct memcg_cache_params {
531 bool is_root_cache;
532 union {
533 struct {
534 struct rcu_head rcu_head;
535 struct kmem_cache *memcg_caches[0];
536 };
537 struct {
538 struct mem_cgroup *memcg;
539 struct list_head list;
540 struct kmem_cache *root_cache;
541 bool dead;
542 atomic_t nr_pages;
543 struct work_struct destroy;
544 };
545 };
546 };
547
548 int memcg_update_all_caches(int num_memcgs);
549
550 struct seq_file;
551 int cache_show(struct kmem_cache *s, struct seq_file *m);
552 void print_slabinfo_header(struct seq_file *m);
553
554 /**
555 * kmalloc_array - allocate memory for an array.
556 * @n: number of elements.
557 * @size: element size.
558 * @flags: the type of memory to allocate (see kmalloc).
559 */
560 static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
561 {
562 if (size != 0 && n > SIZE_MAX / size)
563 return NULL;
564 return __kmalloc(n * size, flags);
565 }
566
567 /**
568 * kcalloc - allocate memory for an array. The memory is set to zero.
569 * @n: number of elements.
570 * @size: element size.
571 * @flags: the type of memory to allocate (see kmalloc).
572 */
573 static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
574 {
575 return kmalloc_array(n, size, flags | __GFP_ZERO);
576 }
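/*
 * Editor's note: an illustrative sketch (not part of this header). Unlike an
 * open-coded kmalloc(n * size, ...), which can silently wrap and
 * under-allocate, kmalloc_array()/kcalloc() return NULL on multiplication
 * overflow. "struct entry" and nr_entries are hypothetical.
 */
#if 0	/* example only */
	struct entry *tbl;

	tbl = kcalloc(nr_entries, sizeof(*tbl), GFP_KERNEL);	/* zeroed */
	if (!tbl)
		return -ENOMEM;	/* also taken when nr_entries * size overflows */
#endif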
577
578 /*
579 * kmalloc_track_caller is a special version of kmalloc that records the
580 * calling function of the routine calling it for slab leak tracking instead
581 * of just the calling function (confusing, eh?).
582 * It's useful when the call to kmalloc comes from a widely-used standard
583 * allocator where we care about the real place the memory allocation
584 * request comes from.
585 */
586 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
587 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
588 (defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
589 extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
590 #define kmalloc_track_caller(size, flags) \
591 __kmalloc_track_caller(size, flags, _RET_IP_)
592 #else
593 #define kmalloc_track_caller(size, flags) \
594 __kmalloc(size, flags)
595 #endif /* DEBUG_SLAB */
596
597 #ifdef CONFIG_NUMA
598 /*
599 * kmalloc_node_track_caller is a special version of kmalloc_node that
600 * records the calling function of the routine calling it for slab leak
601 * tracking instead of just the calling function (confusing, eh?).
602 * It's useful when the call to kmalloc_node comes from a widely-used
603 * standard allocator where we care about the real place the memory
604 * allocation request comes from.
605 */
606 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
607 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
608 (defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
609 extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
610 #define kmalloc_node_track_caller(size, flags, node) \
611 __kmalloc_node_track_caller(size, flags, node, \
612 _RET_IP_)
613 #else
614 #define kmalloc_node_track_caller(size, flags, node) \
615 __kmalloc_node(size, flags, node)
616 #endif
617
618 #else /* CONFIG_NUMA */
619
620 #define kmalloc_node_track_caller(size, flags, node) \
621 kmalloc_track_caller(size, flags)
622
623 #endif /* CONFIG_NUMA */
624
625 /*
626 * Shortcuts
627 */
628 static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
629 {
630 return kmem_cache_alloc(k, flags | __GFP_ZERO);
631 }
632
633 /**
634 * kzalloc - allocate memory. The memory is set to zero.
635 * @size: how many bytes of memory are required.
636 * @flags: the type of memory to allocate (see kmalloc).
637 */
638 static inline void *kzalloc(size_t size, gfp_t flags)
639 {
640 return kmalloc(size, flags | __GFP_ZERO);
641 }
642
643 /**
644 * kzalloc_node - allocate zeroed memory from a particular memory node.
645 * @size: how many bytes of memory are required.
646 * @flags: the type of memory to allocate (see kmalloc).
647 * @node: memory node from which to allocate
648 */
649 static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
650 {
651 return kmalloc_node(size, flags | __GFP_ZERO, node);
652 }
653
654 /*
655 * Determine the size of a slab object
656 */
657 static inline unsigned int kmem_cache_size(struct kmem_cache *s)
658 {
659 return s->object_size;
660 }
661
662 void __init kmem_cache_init_late(void);
663
664 #endif /* _LINUX_SLAB_H */
1 #ifndef __LINUX_SPI_MMC_SPI_H
2 #define __LINUX_SPI_MMC_SPI_H
3
4 #include <linux/spi/spi.h>
5 #include <linux/interrupt.h>
6
7 struct device;
8 struct mmc_host;
9
10 #define MMC_SPI_USE_CD_GPIO (1 << 0)
11 #define MMC_SPI_USE_RO_GPIO (1 << 1)
12 #define MMC_SPI_CD_GPIO_ACTIVE_LOW (1 << 2)
13 #define MMC_SPI_RO_GPIO_ACTIVE_LOW (1 << 3)
14
15 /* Put this in platform_data of a device being used to manage an MMC/SD
16 * card slot. (Modeled after PXA mmc glue; see that for usage examples.)
17 *
18 * REVISIT This is not a spi-specific notion. Any card slot should be
19 * able to handle it. If the MMC core doesn't adopt this kind of notion,
20 * switch the "struct device *" parameters over to "struct spi_device *".
21 */
22 struct mmc_spi_platform_data {
23 /* driver activation and (optional) card detect irq hookup */
24 int (*init)(struct device *,
25 irqreturn_t (*)(int, void *),
26 void *);
27 void (*exit)(struct device *, void *);
28
29 /*
30 * Card Detect and Read Only GPIOs. To enable debouncing on the card
31 * detect GPIO, set the cd_debounce to the debounce time in
32 * microseconds.
33 */
34 unsigned int flags;
35 unsigned int cd_gpio;
36 unsigned int cd_debounce;
37 unsigned int ro_gpio;
38
39 /* Capabilities to pass into mmc core (e.g. MMC_CAP_NEEDS_POLL). */
40 unsigned long caps;
41 unsigned long caps2;
42
43 /* how long to debounce card detect, in msecs */
44 u16 detect_delay;
45
46 /* power management */
47 u16 powerup_msecs; /* delay of up to 250 msec */
48 u32 ocr_mask; /* available voltages */
49 void (*setpower)(struct device *, unsigned int maskval);
50 };
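/*
 * Editor's note: an illustrative sketch (not part of this header) of a board
 * file wiring this platform data to an SPI device. The GPIO number, bus/chip
 * select and speed are hypothetical.
 */
#if 0	/* example only */
static struct mmc_spi_platform_data foo_mmc_pdata = {
	.flags		= MMC_SPI_USE_CD_GPIO | MMC_SPI_CD_GPIO_ACTIVE_LOW,
	.cd_gpio	= 42,				/* card-detect GPIO */
	.detect_delay	= 100,				/* debounce for 100 ms */
	.powerup_msecs	= 100,
	.ocr_mask	= MMC_VDD_32_33 | MMC_VDD_33_34,	/* 3.3V slot */
	.caps		= MMC_CAP_NEEDS_POLL,
};

static struct spi_board_info foo_spi_board_info[] __initdata = {
	{
		.modalias	= "mmc_spi",
		.max_speed_hz	= 25000000,
		.bus_num	= 1,
		.chip_select	= 0,
		.platform_data	= &foo_mmc_pdata,
	},
};
#endif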
51
52 #ifdef CONFIG_OF
53 extern struct mmc_spi_platform_data *mmc_spi_get_pdata(struct spi_device *spi);
54 extern void mmc_spi_put_pdata(struct spi_device *spi);
55 #else
56 static inline struct mmc_spi_platform_data *
57 mmc_spi_get_pdata(struct spi_device *spi)
58 {
59 return spi->dev.platform_data;
60 }
61 static inline void mmc_spi_put_pdata(struct spi_device *spi) {}
62 #endif /* CONFIG_OF */
63
64 #endif /* __LINUX_SPI_MMC_SPI_H */
1 #ifndef _LDV_RCV_H_
2 #define _LDV_RCV_H_
3
4 /* If expr evaluates to zero, ldv_assert() causes the program to reach the error
5 label, like the standard assert(). */
6 #define ldv_assert(expr) ((expr) ? 0 : ldv_error())
7
8 /* The error label wrapper. It is used because some static verifiers (like
9 BLAST) don't accept multiple error labels in a program. */
10 static inline void ldv_error(void)
11 {
12 LDV_ERROR: goto LDV_ERROR;
13 }
14
15 /* If expr evaluates to zero, ldv_assume() causes an infinite loop, which
16 verifiers do not explore. */
17 #define ldv_assume(expr) ((expr) ? 0 : ldv_stop())
18
19 /* Infinite loop that causes verifiers to skip such paths. */
20 static inline void ldv_stop(void) {
21 LDV_STOP: goto LDV_STOP;
22 }
23
24 /* Special nondeterministic functions. */
25 int ldv_undef_int(void);
26 void *ldv_undef_ptr(void);
27 unsigned long ldv_undef_ulong(void);
28 long ldv_undef_long(void);
29 /* Return nondeterministic negative integer number. */
30 static inline int ldv_undef_int_negative(void)
31 {
32 int ret = ldv_undef_int();
33
34 ldv_assume(ret < 0);
35
36 return ret;
37 }
38 /* Return nondeterministic nonpositive integer number. */
39 static inline int ldv_undef_int_nonpositive(void)
40 {
41 int ret = ldv_undef_int();
42
43 ldv_assume(ret <= 0);
44
45 return ret;
46 }
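/*
 * Editor's note: an illustrative sketch (not part of this header) of how a
 * rule model typically combines these helpers. ldv_foo_state and the
 * ldv_foo_*() functions are hypothetical.
 */
#if 0	/* example only */
static int ldv_foo_state;	/* 0 = released, 1 = acquired */

void ldv_foo_acquire(void)
{
	/* A double acquire drives the verifier to LDV_ERROR. */
	ldv_assert(ldv_foo_state == 0);
	ldv_foo_state = 1;
}

int ldv_foo_try_acquire(void)
{
	int ret = ldv_undef_int();	/* verifier explores every outcome */

	if (ret == 0)
		ldv_foo_state = 1;	/* model the success path */
	return ret;
}
#endif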
47
48 /* Add an explicit model for the __builtin_expect GCC function. Without the
49 model its return value will be treated as nondeterministic by verifiers. */
50 static inline long __builtin_expect(long exp, long c)
51 {
52 return exp;
53 }
54
55 /* This function causes the program to exit abnormally. GCC implements this
56 function by using a target-dependent mechanism (such as intentionally executing
57 an illegal instruction) or by calling abort. The mechanism used may vary from
58 release to release so you should not rely on any particular implementation.
59 http://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html */
60 static inline void __builtin_trap(void)
61 {
62 ldv_assert(0);
63 }
64
65 /* The constant is used to simulate an error of the ldv_undef_ptr() function. */
66 #define LDV_PTR_MAX 2012
67
68 #endif /* _LDV_RCV_H_ */
Here is an explanation of a rule violation that arose while checking your driver against the corresponding kernel.
Note that it may be a false positive, i.e. there may be no real error at all. Please analyze the given error trace and the related source code to understand whether there is an error in your driver.
The Error trace column contains the path on which the given rule is violated. You can expand/collapse certain entity classes by clicking the corresponding checkboxes in the main menu or in the advanced Others menu, and you can expand/collapse each particular entity by clicking on +/-. Hovering over an entity shows a tip. The error trace is also bound to the related source code: line numbers may be shown as links on the left, and you can click them to open the corresponding lines in the source code.
The Source code column contains the contents of the files related to the error trace: the source code of your driver (note that there are some LDV modifications at the end), kernel headers and the rule model. Tabs show the currently opened file and the other available files; hovering over a tab shows the full file name, and clicking it shows that file's content.
Kernel | Module | Rule | Verifier | Verdict | Status | Timestamp | Bug report |
linux-3.14.1.tar.xz | drivers/mmc/host/mmc_spi.ko | 331_1a | CPAchecker | Bug | Fixed | 2016-02-06 02:39:45 | L0217 |
Comment
reported: 6 Feb 2016