Error Trace
Bug #124
{ 20 typedef unsigned char __u8; 23 typedef unsigned short __u16; 25 typedef int __s32; 26 typedef unsigned int __u32; 30 typedef unsigned long long __u64; 15 typedef signed char s8; 16 typedef unsigned char u8; 19 typedef unsigned short u16; 21 typedef int s32; 22 typedef unsigned int u32; 24 typedef long long s64; 25 typedef unsigned long long u64; 14 typedef long __kernel_long_t; 15 typedef unsigned long __kernel_ulong_t; 27 typedef int __kernel_pid_t; 48 typedef unsigned int __kernel_uid32_t; 49 typedef unsigned int __kernel_gid32_t; 71 typedef __kernel_ulong_t __kernel_size_t; 72 typedef __kernel_long_t __kernel_ssize_t; 87 typedef long long __kernel_loff_t; 88 typedef __kernel_long_t __kernel_time_t; 89 typedef __kernel_long_t __kernel_clock_t; 90 typedef int __kernel_timer_t; 91 typedef int __kernel_clockid_t; 12 typedef __u32 __kernel_dev_t; 15 typedef __kernel_dev_t dev_t; 18 typedef unsigned short umode_t; 21 typedef __kernel_pid_t pid_t; 26 typedef __kernel_clockid_t clockid_t; 29 typedef _Bool bool; 31 typedef __kernel_uid32_t uid_t; 32 typedef __kernel_gid32_t gid_t; 45 typedef __kernel_loff_t loff_t; 54 typedef __kernel_size_t size_t; 59 typedef __kernel_ssize_t ssize_t; 69 typedef __kernel_time_t time_t; 102 typedef __s32 int32_t; 108 typedef __u32 uint32_t; 133 typedef unsigned long sector_t; 134 typedef unsigned long blkcnt_t; 152 typedef u64 dma_addr_t; 157 typedef unsigned int gfp_t; 158 typedef unsigned int fmode_t; 159 typedef unsigned int oom_flags_t; 162 typedef u64 phys_addr_t; 167 typedef phys_addr_t resource_size_t; 177 struct __anonstruct_atomic_t_6 { int counter; } ; 177 typedef struct __anonstruct_atomic_t_6 atomic_t; 182 struct __anonstruct_atomic64_t_7 { long counter; } ; 182 typedef struct __anonstruct_atomic64_t_7 atomic64_t; 183 struct list_head { struct list_head *next; struct list_head *prev; } ; 188 struct hlist_node ; 188 struct hlist_head { struct hlist_node *first; } ; 192 struct hlist_node { struct hlist_node *next; struct hlist_node **pprev; } ; 203 struct callback_head { struct callback_head *next; void (*func)(struct callback_head *); } ; 234 enum dma_data_direction { DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2, DMA_NONE = 3 } ; 5 struct device ; 5 struct page ; 7 struct dma_attrs ; 13 typedef unsigned long kernel_ulong_t; 14 struct pci_device_id { __u32 vendor; __u32 device; __u32 subvendor; __u32 subdevice; __u32 class; __u32 class_mask; kernel_ulong_t driver_data; } ; 186 struct acpi_device_id { __u8 id[9U]; kernel_ulong_t driver_data; __u32 cls; __u32 cls_msk; } ; 229 struct of_device_id { char name[32U]; char type[32U]; char compatible[128U]; const void *data; } ; 131 typedef void (*ctor_fn_t)(); 148 struct resource { resource_size_t start; resource_size_t end; const char *name; unsigned long flags; struct resource *parent; struct resource *sibling; struct resource *child; } ; 250 struct kernel_symbol { unsigned long value; const char *name; } ; 33 struct module ; 72 struct pt_regs { unsigned long r15; unsigned long r14; unsigned long r13; unsigned long r12; unsigned long bp; unsigned long bx; unsigned long r11; unsigned long r10; unsigned long r9; unsigned long r8; unsigned long ax; unsigned long cx; unsigned long dx; unsigned long si; unsigned long di; unsigned long orig_ax; unsigned long ip; unsigned long cs; unsigned long flags; unsigned long sp; unsigned long ss; } ; 66 struct __anonstruct____missing_field_name_12 { unsigned int a; unsigned int b; } ; 66 struct __anonstruct____missing_field_name_13 { u16 limit0; u16 
base0; unsigned char base1; unsigned char type; unsigned char s; unsigned char dpl; unsigned char p; unsigned char limit; unsigned char avl; unsigned char l; unsigned char d; unsigned char g; unsigned char base2; } ; 66 union __anonunion____missing_field_name_11 { struct __anonstruct____missing_field_name_12 __annonCompField4; struct __anonstruct____missing_field_name_13 __annonCompField5; } ; 66 struct desc_struct { union __anonunion____missing_field_name_11 __annonCompField6; } ; 12 typedef unsigned long pteval_t; 13 typedef unsigned long pmdval_t; 15 typedef unsigned long pgdval_t; 16 typedef unsigned long pgprotval_t; 18 struct __anonstruct_pte_t_14 { pteval_t pte; } ; 18 typedef struct __anonstruct_pte_t_14 pte_t; 20 struct pgprot { pgprotval_t pgprot; } ; 221 typedef struct pgprot pgprot_t; 223 struct __anonstruct_pgd_t_15 { pgdval_t pgd; } ; 223 typedef struct __anonstruct_pgd_t_15 pgd_t; 262 struct __anonstruct_pmd_t_17 { pmdval_t pmd; } ; 262 typedef struct __anonstruct_pmd_t_17 pmd_t; 390 typedef struct page *pgtable_t; 401 struct file ; 414 struct seq_file ; 452 struct thread_struct ; 454 struct mm_struct ; 455 struct task_struct ; 456 struct cpumask ; 20 struct qspinlock { atomic_t val; } ; 33 typedef struct qspinlock arch_spinlock_t; 34 struct qrwlock { atomic_t cnts; arch_spinlock_t wait_lock; } ; 14 typedef struct qrwlock arch_rwlock_t; 683 struct cpuinfo_x86 ; 234 struct _ddebug { const char *modname; const char *function; const char *filename; const char *format; unsigned int lineno; unsigned char flags; } ; 420 struct file_operations ; 432 struct completion ; 27 union __anonunion___u_19 { struct list_head *__val; char __c[1U]; } ; 17 struct lockdep_map ; 26 union __anonunion___u_35 { int __val; char __c[1U]; } ; 38 union __anonunion___u_37 { int __val; char __c[1U]; } ; 23 typedef atomic64_t atomic_long_t; 186 struct bug_entry { int bug_addr_disp; int file_disp; unsigned short line; unsigned short flags; } ; 55 struct stack_trace { unsigned int nr_entries; unsigned int max_entries; unsigned long *entries; int skip; } ; 28 struct lockdep_subclass_key { char __one_byte; } ; 53 struct lock_class_key { struct lockdep_subclass_key subkeys[8U]; } ; 59 struct lock_class { struct list_head hash_entry; struct list_head lock_entry; struct lockdep_subclass_key *key; unsigned int subclass; unsigned int dep_gen_id; unsigned long usage_mask; struct stack_trace usage_traces[13U]; struct list_head locks_after; struct list_head locks_before; unsigned int version; unsigned long ops; const char *name; int name_version; unsigned long contention_point[4U]; unsigned long contending_point[4U]; } ; 144 struct lockdep_map { struct lock_class_key *key; struct lock_class *class_cache[2U]; const char *name; int cpu; unsigned long ip; } ; 205 struct held_lock { u64 prev_chain_key; unsigned long acquire_ip; struct lockdep_map *instance; struct lockdep_map *nest_lock; u64 waittime_stamp; u64 holdtime_stamp; unsigned short class_idx; unsigned char irq_context; unsigned char trylock; unsigned char read; unsigned char check; unsigned char hardirqs_off; unsigned short references; unsigned int pin_count; } ; 546 struct raw_spinlock { arch_spinlock_t raw_lock; unsigned int magic; unsigned int owner_cpu; void *owner; struct lockdep_map dep_map; } ; 32 typedef struct raw_spinlock raw_spinlock_t; 33 struct __anonstruct____missing_field_name_47 { u8 __padding[24U]; struct lockdep_map dep_map; } ; 33 union __anonunion____missing_field_name_46 { struct raw_spinlock rlock; struct 
__anonstruct____missing_field_name_47 __annonCompField7; } ; 33 struct spinlock { union __anonunion____missing_field_name_46 __annonCompField8; } ; 76 typedef struct spinlock spinlock_t; 23 struct __anonstruct_rwlock_t_48 { arch_rwlock_t raw_lock; unsigned int magic; unsigned int owner_cpu; void *owner; struct lockdep_map dep_map; } ; 23 typedef struct __anonstruct_rwlock_t_48 rwlock_t; 24 struct math_emu_info { long ___orig_eip; struct pt_regs *regs; } ; 14 struct __anonstruct_pfn_t_52 { unsigned long val; } ; 14 typedef struct __anonstruct_pfn_t_52 pfn_t; 328 struct cpumask { unsigned long bits[128U]; } ; 15 typedef struct cpumask cpumask_t; 656 typedef struct cpumask *cpumask_var_t; 82 struct static_key { atomic_t enabled; } ; 264 struct tracepoint_func { void *func; void *data; int prio; } ; 18 struct tracepoint { const char *name; struct static_key key; void (*regfunc)(); void (*unregfunc)(); struct tracepoint_func *funcs; } ; 260 struct fregs_state { u32 cwd; u32 swd; u32 twd; u32 fip; u32 fcs; u32 foo; u32 fos; u32 st_space[20U]; u32 status; } ; 26 struct __anonstruct____missing_field_name_61 { u64 rip; u64 rdp; } ; 26 struct __anonstruct____missing_field_name_62 { u32 fip; u32 fcs; u32 foo; u32 fos; } ; 26 union __anonunion____missing_field_name_60 { struct __anonstruct____missing_field_name_61 __annonCompField14; struct __anonstruct____missing_field_name_62 __annonCompField15; } ; 26 union __anonunion____missing_field_name_63 { u32 padding1[12U]; u32 sw_reserved[12U]; } ; 26 struct fxregs_state { u16 cwd; u16 swd; u16 twd; u16 fop; union __anonunion____missing_field_name_60 __annonCompField16; u32 mxcsr; u32 mxcsr_mask; u32 st_space[32U]; u32 xmm_space[64U]; u32 padding[12U]; union __anonunion____missing_field_name_63 __annonCompField17; } ; 66 struct swregs_state { u32 cwd; u32 swd; u32 twd; u32 fip; u32 fcs; u32 foo; u32 fos; u32 st_space[20U]; u8 ftop; u8 changed; u8 lookahead; u8 no_update; u8 rm; u8 alimit; struct math_emu_info *info; u32 entry_eip; } ; 214 struct xstate_header { u64 xfeatures; u64 xcomp_bv; u64 reserved[6U]; } ; 220 struct xregs_state { struct fxregs_state i387; struct xstate_header header; u8 extended_state_area[0U]; } ; 235 union fpregs_state { struct fregs_state fsave; struct fxregs_state fxsave; struct swregs_state soft; struct xregs_state xsave; u8 __padding[4096U]; } ; 252 struct fpu { unsigned int last_cpu; unsigned char fpstate_active; unsigned char fpregs_active; unsigned char counter; union fpregs_state state; } ; 81 struct cpuinfo_x86 { __u8 x86; __u8 x86_vendor; __u8 x86_model; __u8 x86_mask; int x86_tlbsize; __u8 x86_virt_bits; __u8 x86_phys_bits; __u8 x86_coreid_bits; __u32 extended_cpuid_level; int cpuid_level; __u32 x86_capability[17U]; char x86_vendor_id[16U]; char x86_model_id[64U]; int x86_cache_size; int x86_cache_alignment; int x86_cache_max_rmid; int x86_cache_occ_scale; int x86_power; unsigned long loops_per_jiffy; u16 x86_max_cores; u16 apicid; u16 initial_apicid; u16 x86_clflush_size; u16 booted_cores; u16 phys_proc_id; u16 cpu_core_id; u8 compute_unit_id; u16 cpu_index; u32 microcode; } ; 170 struct seq_operations ; 369 struct perf_event ; 370 struct thread_struct { struct desc_struct tls_array[3U]; unsigned long sp0; unsigned long sp; unsigned short es; unsigned short ds; unsigned short fsindex; unsigned short gsindex; unsigned long fs; unsigned long gs; struct perf_event *ptrace_bps[4U]; unsigned long debugreg6; unsigned long ptrace_dr7; unsigned long cr2; unsigned long trap_nr; unsigned long error_code; unsigned long 
*io_bitmap_ptr; unsigned long iopl; unsigned int io_bitmap_max; struct fpu fpu; } ; 13 struct optimistic_spin_queue { atomic_t tail; } ; 39 struct mutex { atomic_t count; spinlock_t wait_lock; struct list_head wait_list; struct task_struct *owner; void *magic; struct lockdep_map dep_map; } ; 67 struct mutex_waiter { struct list_head list; struct task_struct *task; void *magic; } ; 177 struct timespec ; 178 struct compat_timespec ; 179 struct __anonstruct_futex_77 { u32 *uaddr; u32 val; u32 flags; u32 bitset; u64 time; u32 *uaddr2; } ; 179 struct __anonstruct_nanosleep_78 { clockid_t clockid; struct timespec *rmtp; struct compat_timespec *compat_rmtp; u64 expires; } ; 179 struct pollfd ; 179 struct __anonstruct_poll_79 { struct pollfd *ufds; int nfds; int has_timeout; unsigned long tv_sec; unsigned long tv_nsec; } ; 179 union __anonunion____missing_field_name_76 { struct __anonstruct_futex_77 futex; struct __anonstruct_nanosleep_78 nanosleep; struct __anonstruct_poll_79 poll; } ; 179 struct restart_block { long int (*fn)(struct restart_block *); union __anonunion____missing_field_name_76 __annonCompField20; } ; 416 struct seqcount { unsigned int sequence; struct lockdep_map dep_map; } ; 52 typedef struct seqcount seqcount_t; 404 struct __anonstruct_seqlock_t_92 { struct seqcount seqcount; spinlock_t lock; } ; 404 typedef struct __anonstruct_seqlock_t_92 seqlock_t; 12 struct __wait_queue ; 12 typedef struct __wait_queue wait_queue_t; 15 struct __wait_queue { unsigned int flags; void *private; int (*func)(wait_queue_t *, unsigned int, int, void *); struct list_head task_list; } ; 38 struct __wait_queue_head { spinlock_t lock; struct list_head task_list; } ; 43 typedef struct __wait_queue_head wait_queue_head_t; 1221 struct completion { unsigned int done; wait_queue_head_t wait; } ; 105 struct timespec { __kernel_time_t tv_sec; long tv_nsec; } ; 446 union ktime { s64 tv64; } ; 41 typedef union ktime ktime_t; 1147 union __anonunion____missing_field_name_93 { unsigned long bitmap[4U]; struct callback_head callback_head; } ; 1147 struct idr_layer { int prefix; int layer; struct idr_layer *ary[256U]; int count; union __anonunion____missing_field_name_93 __annonCompField21; } ; 41 struct idr { struct idr_layer *hint; struct idr_layer *top; int layers; int cur; spinlock_t lock; int id_free_cnt; struct idr_layer *id_free; } ; 124 struct ida_bitmap { long nr_busy; unsigned long bitmap[15U]; } ; 167 struct ida { struct idr idr; struct ida_bitmap *free_bitmap; } ; 199 struct rb_node { unsigned long __rb_parent_color; struct rb_node *rb_right; struct rb_node *rb_left; } ; 41 struct rb_root { struct rb_node *rb_node; } ; 98 struct dentry ; 99 struct iattr ; 100 struct vm_area_struct ; 101 struct super_block ; 102 struct file_system_type ; 103 struct kernfs_open_node ; 104 struct kernfs_iattrs ; 127 struct kernfs_root ; 127 struct kernfs_elem_dir { unsigned long subdirs; struct rb_root children; struct kernfs_root *root; } ; 85 struct kernfs_node ; 85 struct kernfs_elem_symlink { struct kernfs_node *target_kn; } ; 89 struct kernfs_ops ; 89 struct kernfs_elem_attr { const struct kernfs_ops *ops; struct kernfs_open_node *open; loff_t size; struct kernfs_node *notify_next; } ; 96 union __anonunion____missing_field_name_100 { struct kernfs_elem_dir dir; struct kernfs_elem_symlink symlink; struct kernfs_elem_attr attr; } ; 96 struct kernfs_node { atomic_t count; atomic_t active; struct lockdep_map dep_map; struct kernfs_node *parent; const char *name; struct rb_node rb; const void *ns; unsigned int hash; union 
__anonunion____missing_field_name_100 __annonCompField22; void *priv; unsigned short flags; umode_t mode; unsigned int ino; struct kernfs_iattrs *iattr; } ; 138 struct kernfs_syscall_ops { int (*remount_fs)(struct kernfs_root *, int *, char *); int (*show_options)(struct seq_file *, struct kernfs_root *); int (*mkdir)(struct kernfs_node *, const char *, umode_t ); int (*rmdir)(struct kernfs_node *); int (*rename)(struct kernfs_node *, struct kernfs_node *, const char *); } ; 155 struct kernfs_root { struct kernfs_node *kn; unsigned int flags; struct ida ino_ida; struct kernfs_syscall_ops *syscall_ops; struct list_head supers; wait_queue_head_t deactivate_waitq; } ; 171 struct vm_operations_struct ; 171 struct kernfs_open_file { struct kernfs_node *kn; struct file *file; void *priv; struct mutex mutex; int event; struct list_head list; char *prealloc_buf; size_t atomic_write_len; bool mmapped; const struct vm_operations_struct *vm_ops; } ; 188 struct kernfs_ops { int (*seq_show)(struct seq_file *, void *); void * (*seq_start)(struct seq_file *, loff_t *); void * (*seq_next)(struct seq_file *, void *, loff_t *); void (*seq_stop)(struct seq_file *, void *); ssize_t (*read)(struct kernfs_open_file *, char *, size_t , loff_t ); size_t atomic_write_len; bool prealloc; ssize_t (*write)(struct kernfs_open_file *, char *, size_t , loff_t ); int (*mmap)(struct kernfs_open_file *, struct vm_area_struct *); struct lock_class_key lockdep_key; } ; 284 struct inode ; 493 struct sock ; 494 struct kobject ; 495 enum kobj_ns_type { KOBJ_NS_TYPE_NONE = 0, KOBJ_NS_TYPE_NET = 1, KOBJ_NS_TYPES = 2 } ; 501 struct kobj_ns_type_operations { enum kobj_ns_type type; bool (*current_may_mount)(); void * (*grab_current_ns)(); const void * (*netlink_ns)(struct sock *); const void * (*initial_ns)(); void (*drop_ns)(void *); } ; 83 struct user_namespace ; 22 struct __anonstruct_kuid_t_103 { uid_t val; } ; 22 typedef struct __anonstruct_kuid_t_103 kuid_t; 27 struct __anonstruct_kgid_t_104 { gid_t val; } ; 27 typedef struct __anonstruct_kgid_t_104 kgid_t; 139 struct kstat { u64 ino; dev_t dev; umode_t mode; unsigned int nlink; kuid_t uid; kgid_t gid; dev_t rdev; loff_t size; struct timespec atime; struct timespec mtime; struct timespec ctime; unsigned long blksize; unsigned long long blocks; } ; 36 struct bin_attribute ; 37 struct attribute { const char *name; umode_t mode; bool ignore_lockdep; struct lock_class_key *key; struct lock_class_key skey; } ; 37 struct attribute_group { const char *name; umode_t (*is_visible)(struct kobject *, struct attribute *, int); umode_t (*is_bin_visible)(struct kobject *, struct bin_attribute *, int); struct attribute **attrs; struct bin_attribute **bin_attrs; } ; 92 struct bin_attribute { struct attribute attr; size_t size; void *private; ssize_t (*read)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t , size_t ); ssize_t (*write)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t , size_t ); int (*mmap)(struct file *, struct kobject *, struct bin_attribute *, struct vm_area_struct *); } ; 165 struct sysfs_ops { ssize_t (*show)(struct kobject *, struct attribute *, char *); ssize_t (*store)(struct kobject *, struct attribute *, const char *, size_t ); } ; 530 struct kref { atomic_t refcount; } ; 139 struct timer_list { struct hlist_node entry; unsigned long expires; void (*function)(unsigned long); unsigned long data; u32 flags; int slack; int start_pid; void *start_site; char start_comm[16U]; struct lockdep_map lockdep_map; } ; 238 struct 
hrtimer ; 239 enum hrtimer_restart ; 838 struct nsproxy ; 259 struct workqueue_struct ; 260 struct work_struct ; 54 struct work_struct { atomic_long_t data; struct list_head entry; void (*func)(struct work_struct *); struct lockdep_map lockdep_map; } ; 107 struct delayed_work { struct work_struct work; struct timer_list timer; struct workqueue_struct *wq; int cpu; } ; 52 struct kset ; 52 struct kobj_type ; 52 struct kobject { const char *name; struct list_head entry; struct kobject *parent; struct kset *kset; struct kobj_type *ktype; struct kernfs_node *sd; struct kref kref; struct delayed_work release; unsigned char state_initialized; unsigned char state_in_sysfs; unsigned char state_add_uevent_sent; unsigned char state_remove_uevent_sent; unsigned char uevent_suppress; } ; 115 struct kobj_type { void (*release)(struct kobject *); const struct sysfs_ops *sysfs_ops; struct attribute **default_attrs; const struct kobj_ns_type_operations * (*child_ns_type)(struct kobject *); const void * (*namespace)(struct kobject *); } ; 123 struct kobj_uevent_env { char *argv[3U]; char *envp[32U]; int envp_idx; char buf[2048U]; int buflen; } ; 131 struct kset_uevent_ops { const int (*filter)(struct kset *, struct kobject *); const const char * (*name)(struct kset *, struct kobject *); const int (*uevent)(struct kset *, struct kobject *, struct kobj_uevent_env *); } ; 148 struct kset { struct list_head list; spinlock_t list_lock; struct kobject kobj; const struct kset_uevent_ops *uevent_ops; } ; 223 struct klist_node ; 37 struct klist_node { void *n_klist; struct list_head n_node; struct kref n_ref; } ; 95 struct __anonstruct_nodemask_t_107 { unsigned long bits[16U]; } ; 95 typedef struct __anonstruct_nodemask_t_107 nodemask_t; 357 enum node_states { N_POSSIBLE = 0, N_ONLINE = 1, N_NORMAL_MEMORY = 2, N_HIGH_MEMORY = 2, N_MEMORY = 3, N_CPU = 4, NR_NODE_STATES = 5 } ; 520 struct path ; 521 struct seq_file { char *buf; size_t size; size_t from; size_t count; size_t pad_until; loff_t index; loff_t read_pos; u64 version; struct mutex lock; const struct seq_operations *op; int poll_event; struct user_namespace *user_ns; void *private; } ; 35 struct seq_operations { void * (*start)(struct seq_file *, loff_t *); void (*stop)(struct seq_file *, void *); void * (*next)(struct seq_file *, void *, loff_t *); int (*show)(struct seq_file *, void *); } ; 227 struct pinctrl ; 228 struct pinctrl_state ; 194 struct dev_pin_info { struct pinctrl *p; struct pinctrl_state *default_state; struct pinctrl_state *init_state; struct pinctrl_state *sleep_state; struct pinctrl_state *idle_state; } ; 58 struct pm_message { int event; } ; 64 typedef struct pm_message pm_message_t; 65 struct dev_pm_ops { int (*prepare)(struct device *); void (*complete)(struct device *); int (*suspend)(struct device *); int (*resume)(struct device *); int (*freeze)(struct device *); int (*thaw)(struct device *); int (*poweroff)(struct device *); int (*restore)(struct device *); int (*suspend_late)(struct device *); int (*resume_early)(struct device *); int (*freeze_late)(struct device *); int (*thaw_early)(struct device *); int (*poweroff_late)(struct device *); int (*restore_early)(struct device *); int (*suspend_noirq)(struct device *); int (*resume_noirq)(struct device *); int (*freeze_noirq)(struct device *); int (*thaw_noirq)(struct device *); int (*poweroff_noirq)(struct device *); int (*restore_noirq)(struct device *); int (*runtime_suspend)(struct device *); int (*runtime_resume)(struct device *); int (*runtime_idle)(struct device *); } ; 320 
enum rpm_status { RPM_ACTIVE = 0, RPM_RESUMING = 1, RPM_SUSPENDED = 2, RPM_SUSPENDING = 3 } ; 327 enum rpm_request { RPM_REQ_NONE = 0, RPM_REQ_IDLE = 1, RPM_REQ_SUSPEND = 2, RPM_REQ_AUTOSUSPEND = 3, RPM_REQ_RESUME = 4 } ; 335 struct wakeup_source ; 336 struct wake_irq ; 337 struct pm_domain_data ; 338 struct pm_subsys_data { spinlock_t lock; unsigned int refcount; struct list_head clock_list; struct pm_domain_data *domain_data; } ; 556 struct dev_pm_qos ; 556 struct dev_pm_info { pm_message_t power_state; unsigned char can_wakeup; unsigned char async_suspend; bool is_prepared; bool is_suspended; bool is_noirq_suspended; bool is_late_suspended; bool ignore_children; bool early_init; bool direct_complete; spinlock_t lock; struct list_head entry; struct completion completion; struct wakeup_source *wakeup; bool wakeup_path; bool syscore; bool no_pm_callbacks; struct timer_list suspend_timer; unsigned long timer_expires; struct work_struct work; wait_queue_head_t wait_queue; struct wake_irq *wakeirq; atomic_t usage_count; atomic_t child_count; unsigned char disable_depth; unsigned char idle_notification; unsigned char request_pending; unsigned char deferred_resume; unsigned char run_wake; unsigned char runtime_auto; unsigned char no_callbacks; unsigned char irq_safe; unsigned char use_autosuspend; unsigned char timer_autosuspends; unsigned char memalloc_noio; enum rpm_request request; enum rpm_status runtime_status; int runtime_error; int autosuspend_delay; unsigned long last_busy; unsigned long active_jiffies; unsigned long suspended_jiffies; unsigned long accounting_timestamp; struct pm_subsys_data *subsys_data; void (*set_latency_tolerance)(struct device *, s32 ); struct dev_pm_qos *qos; } ; 616 struct dev_pm_domain { struct dev_pm_ops ops; void (*detach)(struct device *, bool ); int (*activate)(struct device *); void (*sync)(struct device *); void (*dismiss)(struct device *); } ; 726 struct rw_semaphore ; 727 struct rw_semaphore { long count; struct list_head wait_list; raw_spinlock_t wait_lock; struct optimistic_spin_queue osq; struct task_struct *owner; struct lockdep_map dep_map; } ; 169 struct pci_dev ; 133 struct pci_bus ; 25 struct ldt_struct ; 25 struct __anonstruct_mm_context_t_172 { struct ldt_struct *ldt; unsigned short ia32_compat; struct mutex lock; void *vdso; atomic_t perf_rdpmc_allowed; } ; 25 typedef struct __anonstruct_mm_context_t_172 mm_context_t; 22 struct bio_vec ; 1211 struct llist_node ; 64 struct llist_node { struct llist_node *next; } ; 17 struct call_single_data { struct llist_node llist; void (*func)(void *); void *info; unsigned int flags; } ; 559 struct dma_map_ops ; 559 struct dev_archdata { struct dma_map_ops *dma_ops; void *iommu; } ; 24 struct device_private ; 25 struct device_driver ; 26 struct driver_private ; 27 struct class ; 28 struct subsys_private ; 29 struct bus_type ; 30 struct device_node ; 31 struct fwnode_handle ; 32 struct iommu_ops ; 33 struct iommu_group ; 61 struct device_attribute ; 61 struct bus_type { const char *name; const char *dev_name; struct device *dev_root; struct device_attribute *dev_attrs; const struct attribute_group **bus_groups; const struct attribute_group **dev_groups; const struct attribute_group **drv_groups; int (*match)(struct device *, struct device_driver *); int (*uevent)(struct device *, struct kobj_uevent_env *); int (*probe)(struct device *); int (*remove)(struct device *); void (*shutdown)(struct device *); int (*online)(struct device *); int (*offline)(struct device *); int (*suspend)(struct device *, 
pm_message_t ); int (*resume)(struct device *); const struct dev_pm_ops *pm; const struct iommu_ops *iommu_ops; struct subsys_private *p; struct lock_class_key lock_key; } ; 139 struct device_type ; 198 enum probe_type { PROBE_DEFAULT_STRATEGY = 0, PROBE_PREFER_ASYNCHRONOUS = 1, PROBE_FORCE_SYNCHRONOUS = 2 } ; 204 struct device_driver { const char *name; struct bus_type *bus; struct module *owner; const char *mod_name; bool suppress_bind_attrs; enum probe_type probe_type; const struct of_device_id *of_match_table; const struct acpi_device_id *acpi_match_table; int (*probe)(struct device *); int (*remove)(struct device *); void (*shutdown)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct attribute_group **groups; const struct dev_pm_ops *pm; struct driver_private *p; } ; 354 struct class_attribute ; 354 struct class { const char *name; struct module *owner; struct class_attribute *class_attrs; const struct attribute_group **dev_groups; struct kobject *dev_kobj; int (*dev_uevent)(struct device *, struct kobj_uevent_env *); char * (*devnode)(struct device *, umode_t *); void (*class_release)(struct class *); void (*dev_release)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct kobj_ns_type_operations *ns_type; const void * (*namespace)(struct device *); const struct dev_pm_ops *pm; struct subsys_private *p; } ; 447 struct class_attribute { struct attribute attr; ssize_t (*show)(struct class *, struct class_attribute *, char *); ssize_t (*store)(struct class *, struct class_attribute *, const char *, size_t ); } ; 515 struct device_type { const char *name; const struct attribute_group **groups; int (*uevent)(struct device *, struct kobj_uevent_env *); char * (*devnode)(struct device *, umode_t *, kuid_t *, kgid_t *); void (*release)(struct device *); const struct dev_pm_ops *pm; } ; 543 struct device_attribute { struct attribute attr; ssize_t (*show)(struct device *, struct device_attribute *, char *); ssize_t (*store)(struct device *, struct device_attribute *, const char *, size_t ); } ; 684 struct device_dma_parameters { unsigned int max_segment_size; unsigned long segment_boundary_mask; } ; 693 struct irq_domain ; 693 struct dma_coherent_mem ; 693 struct cma ; 693 struct device { struct device *parent; struct device_private *p; struct kobject kobj; const char *init_name; const struct device_type *type; struct mutex mutex; struct bus_type *bus; struct device_driver *driver; void *platform_data; void *driver_data; struct dev_pm_info power; struct dev_pm_domain *pm_domain; struct irq_domain *msi_domain; struct dev_pin_info *pins; struct list_head msi_list; int numa_node; u64 *dma_mask; u64 coherent_dma_mask; unsigned long dma_pfn_offset; struct device_dma_parameters *dma_parms; struct list_head dma_pools; struct dma_coherent_mem *dma_mem; struct cma *cma_area; struct dev_archdata archdata; struct device_node *of_node; struct fwnode_handle *fwnode; dev_t devt; u32 id; spinlock_t devres_lock; struct list_head devres_head; struct klist_node knode_class; struct class *class; const struct attribute_group **groups; void (*release)(struct device *); struct iommu_group *iommu_group; bool offline_disabled; bool offline; } ; 847 struct wakeup_source { const char *name; struct list_head entry; spinlock_t lock; struct wake_irq *wakeirq; struct timer_list timer; unsigned long timer_expires; ktime_t total_time; ktime_t max_time; ktime_t last_time; ktime_t start_prevent_time; ktime_t 
prevent_sleep_time; unsigned long event_count; unsigned long active_count; unsigned long relax_count; unsigned long expire_count; unsigned long wakeup_count; bool active; bool autosleep_enabled; } ; 60 struct kmem_cache ; 58 struct arch_uprobe_task { unsigned long saved_scratch_register; unsigned int saved_trap_nr; unsigned int saved_tf; } ; 66 enum uprobe_task_state { UTASK_RUNNING = 0, UTASK_SSTEP = 1, UTASK_SSTEP_ACK = 2, UTASK_SSTEP_TRAPPED = 3 } ; 73 struct __anonstruct____missing_field_name_207 { struct arch_uprobe_task autask; unsigned long vaddr; } ; 73 struct __anonstruct____missing_field_name_208 { struct callback_head dup_xol_work; unsigned long dup_xol_addr; } ; 73 union __anonunion____missing_field_name_206 { struct __anonstruct____missing_field_name_207 __annonCompField37; struct __anonstruct____missing_field_name_208 __annonCompField38; } ; 73 struct uprobe ; 73 struct return_instance ; 73 struct uprobe_task { enum uprobe_task_state state; union __anonunion____missing_field_name_206 __annonCompField39; struct uprobe *active_uprobe; unsigned long xol_vaddr; struct return_instance *return_instances; unsigned int depth; } ; 94 struct return_instance { struct uprobe *uprobe; unsigned long func; unsigned long stack; unsigned long orig_ret_vaddr; bool chained; struct return_instance *next; } ; 110 struct xol_area ; 111 struct uprobes_state { struct xol_area *xol_area; } ; 150 struct address_space ; 151 struct mem_cgroup ; 152 union __anonunion____missing_field_name_209 { struct address_space *mapping; void *s_mem; atomic_t compound_mapcount; } ; 152 union __anonunion____missing_field_name_211 { unsigned long index; void *freelist; } ; 152 struct __anonstruct____missing_field_name_215 { unsigned short inuse; unsigned short objects; unsigned char frozen; } ; 152 union __anonunion____missing_field_name_214 { atomic_t _mapcount; struct __anonstruct____missing_field_name_215 __annonCompField42; int units; } ; 152 struct __anonstruct____missing_field_name_213 { union __anonunion____missing_field_name_214 __annonCompField43; atomic_t _count; } ; 152 union __anonunion____missing_field_name_212 { unsigned long counters; struct __anonstruct____missing_field_name_213 __annonCompField44; unsigned int active; } ; 152 struct __anonstruct____missing_field_name_210 { union __anonunion____missing_field_name_211 __annonCompField41; union __anonunion____missing_field_name_212 __annonCompField45; } ; 152 struct dev_pagemap ; 152 struct __anonstruct____missing_field_name_217 { struct page *next; int pages; int pobjects; } ; 152 struct __anonstruct____missing_field_name_218 { unsigned long compound_head; unsigned int compound_dtor; unsigned int compound_order; } ; 152 struct __anonstruct____missing_field_name_219 { unsigned long __pad; pgtable_t pmd_huge_pte; } ; 152 union __anonunion____missing_field_name_216 { struct list_head lru; struct dev_pagemap *pgmap; struct __anonstruct____missing_field_name_217 __annonCompField47; struct callback_head callback_head; struct __anonstruct____missing_field_name_218 __annonCompField48; struct __anonstruct____missing_field_name_219 __annonCompField49; } ; 152 union __anonunion____missing_field_name_220 { unsigned long private; spinlock_t *ptl; struct kmem_cache *slab_cache; } ; 152 struct page { unsigned long flags; union __anonunion____missing_field_name_209 __annonCompField40; struct __anonstruct____missing_field_name_210 __annonCompField46; union __anonunion____missing_field_name_216 __annonCompField50; union __anonunion____missing_field_name_220 
__annonCompField51; struct mem_cgroup *mem_cgroup; } ; 191 struct page_frag { struct page *page; __u32 offset; __u32 size; } ; 276 struct userfaultfd_ctx ; 276 struct vm_userfaultfd_ctx { struct userfaultfd_ctx *ctx; } ; 283 struct __anonstruct_shared_221 { struct rb_node rb; unsigned long rb_subtree_last; } ; 283 struct anon_vma ; 283 struct mempolicy ; 283 struct vm_area_struct { unsigned long vm_start; unsigned long vm_end; struct vm_area_struct *vm_next; struct vm_area_struct *vm_prev; struct rb_node vm_rb; unsigned long rb_subtree_gap; struct mm_struct *vm_mm; pgprot_t vm_page_prot; unsigned long vm_flags; struct __anonstruct_shared_221 shared; struct list_head anon_vma_chain; struct anon_vma *anon_vma; const struct vm_operations_struct *vm_ops; unsigned long vm_pgoff; struct file *vm_file; void *vm_private_data; struct mempolicy *vm_policy; struct vm_userfaultfd_ctx vm_userfaultfd_ctx; } ; 356 struct core_thread { struct task_struct *task; struct core_thread *next; } ; 361 struct core_state { atomic_t nr_threads; struct core_thread dumper; struct completion startup; } ; 375 struct task_rss_stat { int events; int count[4U]; } ; 383 struct mm_rss_stat { atomic_long_t count[4U]; } ; 388 struct kioctx_table ; 389 struct linux_binfmt ; 389 struct mmu_notifier_mm ; 389 struct mm_struct { struct vm_area_struct *mmap; struct rb_root mm_rb; u32 vmacache_seqnum; unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); unsigned long mmap_base; unsigned long mmap_legacy_base; unsigned long task_size; unsigned long highest_vm_end; pgd_t *pgd; atomic_t mm_users; atomic_t mm_count; atomic_long_t nr_ptes; atomic_long_t nr_pmds; int map_count; spinlock_t page_table_lock; struct rw_semaphore mmap_sem; struct list_head mmlist; unsigned long hiwater_rss; unsigned long hiwater_vm; unsigned long total_vm; unsigned long locked_vm; unsigned long pinned_vm; unsigned long data_vm; unsigned long exec_vm; unsigned long stack_vm; unsigned long def_flags; unsigned long start_code; unsigned long end_code; unsigned long start_data; unsigned long end_data; unsigned long start_brk; unsigned long brk; unsigned long start_stack; unsigned long arg_start; unsigned long arg_end; unsigned long env_start; unsigned long env_end; unsigned long saved_auxv[46U]; struct mm_rss_stat rss_stat; struct linux_binfmt *binfmt; cpumask_var_t cpu_vm_mask_var; mm_context_t context; unsigned long flags; struct core_state *core_state; spinlock_t ioctx_lock; struct kioctx_table *ioctx_table; struct task_struct *owner; struct file *exe_file; struct mmu_notifier_mm *mmu_notifier_mm; struct cpumask cpumask_allocation; unsigned long numa_next_scan; unsigned long numa_scan_offset; int numa_scan_seq; bool tlb_flush_pending; struct uprobes_state uprobes_state; void *bd_addr; atomic_long_t hugetlb_usage; } ; 22 struct kernel_cap_struct { __u32 cap[2U]; } ; 25 typedef struct kernel_cap_struct kernel_cap_t; 84 struct plist_node { int prio; struct list_head prio_list; struct list_head node_list; } ; 4 typedef unsigned long cputime_t; 25 struct sem_undo_list ; 25 struct sysv_sem { struct sem_undo_list *undo_list; } ; 78 struct user_struct ; 26 struct sysv_shm { struct list_head shm_clist; } ; 24 struct __anonstruct_sigset_t_224 { unsigned long sig[1U]; } ; 24 typedef struct __anonstruct_sigset_t_224 sigset_t; 25 struct siginfo ; 17 typedef void __signalfn_t(int); 18 typedef __signalfn_t *__sighandler_t; 20 typedef void __restorefn_t(); 21 typedef __restorefn_t *__sigrestore_t; 34 union sigval { 
int sival_int; void *sival_ptr; } ; 10 typedef union sigval sigval_t; 11 struct __anonstruct__kill_226 { __kernel_pid_t _pid; __kernel_uid32_t _uid; } ; 11 struct __anonstruct__timer_227 { __kernel_timer_t _tid; int _overrun; char _pad[0U]; sigval_t _sigval; int _sys_private; } ; 11 struct __anonstruct__rt_228 { __kernel_pid_t _pid; __kernel_uid32_t _uid; sigval_t _sigval; } ; 11 struct __anonstruct__sigchld_229 { __kernel_pid_t _pid; __kernel_uid32_t _uid; int _status; __kernel_clock_t _utime; __kernel_clock_t _stime; } ; 11 struct __anonstruct__addr_bnd_231 { void *_lower; void *_upper; } ; 11 struct __anonstruct__sigfault_230 { void *_addr; short _addr_lsb; struct __anonstruct__addr_bnd_231 _addr_bnd; } ; 11 struct __anonstruct__sigpoll_232 { long _band; int _fd; } ; 11 struct __anonstruct__sigsys_233 { void *_call_addr; int _syscall; unsigned int _arch; } ; 11 union __anonunion__sifields_225 { int _pad[28U]; struct __anonstruct__kill_226 _kill; struct __anonstruct__timer_227 _timer; struct __anonstruct__rt_228 _rt; struct __anonstruct__sigchld_229 _sigchld; struct __anonstruct__sigfault_230 _sigfault; struct __anonstruct__sigpoll_232 _sigpoll; struct __anonstruct__sigsys_233 _sigsys; } ; 11 struct siginfo { int si_signo; int si_errno; int si_code; union __anonunion__sifields_225 _sifields; } ; 113 typedef struct siginfo siginfo_t; 22 struct sigpending { struct list_head list; sigset_t signal; } ; 242 struct sigaction { __sighandler_t sa_handler; unsigned long sa_flags; __sigrestore_t sa_restorer; sigset_t sa_mask; } ; 256 struct k_sigaction { struct sigaction sa; } ; 442 enum pid_type { PIDTYPE_PID = 0, PIDTYPE_PGID = 1, PIDTYPE_SID = 2, PIDTYPE_MAX = 3 } ; 449 struct pid_namespace ; 449 struct upid { int nr; struct pid_namespace *ns; struct hlist_node pid_chain; } ; 56 struct pid { atomic_t count; unsigned int level; struct hlist_head tasks[3U]; struct callback_head rcu; struct upid numbers[1U]; } ; 68 struct pid_link { struct hlist_node node; struct pid *pid; } ; 174 struct percpu_counter { raw_spinlock_t lock; s64 count; struct list_head list; s32 *counters; } ; 53 struct seccomp_filter ; 54 struct seccomp { int mode; struct seccomp_filter *filter; } ; 40 struct rt_mutex_waiter ; 41 struct rlimit { __kernel_ulong_t rlim_cur; __kernel_ulong_t rlim_max; } ; 11 struct timerqueue_node { struct rb_node node; ktime_t expires; } ; 12 struct timerqueue_head { struct rb_root head; struct timerqueue_node *next; } ; 50 struct hrtimer_clock_base ; 51 struct hrtimer_cpu_base ; 60 enum hrtimer_restart { HRTIMER_NORESTART = 0, HRTIMER_RESTART = 1 } ; 65 struct hrtimer { struct timerqueue_node node; ktime_t _softexpires; enum hrtimer_restart (*function)(struct hrtimer *); struct hrtimer_clock_base *base; unsigned long state; int start_pid; void *start_site; char start_comm[16U]; } ; 123 struct hrtimer_clock_base { struct hrtimer_cpu_base *cpu_base; int index; clockid_t clockid; struct timerqueue_head active; ktime_t (*get_time)(); ktime_t offset; } ; 156 struct hrtimer_cpu_base { raw_spinlock_t lock; seqcount_t seq; struct hrtimer *running; unsigned int cpu; unsigned int active_bases; unsigned int clock_was_set_seq; bool migration_enabled; bool nohz_active; unsigned char in_hrtirq; unsigned char hres_active; unsigned char hang_detected; ktime_t expires_next; struct hrtimer *next_timer; unsigned int nr_events; unsigned int nr_retries; unsigned int nr_hangs; unsigned int max_hang_time; struct hrtimer_clock_base clock_base[4U]; } ; 466 struct task_io_accounting { u64 rchar; u64 wchar; u64 syscr; u64 
syscw; u64 read_bytes; u64 write_bytes; u64 cancelled_write_bytes; } ; 45 struct latency_record { unsigned long backtrace[12U]; unsigned int count; unsigned long time; unsigned long max; } ; 39 struct assoc_array_ptr ; 39 struct assoc_array { struct assoc_array_ptr *root; unsigned long nr_leaves_on_tree; } ; 31 typedef int32_t key_serial_t; 34 typedef uint32_t key_perm_t; 35 struct key ; 36 struct signal_struct ; 37 struct cred ; 38 struct key_type ; 42 struct keyring_index_key { struct key_type *type; const char *description; size_t desc_len; } ; 91 union key_payload { void *rcu_data0; void *data[4U]; } ; 128 union __anonunion____missing_field_name_252 { struct list_head graveyard_link; struct rb_node serial_node; } ; 128 struct key_user ; 128 union __anonunion____missing_field_name_253 { time_t expiry; time_t revoked_at; } ; 128 struct __anonstruct____missing_field_name_255 { struct key_type *type; char *description; } ; 128 union __anonunion____missing_field_name_254 { struct keyring_index_key index_key; struct __anonstruct____missing_field_name_255 __annonCompField54; } ; 128 struct __anonstruct____missing_field_name_257 { struct list_head name_link; struct assoc_array keys; } ; 128 union __anonunion____missing_field_name_256 { union key_payload payload; struct __anonstruct____missing_field_name_257 __annonCompField56; int reject_error; } ; 128 struct key { atomic_t usage; key_serial_t serial; union __anonunion____missing_field_name_252 __annonCompField52; struct rw_semaphore sem; struct key_user *user; void *security; union __anonunion____missing_field_name_253 __annonCompField53; time_t last_used_at; kuid_t uid; kgid_t gid; key_perm_t perm; unsigned short quotalen; unsigned short datalen; unsigned long flags; union __anonunion____missing_field_name_254 __annonCompField55; union __anonunion____missing_field_name_256 __annonCompField57; } ; 354 struct audit_context ; 27 struct group_info { atomic_t usage; int ngroups; int nblocks; kgid_t small_block[32U]; kgid_t *blocks[0U]; } ; 90 struct cred { atomic_t usage; atomic_t subscribers; void *put_addr; unsigned int magic; kuid_t uid; kgid_t gid; kuid_t suid; kgid_t sgid; kuid_t euid; kgid_t egid; kuid_t fsuid; kgid_t fsgid; unsigned int securebits; kernel_cap_t cap_inheritable; kernel_cap_t cap_permitted; kernel_cap_t cap_effective; kernel_cap_t cap_bset; kernel_cap_t cap_ambient; unsigned char jit_keyring; struct key *session_keyring; struct key *process_keyring; struct key *thread_keyring; struct key *request_key_auth; void *security; struct user_struct *user; struct user_namespace *user_ns; struct group_info *group_info; struct callback_head rcu; } ; 377 struct percpu_ref ; 55 typedef void percpu_ref_func_t(struct percpu_ref *); 68 struct percpu_ref { atomic_long_t count; unsigned long percpu_count_ptr; percpu_ref_func_t *release; percpu_ref_func_t *confirm_switch; bool force_atomic; struct callback_head rcu; } ; 327 enum rcu_sync_type { RCU_SYNC = 0, RCU_SCHED_SYNC = 1, RCU_BH_SYNC = 2 } ; 333 struct rcu_sync { int gp_state; int gp_count; wait_queue_head_t gp_wait; int cb_state; struct callback_head cb_head; enum rcu_sync_type gp_type; } ; 65 struct percpu_rw_semaphore { struct rcu_sync rss; unsigned int *fast_read_ctr; struct rw_semaphore rw_sem; atomic_t slow_read_ctr; wait_queue_head_t write_waitq; } ; 54 struct cgroup ; 55 struct cgroup_root ; 56 struct cgroup_subsys ; 57 struct cgroup_taskset ; 100 struct cgroup_file { struct kernfs_node *kn; } ; 89 struct cgroup_subsys_state { struct cgroup *cgroup; struct cgroup_subsys *ss; 
struct percpu_ref refcnt; struct cgroup_subsys_state *parent; struct list_head sibling; struct list_head children; int id; unsigned int flags; u64 serial_nr; struct callback_head callback_head; struct work_struct destroy_work; } ; 134 struct css_set { atomic_t refcount; struct hlist_node hlist; struct list_head tasks; struct list_head mg_tasks; struct list_head cgrp_links; struct cgroup *dfl_cgrp; struct cgroup_subsys_state *subsys[13U]; struct list_head mg_preload_node; struct list_head mg_node; struct cgroup *mg_src_cgrp; struct css_set *mg_dst_cset; struct list_head e_cset_node[13U]; struct list_head task_iters; struct callback_head callback_head; } ; 210 struct cgroup { struct cgroup_subsys_state self; unsigned long flags; int id; int level; int populated_cnt; struct kernfs_node *kn; struct cgroup_file procs_file; struct cgroup_file events_file; unsigned int subtree_control; unsigned int child_subsys_mask; struct cgroup_subsys_state *subsys[13U]; struct cgroup_root *root; struct list_head cset_links; struct list_head e_csets[13U]; struct list_head pidlists; struct mutex pidlist_mutex; wait_queue_head_t offline_waitq; struct work_struct release_agent_work; int ancestor_ids[]; } ; 294 struct cgroup_root { struct kernfs_root *kf_root; unsigned int subsys_mask; int hierarchy_id; struct cgroup cgrp; int cgrp_ancestor_id_storage; atomic_t nr_cgrps; struct list_head root_list; unsigned int flags; struct idr cgroup_idr; char release_agent_path[4096U]; char name[64U]; } ; 333 struct cftype { char name[64U]; unsigned long private; size_t max_write_len; unsigned int flags; unsigned int file_offset; struct cgroup_subsys *ss; struct list_head node; struct kernfs_ops *kf_ops; u64 (*read_u64)(struct cgroup_subsys_state *, struct cftype *); s64 (*read_s64)(struct cgroup_subsys_state *, struct cftype *); int (*seq_show)(struct seq_file *, void *); void * (*seq_start)(struct seq_file *, loff_t *); void * (*seq_next)(struct seq_file *, void *, loff_t *); void (*seq_stop)(struct seq_file *, void *); int (*write_u64)(struct cgroup_subsys_state *, struct cftype *, u64 ); int (*write_s64)(struct cgroup_subsys_state *, struct cftype *, s64 ); ssize_t (*write)(struct kernfs_open_file *, char *, size_t , loff_t ); struct lock_class_key lockdep_key; } ; 418 struct cgroup_subsys { struct cgroup_subsys_state * (*css_alloc)(struct cgroup_subsys_state *); int (*css_online)(struct cgroup_subsys_state *); void (*css_offline)(struct cgroup_subsys_state *); void (*css_released)(struct cgroup_subsys_state *); void (*css_free)(struct cgroup_subsys_state *); void (*css_reset)(struct cgroup_subsys_state *); void (*css_e_css_changed)(struct cgroup_subsys_state *); int (*can_attach)(struct cgroup_taskset *); void (*cancel_attach)(struct cgroup_taskset *); void (*attach)(struct cgroup_taskset *); int (*can_fork)(struct task_struct *); void (*cancel_fork)(struct task_struct *); void (*fork)(struct task_struct *); void (*exit)(struct task_struct *); void (*free)(struct task_struct *); void (*bind)(struct cgroup_subsys_state *); int early_init; bool broken_hierarchy; bool warned_broken_hierarchy; int id; const char *name; const char *legacy_name; struct cgroup_root *root; struct idr css_idr; struct list_head cfts; struct cftype *dfl_cftypes; struct cftype *legacy_cftypes; unsigned int depends_on; } ; 128 struct futex_pi_state ; 129 struct robust_list_head ; 130 struct bio_list ; 131 struct fs_struct ; 132 struct perf_event_context ; 133 struct blk_plug ; 135 struct nameidata ; 188 struct cfs_rq ; 189 struct task_group ; 482 
struct sighand_struct { atomic_t count; struct k_sigaction action[64U]; spinlock_t siglock; wait_queue_head_t signalfd_wqh; } ; 523 struct pacct_struct { int ac_flag; long ac_exitcode; unsigned long ac_mem; cputime_t ac_utime; cputime_t ac_stime; unsigned long ac_minflt; unsigned long ac_majflt; } ; 531 struct cpu_itimer { cputime_t expires; cputime_t incr; u32 error; u32 incr_error; } ; 538 struct prev_cputime { cputime_t utime; cputime_t stime; raw_spinlock_t lock; } ; 563 struct task_cputime { cputime_t utime; cputime_t stime; unsigned long long sum_exec_runtime; } ; 579 struct task_cputime_atomic { atomic64_t utime; atomic64_t stime; atomic64_t sum_exec_runtime; } ; 601 struct thread_group_cputimer { struct task_cputime_atomic cputime_atomic; bool running; bool checking_timer; } ; 646 struct autogroup ; 647 struct tty_struct ; 647 struct taskstats ; 647 struct tty_audit_buf ; 647 struct signal_struct { atomic_t sigcnt; atomic_t live; int nr_threads; struct list_head thread_head; wait_queue_head_t wait_chldexit; struct task_struct *curr_target; struct sigpending shared_pending; int group_exit_code; int notify_count; struct task_struct *group_exit_task; int group_stop_count; unsigned int flags; unsigned char is_child_subreaper; unsigned char has_child_subreaper; int posix_timer_id; struct list_head posix_timers; struct hrtimer real_timer; struct pid *leader_pid; ktime_t it_real_incr; struct cpu_itimer it[2U]; struct thread_group_cputimer cputimer; struct task_cputime cputime_expires; struct list_head cpu_timers[3U]; struct pid *tty_old_pgrp; int leader; struct tty_struct *tty; struct autogroup *autogroup; seqlock_t stats_lock; cputime_t utime; cputime_t stime; cputime_t cutime; cputime_t cstime; cputime_t gtime; cputime_t cgtime; struct prev_cputime prev_cputime; unsigned long nvcsw; unsigned long nivcsw; unsigned long cnvcsw; unsigned long cnivcsw; unsigned long min_flt; unsigned long maj_flt; unsigned long cmin_flt; unsigned long cmaj_flt; unsigned long inblock; unsigned long oublock; unsigned long cinblock; unsigned long coublock; unsigned long maxrss; unsigned long cmaxrss; struct task_io_accounting ioac; unsigned long long sum_sched_runtime; struct rlimit rlim[16U]; struct pacct_struct pacct; struct taskstats *stats; unsigned int audit_tty; unsigned int audit_tty_log_passwd; struct tty_audit_buf *tty_audit_buf; oom_flags_t oom_flags; short oom_score_adj; short oom_score_adj_min; struct mutex cred_guard_mutex; } ; 814 struct user_struct { atomic_t __count; atomic_t processes; atomic_t sigpending; atomic_t inotify_watches; atomic_t inotify_devs; atomic_t fanotify_listeners; atomic_long_t epoll_watches; unsigned long mq_bytes; unsigned long locked_shm; unsigned long unix_inflight; atomic_long_t pipe_bufs; struct key *uid_keyring; struct key *session_keyring; struct hlist_node uidhash_node; kuid_t uid; atomic_long_t locked_vm; } ; 859 struct backing_dev_info ; 860 struct reclaim_state ; 861 struct sched_info { unsigned long pcount; unsigned long long run_delay; unsigned long long last_arrival; unsigned long long last_queued; } ; 875 struct task_delay_info { spinlock_t lock; unsigned int flags; u64 blkio_start; u64 blkio_delay; u64 swapin_delay; u32 blkio_count; u32 swapin_count; u64 freepages_start; u64 freepages_delay; u32 freepages_count; } ; 923 struct wake_q_node { struct wake_q_node *next; } ; 1150 struct io_context ; 1184 struct pipe_inode_info ; 1186 struct load_weight { unsigned long weight; u32 inv_weight; } ; 1193 struct sched_avg { u64 last_update_time; u64 load_sum; u32 
util_sum; u32 period_contrib; unsigned long load_avg; unsigned long util_avg; } ; 1213 struct sched_statistics { u64 wait_start; u64 wait_max; u64 wait_count; u64 wait_sum; u64 iowait_count; u64 iowait_sum; u64 sleep_start; u64 sleep_max; s64 sum_sleep_runtime; u64 block_start; u64 block_max; u64 exec_max; u64 slice_max; u64 nr_migrations_cold; u64 nr_failed_migrations_affine; u64 nr_failed_migrations_running; u64 nr_failed_migrations_hot; u64 nr_forced_migrations; u64 nr_wakeups; u64 nr_wakeups_sync; u64 nr_wakeups_migrate; u64 nr_wakeups_local; u64 nr_wakeups_remote; u64 nr_wakeups_affine; u64 nr_wakeups_affine_attempts; u64 nr_wakeups_passive; u64 nr_wakeups_idle; } ; 1248 struct sched_entity { struct load_weight load; struct rb_node run_node; struct list_head group_node; unsigned int on_rq; u64 exec_start; u64 sum_exec_runtime; u64 vruntime; u64 prev_sum_exec_runtime; u64 nr_migrations; struct sched_statistics statistics; int depth; struct sched_entity *parent; struct cfs_rq *cfs_rq; struct cfs_rq *my_q; struct sched_avg avg; } ; 1285 struct rt_rq ; 1285 struct sched_rt_entity { struct list_head run_list; unsigned long timeout; unsigned long watchdog_stamp; unsigned int time_slice; struct sched_rt_entity *back; struct sched_rt_entity *parent; struct rt_rq *rt_rq; struct rt_rq *my_q; } ; 1301 struct sched_dl_entity { struct rb_node rb_node; u64 dl_runtime; u64 dl_deadline; u64 dl_period; u64 dl_bw; s64 runtime; u64 deadline; unsigned int flags; int dl_throttled; int dl_new; int dl_boosted; int dl_yielded; struct hrtimer dl_timer; } ; 1369 struct tlbflush_unmap_batch { struct cpumask cpumask; bool flush_required; bool writable; } ; 1388 struct sched_class ; 1388 struct files_struct ; 1388 struct compat_robust_list_head ; 1388 struct numa_group ; 1388 struct task_struct { volatile long state; void *stack; atomic_t usage; unsigned int flags; unsigned int ptrace; struct llist_node wake_entry; int on_cpu; unsigned int wakee_flips; unsigned long wakee_flip_decay_ts; struct task_struct *last_wakee; int wake_cpu; int on_rq; int prio; int static_prio; int normal_prio; unsigned int rt_priority; const struct sched_class *sched_class; struct sched_entity se; struct sched_rt_entity rt; struct task_group *sched_task_group; struct sched_dl_entity dl; struct hlist_head preempt_notifiers; unsigned int policy; int nr_cpus_allowed; cpumask_t cpus_allowed; unsigned long rcu_tasks_nvcsw; bool rcu_tasks_holdout; struct list_head rcu_tasks_holdout_list; int rcu_tasks_idle_cpu; struct sched_info sched_info; struct list_head tasks; struct plist_node pushable_tasks; struct rb_node pushable_dl_tasks; struct mm_struct *mm; struct mm_struct *active_mm; u32 vmacache_seqnum; struct vm_area_struct *vmacache[4U]; struct task_rss_stat rss_stat; int exit_state; int exit_code; int exit_signal; int pdeath_signal; unsigned long jobctl; unsigned int personality; unsigned char sched_reset_on_fork; unsigned char sched_contributes_to_load; unsigned char sched_migrated; unsigned char; unsigned char in_execve; unsigned char in_iowait; unsigned char memcg_may_oom; unsigned char memcg_kmem_skip_account; unsigned char brk_randomized; unsigned long atomic_flags; struct restart_block restart_block; pid_t pid; pid_t tgid; struct task_struct *real_parent; struct task_struct *parent; struct list_head children; struct list_head sibling; struct task_struct *group_leader; struct list_head ptraced; struct list_head ptrace_entry; struct pid_link pids[3U]; struct list_head thread_group; struct list_head thread_node; struct completion 
*vfork_done; int *set_child_tid; int *clear_child_tid; cputime_t utime; cputime_t stime; cputime_t utimescaled; cputime_t stimescaled; cputime_t gtime; struct prev_cputime prev_cputime; unsigned long nvcsw; unsigned long nivcsw; u64 start_time; u64 real_start_time; unsigned long min_flt; unsigned long maj_flt; struct task_cputime cputime_expires; struct list_head cpu_timers[3U]; const struct cred *real_cred; const struct cred *cred; char comm[16U]; struct nameidata *nameidata; struct sysv_sem sysvsem; struct sysv_shm sysvshm; unsigned long last_switch_count; struct fs_struct *fs; struct files_struct *files; struct nsproxy *nsproxy; struct signal_struct *signal; struct sighand_struct *sighand; sigset_t blocked; sigset_t real_blocked; sigset_t saved_sigmask; struct sigpending pending; unsigned long sas_ss_sp; size_t sas_ss_size; struct callback_head *task_works; struct audit_context *audit_context; kuid_t loginuid; unsigned int sessionid; struct seccomp seccomp; u32 parent_exec_id; u32 self_exec_id; spinlock_t alloc_lock; raw_spinlock_t pi_lock; struct wake_q_node wake_q; struct rb_root pi_waiters; struct rb_node *pi_waiters_leftmost; struct rt_mutex_waiter *pi_blocked_on; struct mutex_waiter *blocked_on; unsigned int irq_events; unsigned long hardirq_enable_ip; unsigned long hardirq_disable_ip; unsigned int hardirq_enable_event; unsigned int hardirq_disable_event; int hardirqs_enabled; int hardirq_context; unsigned long softirq_disable_ip; unsigned long softirq_enable_ip; unsigned int softirq_disable_event; unsigned int softirq_enable_event; int softirqs_enabled; int softirq_context; u64 curr_chain_key; int lockdep_depth; unsigned int lockdep_recursion; struct held_lock held_locks[48U]; gfp_t lockdep_reclaim_gfp; unsigned int in_ubsan; void *journal_info; struct bio_list *bio_list; struct blk_plug *plug; struct reclaim_state *reclaim_state; struct backing_dev_info *backing_dev_info; struct io_context *io_context; unsigned long ptrace_message; siginfo_t *last_siginfo; struct task_io_accounting ioac; u64 acct_rss_mem1; u64 acct_vm_mem1; cputime_t acct_timexpd; nodemask_t mems_allowed; seqcount_t mems_allowed_seq; int cpuset_mem_spread_rotor; int cpuset_slab_spread_rotor; struct css_set *cgroups; struct list_head cg_list; struct robust_list_head *robust_list; struct compat_robust_list_head *compat_robust_list; struct list_head pi_state_list; struct futex_pi_state *pi_state_cache; struct perf_event_context *perf_event_ctxp[2U]; struct mutex perf_event_mutex; struct list_head perf_event_list; struct mempolicy *mempolicy; short il_next; short pref_node_fork; int numa_scan_seq; unsigned int numa_scan_period; unsigned int numa_scan_period_max; int numa_preferred_nid; unsigned long numa_migrate_retry; u64 node_stamp; u64 last_task_numa_placement; u64 last_sum_exec_runtime; struct callback_head numa_work; struct list_head numa_entry; struct numa_group *numa_group; unsigned long *numa_faults; unsigned long total_numa_faults; unsigned long numa_faults_locality[3U]; unsigned long numa_pages_migrated; struct tlbflush_unmap_batch tlb_ubc; struct callback_head rcu; struct pipe_inode_info *splice_pipe; struct page_frag task_frag; struct task_delay_info *delays; int make_it_fail; int nr_dirtied; int nr_dirtied_pause; unsigned long dirty_paused_when; int latency_record_count; struct latency_record latency_record[32U]; unsigned long timer_slack_ns; unsigned long default_timer_slack_ns; unsigned int kasan_depth; unsigned long trace; unsigned long trace_recursion; struct mem_cgroup *memcg_in_oom; gfp_t 
memcg_oom_gfp_mask; int memcg_oom_order; unsigned int memcg_nr_pages_over_high; struct uprobe_task *utask; unsigned int sequential_io; unsigned int sequential_io_avg; unsigned long task_state_change; int pagefault_disabled; struct thread_struct thread; } ; 70 struct hotplug_slot ; 70 struct pci_slot { struct pci_bus *bus; struct list_head list; struct hotplug_slot *hotplug; unsigned char number; struct kobject kobj; } ; 110 typedef int pci_power_t; 137 typedef unsigned int pci_channel_state_t; 138 enum pci_channel_state { pci_channel_io_normal = 1, pci_channel_io_frozen = 2, pci_channel_io_perm_failure = 3 } ; 163 typedef unsigned short pci_dev_flags_t; 192 typedef unsigned short pci_bus_flags_t; 249 struct pcie_link_state ; 250 struct pci_vpd ; 251 struct pci_sriov ; 253 struct proc_dir_entry ; 253 struct pci_driver ; 253 union __anonunion____missing_field_name_282 { struct pci_sriov *sriov; struct pci_dev *physfn; } ; 253 struct pci_dev { struct list_head bus_list; struct pci_bus *bus; struct pci_bus *subordinate; void *sysdata; struct proc_dir_entry *procent; struct pci_slot *slot; unsigned int devfn; unsigned short vendor; unsigned short device; unsigned short subsystem_vendor; unsigned short subsystem_device; unsigned int class; u8 revision; u8 hdr_type; u8 pcie_cap; u8 msi_cap; u8 msix_cap; unsigned char pcie_mpss; u8 rom_base_reg; u8 pin; u16 pcie_flags_reg; u8 dma_alias_devfn; struct pci_driver *driver; u64 dma_mask; struct device_dma_parameters dma_parms; pci_power_t current_state; u8 pm_cap; unsigned char pme_support; unsigned char pme_interrupt; unsigned char pme_poll; unsigned char d1_support; unsigned char d2_support; unsigned char no_d1d2; unsigned char no_d3cold; unsigned char d3cold_allowed; unsigned char mmio_always_on; unsigned char wakeup_prepared; unsigned char runtime_d3cold; unsigned char ignore_hotplug; unsigned int d3_delay; unsigned int d3cold_delay; struct pcie_link_state *link_state; pci_channel_state_t error_state; struct device dev; int cfg_size; unsigned int irq; struct resource resource[17U]; bool match_driver; unsigned char transparent; unsigned char multifunction; unsigned char is_added; unsigned char is_busmaster; unsigned char no_msi; unsigned char no_64bit_msi; unsigned char block_cfg_access; unsigned char broken_parity_status; unsigned char irq_reroute_variant; unsigned char msi_enabled; unsigned char msix_enabled; unsigned char ari_enabled; unsigned char ats_enabled; unsigned char is_managed; unsigned char needs_freset; unsigned char state_saved; unsigned char is_physfn; unsigned char is_virtfn; unsigned char reset_fn; unsigned char is_hotplug_bridge; unsigned char __aer_firmware_first_valid; unsigned char __aer_firmware_first; unsigned char broken_intx_masking; unsigned char io_window_1k; unsigned char irq_managed; unsigned char has_secondary_link; pci_dev_flags_t dev_flags; atomic_t enable_cnt; u32 saved_config_space[16U]; struct hlist_head saved_cap_space; struct bin_attribute *rom_attr; int rom_attr_enabled; struct bin_attribute *res_attr[17U]; struct bin_attribute *res_attr_wc[17U]; const struct attribute_group **msi_irq_groups; struct pci_vpd *vpd; union __anonunion____missing_field_name_282 __annonCompField63; u16 ats_cap; u8 ats_stu; atomic_t ats_ref_cnt; phys_addr_t rom; size_t romlen; char *driver_override; } ; 453 struct pci_ops ; 453 struct msi_controller ; 453 struct pci_bus { struct list_head node; struct pci_bus *parent; struct list_head children; struct list_head devices; struct pci_dev *self; struct list_head slots; struct resource 
*resource[4U]; struct list_head resources; struct resource busn_res; struct pci_ops *ops; struct msi_controller *msi; void *sysdata; struct proc_dir_entry *procdir; unsigned char number; unsigned char primary; unsigned char max_bus_speed; unsigned char cur_bus_speed; char name[48U]; unsigned short bridge_ctl; pci_bus_flags_t bus_flags; struct device *bridge; struct device dev; struct bin_attribute *legacy_io; struct bin_attribute *legacy_mem; unsigned char is_added; } ; 577 struct pci_ops { void * (*map_bus)(struct pci_bus *, unsigned int, int); int (*read)(struct pci_bus *, unsigned int, int, int, u32 *); int (*write)(struct pci_bus *, unsigned int, int, int, u32 ); } ; 605 struct pci_dynids { spinlock_t lock; struct list_head list; } ; 619 typedef unsigned int pci_ers_result_t; 629 struct pci_error_handlers { pci_ers_result_t (*error_detected)(struct pci_dev *, enum pci_channel_state ); pci_ers_result_t (*mmio_enabled)(struct pci_dev *); pci_ers_result_t (*link_reset)(struct pci_dev *); pci_ers_result_t (*slot_reset)(struct pci_dev *); void (*reset_notify)(struct pci_dev *, bool ); void (*resume)(struct pci_dev *); } ; 662 struct pci_driver { struct list_head node; const char *name; const struct pci_device_id *id_table; int (*probe)(struct pci_dev *, const struct pci_device_id *); void (*remove)(struct pci_dev *); int (*suspend)(struct pci_dev *, pm_message_t ); int (*suspend_late)(struct pci_dev *, pm_message_t ); int (*resume_early)(struct pci_dev *); int (*resume)(struct pci_dev *); void (*shutdown)(struct pci_dev *); int (*sriov_configure)(struct pci_dev *, int); const struct pci_error_handlers *err_handler; struct device_driver driver; struct pci_dynids dynids; } ; 93 struct shrink_control { gfp_t gfp_mask; unsigned long nr_to_scan; int nid; struct mem_cgroup *memcg; } ; 27 struct shrinker { unsigned long int (*count_objects)(struct shrinker *, struct shrink_control *); unsigned long int (*scan_objects)(struct shrinker *, struct shrink_control *); int seeks; long batch; unsigned long flags; struct list_head list; atomic_long_t *nr_deferred; } ; 68 struct file_ra_state ; 69 struct writeback_control ; 70 struct bdi_writeback ; 226 struct vm_fault { unsigned int flags; gfp_t gfp_mask; unsigned long pgoff; void *virtual_address; struct page *cow_page; struct page *page; unsigned long max_pgoff; pte_t *pte; } ; 262 struct vm_operations_struct { void (*open)(struct vm_area_struct *); void (*close)(struct vm_area_struct *); int (*mremap)(struct vm_area_struct *); int (*fault)(struct vm_area_struct *, struct vm_fault *); int (*pmd_fault)(struct vm_area_struct *, unsigned long, pmd_t *, unsigned int); void (*map_pages)(struct vm_area_struct *, struct vm_fault *); int (*page_mkwrite)(struct vm_area_struct *, struct vm_fault *); int (*pfn_mkwrite)(struct vm_area_struct *, struct vm_fault *); int (*access)(struct vm_area_struct *, unsigned long, void *, int, int); const char * (*name)(struct vm_area_struct *); int (*set_policy)(struct vm_area_struct *, struct mempolicy *); struct mempolicy * (*get_policy)(struct vm_area_struct *, unsigned long); struct page * (*find_special_page)(struct vm_area_struct *, unsigned long); } ; 1285 struct kvec ; 2365 struct scatterlist { unsigned long sg_magic; unsigned long page_link; unsigned int offset; unsigned int length; dma_addr_t dma_address; unsigned int dma_length; } ; 21 struct sg_table { struct scatterlist *sgl; unsigned int nents; unsigned int orig_nents; } ; 1405 struct acpi_device ; 1405 struct pci_sysdata { int domain; int node; struct acpi_device 
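/*
 * Illustrative sketch (not part of the trace): a minimal driver built on the
 * struct pci_driver / pci_device_id shown above. All example_* names and the
 * ID values are hypothetical, chosen only to show the registration pattern.
 */
#include <linux/module.h>
#include <linux/pci.h>

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        int rv = pci_enable_device(pdev);       /* power up and enable BARs */

        if (rv)
                return rv;
        pci_set_master(pdev);                   /* allow the device to DMA */
        return 0;
}

static void example_remove(struct pci_dev *pdev)
{
        pci_disable_device(pdev);
}

static const struct pci_device_id example_ids[] = {
        { PCI_DEVICE(0x1344, 0x5150) },         /* hypothetical vendor/device */
        { 0 }
};
MODULE_DEVICE_TABLE(pci, example_ids);

static struct pci_driver example_driver = {
        .name     = "example",
        .id_table = example_ids,
        .probe    = example_probe,
        .remove   = example_remove,
};
module_pci_driver(example_driver);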
*companion; void *iommu; } ; 34 struct dma_attrs { unsigned long flags[1U]; } ; 158 struct dma_map_ops { void * (*alloc)(struct device *, size_t , dma_addr_t *, gfp_t , struct dma_attrs *); void (*free)(struct device *, size_t , void *, dma_addr_t , struct dma_attrs *); int (*mmap)(struct device *, struct vm_area_struct *, void *, dma_addr_t , size_t , struct dma_attrs *); int (*get_sgtable)(struct device *, struct sg_table *, void *, dma_addr_t , size_t , struct dma_attrs *); dma_addr_t (*map_page)(struct device *, struct page *, unsigned long, size_t , enum dma_data_direction , struct dma_attrs *); void (*unmap_page)(struct device *, dma_addr_t , size_t , enum dma_data_direction , struct dma_attrs *); int (*map_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction , struct dma_attrs *); void (*unmap_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction , struct dma_attrs *); void (*sync_single_for_cpu)(struct device *, dma_addr_t , size_t , enum dma_data_direction ); void (*sync_single_for_device)(struct device *, dma_addr_t , size_t , enum dma_data_direction ); void (*sync_sg_for_cpu)(struct device *, struct scatterlist *, int, enum dma_data_direction ); void (*sync_sg_for_device)(struct device *, struct scatterlist *, int, enum dma_data_direction ); int (*mapping_error)(struct device *, dma_addr_t ); int (*dma_supported)(struct device *, u64 ); int (*set_dma_mask)(struct device *, u64 ); int is_phys; } ; 1992 enum irqreturn { IRQ_NONE = 0, IRQ_HANDLED = 1, IRQ_WAKE_THREAD = 2 } ; 16 typedef enum irqreturn irqreturn_t; 62 struct exception_table_entry { int insn; int fixup; } ; 54 struct __anonstruct_b_300 { unsigned char data; unsigned char error_feature; unsigned char sector; unsigned char nsector; unsigned char lcyl; unsigned char hcyl; unsigned char select; unsigned char status_command; unsigned char data_hob; unsigned char error_feature_hob; unsigned char sector_hob; unsigned char nsector_hob; unsigned char lcyl_hob; unsigned char hcyl_hob; unsigned char select_hob; unsigned char control_hob; } ; 54 union ide_reg_valid_s { unsigned short all; struct __anonstruct_b_300 b; } ; 57 typedef union ide_reg_valid_s ide_reg_valid_t; 58 struct ide_task_request_s { __u8 io_ports[8U]; __u8 hob_ports[8U]; ide_reg_valid_t out_flags; ide_reg_valid_t in_flags; int data_phase; int req_cmd; unsigned long out_size; unsigned long in_size; } ; 68 typedef struct ide_task_request_s ide_task_request_t; 82 struct hd_geometry { unsigned char heads; unsigned char sectors; unsigned short cylinders; unsigned long start; } ; 129 struct iovec { void *iov_base; __kernel_size_t iov_len; } ; 21 struct kvec { void *iov_base; size_t iov_len; } ; 27 union __anonunion____missing_field_name_301 { const struct iovec *iov; const struct kvec *kvec; const struct bio_vec *bvec; } ; 27 struct iov_iter { int type; size_t iov_offset; size_t count; union __anonunion____missing_field_name_301 __annonCompField72; unsigned long nr_segs; } ; 38 struct kiocb ; 81 struct hlist_bl_node ; 81 struct hlist_bl_head { struct hlist_bl_node *first; } ; 36 struct hlist_bl_node { struct hlist_bl_node *next; struct hlist_bl_node **pprev; } ; 114 struct __anonstruct____missing_field_name_322 { spinlock_t lock; int count; } ; 114 union __anonunion____missing_field_name_321 { struct __anonstruct____missing_field_name_322 __annonCompField73; } ; 114 struct lockref { union __anonunion____missing_field_name_321 __annonCompField74; } ; 50 struct vfsmount ; 51 struct __anonstruct____missing_field_name_324 { u32 hash; 
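/*
 * Illustrative sketch (not part of the trace): the struct dma_map_ops methods
 * above back the generic DMA API used by drivers. The ldv_dma_map_single_attrs()
 * model declared at the end of this trace belongs to the LDV rule class that
 * requires every mapping to be tested with dma_mapping_error() before use;
 * a minimal correct pattern (hypothetical names) is:
 */
#include <linux/dma-mapping.h>

static int example_dma(struct device *dev, void *buf, size_t len)
{
        dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

        if (dma_mapping_error(dev, dma))        /* the mandatory check */
                return -ENOMEM;

        /* ... hand 'dma' to the hardware and wait for completion ... */

        dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
        return 0;
}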
u32 len; } ; 51 union __anonunion____missing_field_name_323 { struct __anonstruct____missing_field_name_324 __annonCompField75; u64 hash_len; } ; 51 struct qstr { union __anonunion____missing_field_name_323 __annonCompField76; const unsigned char *name; } ; 90 struct dentry_operations ; 90 union __anonunion_d_u_325 { struct hlist_node d_alias; struct callback_head d_rcu; } ; 90 struct dentry { unsigned int d_flags; seqcount_t d_seq; struct hlist_bl_node d_hash; struct dentry *d_parent; struct qstr d_name; struct inode *d_inode; unsigned char d_iname[32U]; struct lockref d_lockref; const struct dentry_operations *d_op; struct super_block *d_sb; unsigned long d_time; void *d_fsdata; struct list_head d_lru; struct list_head d_child; struct list_head d_subdirs; union __anonunion_d_u_325 d_u; } ; 142 struct dentry_operations { int (*d_revalidate)(struct dentry *, unsigned int); int (*d_weak_revalidate)(struct dentry *, unsigned int); int (*d_hash)(const struct dentry *, struct qstr *); int (*d_compare)(const struct dentry *, const struct dentry *, unsigned int, const char *, const struct qstr *); int (*d_delete)(const struct dentry *); void (*d_release)(struct dentry *); void (*d_prune)(struct dentry *); void (*d_iput)(struct dentry *, struct inode *); char * (*d_dname)(struct dentry *, char *, int); struct vfsmount * (*d_automount)(struct path *); int (*d_manage)(struct dentry *, bool ); struct inode * (*d_select_inode)(struct dentry *, unsigned int); } ; 586 struct path { struct vfsmount *mnt; struct dentry *dentry; } ; 27 struct list_lru_one { struct list_head list; long nr_items; } ; 32 struct list_lru_memcg { struct list_lru_one *lru[0U]; } ; 37 struct list_lru_node { spinlock_t lock; struct list_lru_one lru; struct list_lru_memcg *memcg_lrus; } ; 47 struct list_lru { struct list_lru_node *node; struct list_head list; } ; 67 struct __anonstruct____missing_field_name_329 { struct radix_tree_node *parent; void *private_data; } ; 67 union __anonunion____missing_field_name_328 { struct __anonstruct____missing_field_name_329 __annonCompField77; struct callback_head callback_head; } ; 67 struct radix_tree_node { unsigned int path; unsigned int count; union __anonunion____missing_field_name_328 __annonCompField78; struct list_head private_list; void *slots[64U]; unsigned long tags[3U][1U]; } ; 114 struct radix_tree_root { unsigned int height; gfp_t gfp_mask; struct radix_tree_node *rnode; } ; 437 struct semaphore { raw_spinlock_t lock; unsigned int count; struct list_head wait_list; } ; 45 struct fiemap_extent { __u64 fe_logical; __u64 fe_physical; __u64 fe_length; __u64 fe_reserved64[2U]; __u32 fe_flags; __u32 fe_reserved[3U]; } ; 38 enum migrate_mode { MIGRATE_ASYNC = 0, MIGRATE_SYNC_LIGHT = 1, MIGRATE_SYNC = 2 } ; 44 struct bio_set ; 45 struct bio ; 46 struct bio_integrity_payload ; 47 struct block_device ; 17 typedef void bio_end_io_t(struct bio *); 19 struct bio_vec { struct page *bv_page; unsigned int bv_len; unsigned int bv_offset; } ; 28 struct bvec_iter { sector_t bi_sector; unsigned int bi_size; unsigned int bi_idx; unsigned int bi_bvec_done; } ; 40 union __anonunion____missing_field_name_334 { struct bio_integrity_payload *bi_integrity; } ; 40 struct bio { struct bio *bi_next; struct block_device *bi_bdev; unsigned int bi_flags; int bi_error; unsigned long bi_rw; struct bvec_iter bi_iter; unsigned int bi_phys_segments; unsigned int bi_seg_front_size; unsigned int bi_seg_back_size; atomic_t __bi_remaining; bio_end_io_t *bi_end_io; void *bi_private; struct io_context *bi_ioc; struct 
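/*
 * Illustrative sketch (not part of the trace): the struct semaphore defined
 * above appears later in this trace as mtip_port.cmd_slot_unal, a counting
 * semaphore bounding unaligned commands in flight. Hypothetical usage:
 */
#include <linux/semaphore.h>

static struct semaphore example_sem;

static int example_bounded_work(void)
{
        if (down_interruptible(&example_sem))   /* wait for a free slot */
                return -EINTR;
        /* ... at most 'count' tasks execute here concurrently ... */
        up(&example_sem);                       /* release the slot */
        return 0;
}

/* somewhere in init: sema_init(&example_sem, 32); */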
cgroup_subsys_state *bi_css; union __anonunion____missing_field_name_334 __annonCompField79; unsigned short bi_vcnt; unsigned short bi_max_vecs; atomic_t __bi_cnt; struct bio_vec *bi_io_vec; struct bio_set *bi_pool; struct bio_vec bi_inline_vecs[0U]; } ; 245 typedef unsigned int blk_qc_t; 268 struct delayed_call { void (*fn)(void *); void *arg; } ; 162 struct export_operations ; 163 struct poll_table_struct ; 164 struct kstatfs ; 165 struct swap_info_struct ; 76 struct iattr { unsigned int ia_valid; umode_t ia_mode; kuid_t ia_uid; kgid_t ia_gid; loff_t ia_size; struct timespec ia_atime; struct timespec ia_mtime; struct timespec ia_ctime; struct file *ia_file; } ; 212 struct dquot ; 19 typedef __kernel_uid32_t projid_t; 23 struct __anonstruct_kprojid_t_335 { projid_t val; } ; 23 typedef struct __anonstruct_kprojid_t_335 kprojid_t; 166 enum quota_type { USRQUOTA = 0, GRPQUOTA = 1, PRJQUOTA = 2 } ; 66 typedef long long qsize_t; 67 union __anonunion____missing_field_name_336 { kuid_t uid; kgid_t gid; kprojid_t projid; } ; 67 struct kqid { union __anonunion____missing_field_name_336 __annonCompField80; enum quota_type type; } ; 184 struct mem_dqblk { qsize_t dqb_bhardlimit; qsize_t dqb_bsoftlimit; qsize_t dqb_curspace; qsize_t dqb_rsvspace; qsize_t dqb_ihardlimit; qsize_t dqb_isoftlimit; qsize_t dqb_curinodes; time_t dqb_btime; time_t dqb_itime; } ; 206 struct quota_format_type ; 207 struct mem_dqinfo { struct quota_format_type *dqi_format; int dqi_fmt_id; struct list_head dqi_dirty_list; unsigned long dqi_flags; unsigned int dqi_bgrace; unsigned int dqi_igrace; qsize_t dqi_max_spc_limit; qsize_t dqi_max_ino_limit; void *dqi_priv; } ; 272 struct dquot { struct hlist_node dq_hash; struct list_head dq_inuse; struct list_head dq_free; struct list_head dq_dirty; struct mutex dq_lock; atomic_t dq_count; wait_queue_head_t dq_wait_unused; struct super_block *dq_sb; struct kqid dq_id; loff_t dq_off; unsigned long dq_flags; struct mem_dqblk dq_dqb; } ; 299 struct quota_format_ops { int (*check_quota_file)(struct super_block *, int); int (*read_file_info)(struct super_block *, int); int (*write_file_info)(struct super_block *, int); int (*free_file_info)(struct super_block *, int); int (*read_dqblk)(struct dquot *); int (*commit_dqblk)(struct dquot *); int (*release_dqblk)(struct dquot *); } ; 310 struct dquot_operations { int (*write_dquot)(struct dquot *); struct dquot * (*alloc_dquot)(struct super_block *, int); void (*destroy_dquot)(struct dquot *); int (*acquire_dquot)(struct dquot *); int (*release_dquot)(struct dquot *); int (*mark_dirty)(struct dquot *); int (*write_info)(struct super_block *, int); qsize_t * (*get_reserved_space)(struct inode *); int (*get_projid)(struct inode *, kprojid_t *); } ; 325 struct qc_dqblk { int d_fieldmask; u64 d_spc_hardlimit; u64 d_spc_softlimit; u64 d_ino_hardlimit; u64 d_ino_softlimit; u64 d_space; u64 d_ino_count; s64 d_ino_timer; s64 d_spc_timer; int d_ino_warns; int d_spc_warns; u64 d_rt_spc_hardlimit; u64 d_rt_spc_softlimit; u64 d_rt_space; s64 d_rt_spc_timer; int d_rt_spc_warns; } ; 348 struct qc_type_state { unsigned int flags; unsigned int spc_timelimit; unsigned int ino_timelimit; unsigned int rt_spc_timelimit; unsigned int spc_warnlimit; unsigned int ino_warnlimit; unsigned int rt_spc_warnlimit; unsigned long long ino; blkcnt_t blocks; blkcnt_t nextents; } ; 394 struct qc_state { unsigned int s_incoredqs; struct qc_type_state s_state[3U]; } ; 405 struct qc_info { int i_fieldmask; unsigned int i_flags; unsigned int i_spc_timelimit; unsigned int 
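/*
 * Illustrative sketch (not part of the trace): iterating the struct bio /
 * struct bvec_iter pair completed above with the standard accessor macro.
 * Names are hypothetical.
 */
#include <linux/bio.h>

static unsigned int example_bio_bytes(struct bio *bio)
{
        struct bio_vec bvec;
        struct bvec_iter iter;
        unsigned int bytes = 0;

        bio_for_each_segment(bvec, bio, iter)   /* each contiguous segment */
                bytes += bvec.bv_len;
        return bytes;                           /* equals bio->bi_iter.bi_size */
}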
i_ino_timelimit; unsigned int i_rt_spc_timelimit; unsigned int i_spc_warnlimit; unsigned int i_ino_warnlimit; unsigned int i_rt_spc_warnlimit; } ; 418 struct quotactl_ops { int (*quota_on)(struct super_block *, int, int, struct path *); int (*quota_off)(struct super_block *, int); int (*quota_enable)(struct super_block *, unsigned int); int (*quota_disable)(struct super_block *, unsigned int); int (*quota_sync)(struct super_block *, int); int (*set_info)(struct super_block *, int, struct qc_info *); int (*get_dqblk)(struct super_block *, struct kqid , struct qc_dqblk *); int (*set_dqblk)(struct super_block *, struct kqid , struct qc_dqblk *); int (*get_state)(struct super_block *, struct qc_state *); int (*rm_xquota)(struct super_block *, unsigned int); } ; 432 struct quota_format_type { int qf_fmt_id; const struct quota_format_ops *qf_ops; struct module *qf_owner; struct quota_format_type *qf_next; } ; 496 struct quota_info { unsigned int flags; struct mutex dqio_mutex; struct mutex dqonoff_mutex; struct inode *files[3U]; struct mem_dqinfo info[3U]; const struct quota_format_ops *ops[3U]; } ; 526 struct kiocb { struct file *ki_filp; loff_t ki_pos; void (*ki_complete)(struct kiocb *, long, long); void *private; int ki_flags; } ; 367 struct address_space_operations { int (*writepage)(struct page *, struct writeback_control *); int (*readpage)(struct file *, struct page *); int (*writepages)(struct address_space *, struct writeback_control *); int (*set_page_dirty)(struct page *); int (*readpages)(struct file *, struct address_space *, struct list_head *, unsigned int); int (*write_begin)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page **, void **); int (*write_end)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page *, void *); sector_t (*bmap)(struct address_space *, sector_t ); void (*invalidatepage)(struct page *, unsigned int, unsigned int); int (*releasepage)(struct page *, gfp_t ); void (*freepage)(struct page *); ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *, loff_t ); int (*migratepage)(struct address_space *, struct page *, struct page *, enum migrate_mode ); int (*launder_page)(struct page *); int (*is_partially_uptodate)(struct page *, unsigned long, unsigned long); void (*is_dirty_writeback)(struct page *, bool *, bool *); int (*error_remove_page)(struct address_space *, struct page *); int (*swap_activate)(struct swap_info_struct *, struct file *, sector_t *); void (*swap_deactivate)(struct file *); } ; 424 struct address_space { struct inode *host; struct radix_tree_root page_tree; spinlock_t tree_lock; atomic_t i_mmap_writable; struct rb_root i_mmap; struct rw_semaphore i_mmap_rwsem; unsigned long nrpages; unsigned long nrexceptional; unsigned long writeback_index; const struct address_space_operations *a_ops; unsigned long flags; spinlock_t private_lock; struct list_head private_list; void *private_data; } ; 445 struct request_queue ; 446 struct hd_struct ; 446 struct gendisk ; 446 struct block_device { dev_t bd_dev; int bd_openers; struct inode *bd_inode; struct super_block *bd_super; struct mutex bd_mutex; struct list_head bd_inodes; void *bd_claiming; void *bd_holder; int bd_holders; bool bd_write_holder; struct list_head bd_holder_disks; struct block_device *bd_contains; unsigned int bd_block_size; struct hd_struct *bd_part; unsigned int bd_part_count; int bd_invalidated; struct gendisk *bd_disk; struct request_queue *bd_queue; struct list_head bd_list; unsigned long bd_private; int 
bd_fsfreeze_count; struct mutex bd_fsfreeze_mutex; int bd_map_count; } ; 565 struct posix_acl ; 566 struct inode_operations ; 566 union __anonunion____missing_field_name_341 { const unsigned int i_nlink; unsigned int __i_nlink; } ; 566 union __anonunion____missing_field_name_342 { struct hlist_head i_dentry; struct callback_head i_rcu; } ; 566 struct file_lock_context ; 566 struct cdev ; 566 union __anonunion____missing_field_name_343 { struct pipe_inode_info *i_pipe; struct block_device *i_bdev; struct cdev *i_cdev; char *i_link; } ; 566 struct inode { umode_t i_mode; unsigned short i_opflags; kuid_t i_uid; kgid_t i_gid; unsigned int i_flags; struct posix_acl *i_acl; struct posix_acl *i_default_acl; const struct inode_operations *i_op; struct super_block *i_sb; struct address_space *i_mapping; void *i_security; unsigned long i_ino; union __anonunion____missing_field_name_341 __annonCompField81; dev_t i_rdev; loff_t i_size; struct timespec i_atime; struct timespec i_mtime; struct timespec i_ctime; spinlock_t i_lock; unsigned short i_bytes; unsigned int i_blkbits; blkcnt_t i_blocks; unsigned long i_state; struct mutex i_mutex; unsigned long dirtied_when; unsigned long dirtied_time_when; struct hlist_node i_hash; struct list_head i_io_list; struct bdi_writeback *i_wb; int i_wb_frn_winner; u16 i_wb_frn_avg_time; u16 i_wb_frn_history; struct list_head i_lru; struct list_head i_sb_list; union __anonunion____missing_field_name_342 __annonCompField82; u64 i_version; atomic_t i_count; atomic_t i_dio_count; atomic_t i_writecount; atomic_t i_readcount; const struct file_operations *i_fop; struct file_lock_context *i_flctx; struct address_space i_data; struct list_head i_devices; union __anonunion____missing_field_name_343 __annonCompField83; __u32 i_generation; __u32 i_fsnotify_mask; struct hlist_head i_fsnotify_marks; void *i_private; } ; 837 struct fown_struct { rwlock_t lock; struct pid *pid; enum pid_type pid_type; kuid_t uid; kuid_t euid; int signum; } ; 845 struct file_ra_state { unsigned long start; unsigned int size; unsigned int async_size; unsigned int ra_pages; unsigned int mmap_miss; loff_t prev_pos; } ; 868 union __anonunion_f_u_344 { struct llist_node fu_llist; struct callback_head fu_rcuhead; } ; 868 struct file { union __anonunion_f_u_344 f_u; struct path f_path; struct inode *f_inode; const struct file_operations *f_op; spinlock_t f_lock; atomic_long_t f_count; unsigned int f_flags; fmode_t f_mode; struct mutex f_pos_lock; loff_t f_pos; struct fown_struct f_owner; const struct cred *f_cred; struct file_ra_state f_ra; u64 f_version; void *f_security; void *private_data; struct list_head f_ep_links; struct list_head f_tfile_llink; struct address_space *f_mapping; } ; 953 typedef void *fl_owner_t; 954 struct file_lock ; 955 struct file_lock_operations { void (*fl_copy_lock)(struct file_lock *, struct file_lock *); void (*fl_release_private)(struct file_lock *); } ; 961 struct lock_manager_operations { int (*lm_compare_owner)(struct file_lock *, struct file_lock *); unsigned long int (*lm_owner_key)(struct file_lock *); fl_owner_t (*lm_get_owner)(fl_owner_t ); void (*lm_put_owner)(fl_owner_t ); void (*lm_notify)(struct file_lock *); int (*lm_grant)(struct file_lock *, int); bool (*lm_break)(struct file_lock *); int (*lm_change)(struct file_lock *, int, struct list_head *); void (*lm_setup)(struct file_lock *, void **); } ; 988 struct nlm_lockowner ; 989 struct nfs_lock_info { u32 state; struct nlm_lockowner *owner; struct list_head list; } ; 14 struct nfs4_lock_state ; 15 struct 
nfs4_lock_info { struct nfs4_lock_state *owner; } ; 19 struct fasync_struct ; 19 struct __anonstruct_afs_346 { struct list_head link; int state; } ; 19 union __anonunion_fl_u_345 { struct nfs_lock_info nfs_fl; struct nfs4_lock_info nfs4_fl; struct __anonstruct_afs_346 afs; } ; 19 struct file_lock { struct file_lock *fl_next; struct list_head fl_list; struct hlist_node fl_link; struct list_head fl_block; fl_owner_t fl_owner; unsigned int fl_flags; unsigned char fl_type; unsigned int fl_pid; int fl_link_cpu; struct pid *fl_nspid; wait_queue_head_t fl_wait; struct file *fl_file; loff_t fl_start; loff_t fl_end; struct fasync_struct *fl_fasync; unsigned long fl_break_time; unsigned long fl_downgrade_time; const struct file_lock_operations *fl_ops; const struct lock_manager_operations *fl_lmops; union __anonunion_fl_u_345 fl_u; } ; 1041 struct file_lock_context { spinlock_t flc_lock; struct list_head flc_flock; struct list_head flc_posix; struct list_head flc_lease; } ; 1244 struct fasync_struct { spinlock_t fa_lock; int magic; int fa_fd; struct fasync_struct *fa_next; struct file *fa_file; struct callback_head fa_rcu; } ; 1279 struct sb_writers { int frozen; wait_queue_head_t wait_unfrozen; struct percpu_rw_semaphore rw_sem[3U]; } ; 1305 struct super_operations ; 1305 struct xattr_handler ; 1305 struct mtd_info ; 1305 struct super_block { struct list_head s_list; dev_t s_dev; unsigned char s_blocksize_bits; unsigned long s_blocksize; loff_t s_maxbytes; struct file_system_type *s_type; const struct super_operations *s_op; const struct dquot_operations *dq_op; const struct quotactl_ops *s_qcop; const struct export_operations *s_export_op; unsigned long s_flags; unsigned long s_iflags; unsigned long s_magic; struct dentry *s_root; struct rw_semaphore s_umount; int s_count; atomic_t s_active; void *s_security; const struct xattr_handler **s_xattr; struct hlist_bl_head s_anon; struct list_head s_mounts; struct block_device *s_bdev; struct backing_dev_info *s_bdi; struct mtd_info *s_mtd; struct hlist_node s_instances; unsigned int s_quota_types; struct quota_info s_dquot; struct sb_writers s_writers; char s_id[32U]; u8 s_uuid[16U]; void *s_fs_info; unsigned int s_max_links; fmode_t s_mode; u32 s_time_gran; struct mutex s_vfs_rename_mutex; char *s_subtype; char *s_options; const struct dentry_operations *s_d_op; int cleancache_poolid; struct shrinker s_shrink; atomic_long_t s_remove_count; int s_readonly_remount; struct workqueue_struct *s_dio_done_wq; struct hlist_head s_pins; struct list_lru s_dentry_lru; struct list_lru s_inode_lru; struct callback_head rcu; struct work_struct destroy_work; struct mutex s_sync_lock; int s_stack_depth; spinlock_t s_inode_list_lock; struct list_head s_inodes; } ; 1554 struct fiemap_extent_info { unsigned int fi_flags; unsigned int fi_extents_mapped; unsigned int fi_extents_max; struct fiemap_extent *fi_extents_start; } ; 1568 struct dir_context ; 1593 struct dir_context { int (*actor)(struct dir_context *, const char *, int, loff_t , u64 , unsigned int); loff_t pos; } ; 1599 struct block_device_operations ; 1600 struct file_operations { struct module *owner; loff_t (*llseek)(struct file *, loff_t , int); ssize_t (*read)(struct file *, char *, size_t , loff_t *); ssize_t (*write)(struct file *, const char *, size_t , loff_t *); ssize_t (*read_iter)(struct kiocb *, struct iov_iter *); ssize_t (*write_iter)(struct kiocb *, struct iov_iter *); int (*iterate)(struct file *, struct dir_context *); unsigned int (*poll)(struct file *, struct poll_table_struct *); long int 
(*unlocked_ioctl)(struct file *, unsigned int, unsigned long); long int (*compat_ioctl)(struct file *, unsigned int, unsigned long); int (*mmap)(struct file *, struct vm_area_struct *); int (*open)(struct inode *, struct file *); int (*flush)(struct file *, fl_owner_t ); int (*release)(struct inode *, struct file *); int (*fsync)(struct file *, loff_t , loff_t , int); int (*aio_fsync)(struct kiocb *, int); int (*fasync)(int, struct file *, int); int (*lock)(struct file *, int, struct file_lock *); ssize_t (*sendpage)(struct file *, struct page *, int, size_t , loff_t *, int); unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); int (*check_flags)(int); int (*flock)(struct file *, int, struct file_lock *); ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t , unsigned int); ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t , unsigned int); int (*setlease)(struct file *, long, struct file_lock **, void **); long int (*fallocate)(struct file *, int, loff_t , loff_t ); void (*show_fdinfo)(struct seq_file *, struct file *); ssize_t (*copy_file_range)(struct file *, loff_t , struct file *, loff_t , size_t , unsigned int); int (*clone_file_range)(struct file *, loff_t , struct file *, loff_t , u64 ); ssize_t (*dedupe_file_range)(struct file *, u64 , u64 , struct file *, u64 ); } ; 1668 struct inode_operations { struct dentry * (*lookup)(struct inode *, struct dentry *, unsigned int); const char * (*get_link)(struct dentry *, struct inode *, struct delayed_call *); int (*permission)(struct inode *, int); struct posix_acl * (*get_acl)(struct inode *, int); int (*readlink)(struct dentry *, char *, int); int (*create)(struct inode *, struct dentry *, umode_t , bool ); int (*link)(struct dentry *, struct inode *, struct dentry *); int (*unlink)(struct inode *, struct dentry *); int (*symlink)(struct inode *, struct dentry *, const char *); int (*mkdir)(struct inode *, struct dentry *, umode_t ); int (*rmdir)(struct inode *, struct dentry *); int (*mknod)(struct inode *, struct dentry *, umode_t , dev_t ); int (*rename)(struct inode *, struct dentry *, struct inode *, struct dentry *); int (*rename2)(struct inode *, struct dentry *, struct inode *, struct dentry *, unsigned int); int (*setattr)(struct dentry *, struct iattr *); int (*getattr)(struct vfsmount *, struct dentry *, struct kstat *); int (*setxattr)(struct dentry *, const char *, const void *, size_t , int); ssize_t (*getxattr)(struct dentry *, const char *, void *, size_t ); ssize_t (*listxattr)(struct dentry *, char *, size_t ); int (*removexattr)(struct dentry *, const char *); int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 , u64 ); int (*update_time)(struct inode *, struct timespec *, int); int (*atomic_open)(struct inode *, struct dentry *, struct file *, unsigned int, umode_t , int *); int (*tmpfile)(struct inode *, struct dentry *, umode_t ); int (*set_acl)(struct inode *, struct posix_acl *, int); } ; 1723 struct super_operations { struct inode * (*alloc_inode)(struct super_block *); void (*destroy_inode)(struct inode *); void (*dirty_inode)(struct inode *, int); int (*write_inode)(struct inode *, struct writeback_control *); int (*drop_inode)(struct inode *); void (*evict_inode)(struct inode *); void (*put_super)(struct super_block *); int (*sync_fs)(struct super_block *, int); int (*freeze_super)(struct super_block *); int (*freeze_fs)(struct super_block *); int (*thaw_super)(struct super_block 
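/*
 * Illustrative sketch (not part of the trace): a struct file_operations like
 * the one above backs this driver's debugfs nodes (dfs_node). A hypothetical
 * read-only file:
 */
#include <linux/debugfs.h>
#include <linux/fs.h>

static ssize_t example_read(struct file *f, char __user *ubuf,
                            size_t len, loff_t *ppos)
{
        static const char msg[] = "status: ok\n";

        return simple_read_from_buffer(ubuf, len, ppos, msg, sizeof(msg) - 1);
}

static const struct file_operations example_fops = {
        .owner  = THIS_MODULE,
        .read   = example_read,
        .llseek = default_llseek,
};

/* registration: debugfs_create_file("example", 0444, NULL, NULL, &example_fops); */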
*); int (*unfreeze_fs)(struct super_block *); int (*statfs)(struct dentry *, struct kstatfs *); int (*remount_fs)(struct super_block *, int *, char *); void (*umount_begin)(struct super_block *); int (*show_options)(struct seq_file *, struct dentry *); int (*show_devname)(struct seq_file *, struct dentry *); int (*show_path)(struct seq_file *, struct dentry *); int (*show_stats)(struct seq_file *, struct dentry *); ssize_t (*quota_read)(struct super_block *, int, char *, size_t , loff_t ); ssize_t (*quota_write)(struct super_block *, int, const char *, size_t , loff_t ); struct dquot ** (*get_dquots)(struct inode *); int (*bdev_try_to_free_page)(struct super_block *, struct page *, gfp_t ); long int (*nr_cached_objects)(struct super_block *, struct shrink_control *); long int (*free_cached_objects)(struct super_block *, struct shrink_control *); } ; 1962 struct file_system_type { const char *name; int fs_flags; struct dentry * (*mount)(struct file_system_type *, int, const char *, void *); void (*kill_sb)(struct super_block *); struct module *owner; struct file_system_type *next; struct hlist_head fs_supers; struct lock_class_key s_lock_key; struct lock_class_key s_umount_key; struct lock_class_key s_vfs_rename_key; struct lock_class_key s_writers_key[3U]; struct lock_class_key i_lock_key; struct lock_class_key i_mutex_key; struct lock_class_key i_mutex_dir_key; } ; 18 typedef s32 compat_time_t; 39 typedef s32 compat_long_t; 42 typedef u32 compat_ulong_t; 44 typedef u32 compat_uptr_t; 45 struct compat_timespec { compat_time_t tv_sec; s32 tv_nsec; } ; 276 struct compat_robust_list { compat_uptr_t next; } ; 280 struct compat_robust_list_head { struct compat_robust_list list; compat_long_t futex_offset; compat_uptr_t list_op_pending; } ; 15 typedef __u64 Elf64_Addr; 16 typedef __u16 Elf64_Half; 20 typedef __u32 Elf64_Word; 21 typedef __u64 Elf64_Xword; 190 struct elf64_sym { Elf64_Word st_name; unsigned char st_info; unsigned char st_other; Elf64_Half st_shndx; Elf64_Addr st_value; Elf64_Xword st_size; } ; 198 typedef struct elf64_sym Elf64_Sym; 53 struct kernel_param ; 58 struct kernel_param_ops { unsigned int flags; int (*set)(const char *, const struct kernel_param *); int (*get)(char *, const struct kernel_param *); void (*free)(void *); } ; 62 struct kparam_string ; 62 struct kparam_array ; 62 union __anonunion____missing_field_name_365 { void *arg; const struct kparam_string *str; const struct kparam_array *arr; } ; 62 struct kernel_param { const char *name; struct module *mod; const struct kernel_param_ops *ops; const u16 perm; s8 level; u8 flags; union __anonunion____missing_field_name_365 __annonCompField84; } ; 83 struct kparam_string { unsigned int maxlen; char *string; } ; 89 struct kparam_array { unsigned int max; unsigned int elemsize; unsigned int *num; const struct kernel_param_ops *ops; void *elem; } ; 470 struct latch_tree_node { struct rb_node node[2U]; } ; 211 struct mod_arch_specific { } ; 38 struct module_param_attrs ; 38 struct module_kobject { struct kobject kobj; struct module *mod; struct kobject *drivers_dir; struct module_param_attrs *mp; struct completion *kobj_completion; } ; 48 struct module_attribute { struct attribute attr; ssize_t (*show)(struct module_attribute *, struct module_kobject *, char *); ssize_t (*store)(struct module_attribute *, struct module_kobject *, const char *, size_t ); void (*setup)(struct module *, const char *); int (*test)(struct module *); void (*free)(struct module *); } ; 290 enum module_state { MODULE_STATE_LIVE = 0, 
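/*
 * Illustrative sketch (not part of the trace): struct kernel_param and
 * kernel_param_ops above are what the module_param() macros expand to.
 * A hypothetical integer parameter:
 */
#include <linux/module.h>
#include <linux/moduleparam.h>

static int example_depth = 32;
module_param(example_depth, int, 0444);         /* world-readable in sysfs */
MODULE_PARM_DESC(example_depth, "Hypothetical queue-depth limit");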
MODULE_STATE_COMING = 1, MODULE_STATE_GOING = 2, MODULE_STATE_UNFORMED = 3 } ; 297 struct mod_tree_node { struct module *mod; struct latch_tree_node node; } ; 304 struct module_layout { void *base; unsigned int size; unsigned int text_size; unsigned int ro_size; struct mod_tree_node mtn; } ; 318 struct module_sect_attrs ; 318 struct module_notes_attrs ; 318 struct trace_event_call ; 318 struct trace_enum_map ; 318 struct module { enum module_state state; struct list_head list; char name[56U]; struct module_kobject mkobj; struct module_attribute *modinfo_attrs; const char *version; const char *srcversion; struct kobject *holders_dir; const struct kernel_symbol *syms; const unsigned long *crcs; unsigned int num_syms; struct mutex param_lock; struct kernel_param *kp; unsigned int num_kp; unsigned int num_gpl_syms; const struct kernel_symbol *gpl_syms; const unsigned long *gpl_crcs; const struct kernel_symbol *unused_syms; const unsigned long *unused_crcs; unsigned int num_unused_syms; unsigned int num_unused_gpl_syms; const struct kernel_symbol *unused_gpl_syms; const unsigned long *unused_gpl_crcs; bool sig_ok; bool async_probe_requested; const struct kernel_symbol *gpl_future_syms; const unsigned long *gpl_future_crcs; unsigned int num_gpl_future_syms; unsigned int num_exentries; struct exception_table_entry *extable; int (*init)(); struct module_layout core_layout; struct module_layout init_layout; struct mod_arch_specific arch; unsigned int taints; unsigned int num_bugs; struct list_head bug_list; struct bug_entry *bug_table; Elf64_Sym *symtab; Elf64_Sym *core_symtab; unsigned int num_symtab; unsigned int core_num_syms; char *strtab; char *core_strtab; struct module_sect_attrs *sect_attrs; struct module_notes_attrs *notes_attrs; char *args; void *percpu; unsigned int percpu_size; unsigned int num_tracepoints; const struct tracepoint **tracepoints_ptrs; unsigned int num_trace_bprintk_fmt; const char **trace_bprintk_fmt_start; struct trace_event_call **trace_events; unsigned int num_trace_events; struct trace_enum_map **trace_enums; unsigned int num_trace_enums; bool klp_alive; struct list_head source_list; struct list_head target_list; void (*exit)(); atomic_t refcnt; ctor_fn_t (**ctors)(); unsigned int num_ctors; } ; 81 struct disk_stats { unsigned long sectors[2U]; unsigned long ios[2U]; unsigned long merges[2U]; unsigned long ticks[2U]; unsigned long io_ticks; unsigned long time_in_queue; } ; 90 struct partition_meta_info { char uuid[37U]; u8 volname[64U]; } ; 102 struct hd_struct { sector_t start_sect; sector_t nr_sects; seqcount_t nr_sects_seq; sector_t alignment_offset; unsigned int discard_alignment; struct device __dev; struct kobject *holder_dir; int policy; int partno; struct partition_meta_info *info; int make_it_fail; unsigned long stamp; atomic_t in_flight[2U]; struct disk_stats *dkstats; struct percpu_ref ref; struct callback_head callback_head; } ; 156 struct disk_part_tbl { struct callback_head callback_head; int len; struct hd_struct *last_lookup; struct hd_struct *part[]; } ; 163 struct disk_events ; 164 struct badblocks ; 165 struct blk_integrity_profile ; 165 struct blk_integrity { struct blk_integrity_profile *profile; unsigned char flags; unsigned char tuple_size; unsigned char interval_exp; unsigned char tag_size; } ; 176 struct timer_rand_state ; 176 struct gendisk { int major; int first_minor; int minors; char disk_name[32U]; char * (*devnode)(struct gendisk *, umode_t *); unsigned int events; unsigned int async_events; struct disk_part_tbl *part_tbl; struct 
hd_struct part0; const struct block_device_operations *fops; struct request_queue *queue; void *private_data; int flags; struct device *driverfs_dev; struct kobject *slave_dir; struct timer_rand_state *random; atomic_t sync_io; struct disk_events *ev; struct kobject integrity_kobj; int node_id; struct badblocks *bb; } ; 72 struct fprop_local_percpu { struct percpu_counter events; unsigned int period; raw_spinlock_t lock; } ; 32 typedef int congested_fn(void *, int); 41 struct bdi_writeback_congested { unsigned long state; atomic_t refcnt; struct backing_dev_info *bdi; int blkcg_id; struct rb_node rb_node; } ; 60 union __anonunion____missing_field_name_372 { struct work_struct release_work; struct callback_head rcu; } ; 60 struct bdi_writeback { struct backing_dev_info *bdi; unsigned long state; unsigned long last_old_flush; struct list_head b_dirty; struct list_head b_io; struct list_head b_more_io; struct list_head b_dirty_time; spinlock_t list_lock; struct percpu_counter stat[4U]; struct bdi_writeback_congested *congested; unsigned long bw_time_stamp; unsigned long dirtied_stamp; unsigned long written_stamp; unsigned long write_bandwidth; unsigned long avg_write_bandwidth; unsigned long dirty_ratelimit; unsigned long balanced_dirty_ratelimit; struct fprop_local_percpu completions; int dirty_exceeded; spinlock_t work_lock; struct list_head work_list; struct delayed_work dwork; struct list_head bdi_node; struct percpu_ref refcnt; struct fprop_local_percpu memcg_completions; struct cgroup_subsys_state *memcg_css; struct cgroup_subsys_state *blkcg_css; struct list_head memcg_node; struct list_head blkcg_node; union __anonunion____missing_field_name_372 __annonCompField85; } ; 134 struct backing_dev_info { struct list_head bdi_list; unsigned long ra_pages; unsigned int capabilities; congested_fn *congested_fn; void *congested_data; char *name; unsigned int min_ratio; unsigned int max_ratio; unsigned int max_prop_frac; atomic_long_t tot_write_bandwidth; struct bdi_writeback wb; struct list_head wb_list; struct radix_tree_root cgwb_tree; struct rb_root cgwb_congested_tree; atomic_t usage_cnt; wait_queue_head_t wb_waitq; struct device *dev; struct timer_list laptop_mode_wb_timer; struct dentry *debug_dir; struct dentry *debug_stats; } ; 11 typedef void * mempool_alloc_t(gfp_t , void *); 12 typedef void mempool_free_t(void *, void *); 13 struct mempool_s { spinlock_t lock; int min_nr; int curr_nr; void **elements; void *pool_data; mempool_alloc_t *alloc; mempool_free_t *free; wait_queue_head_t wait; } ; 24 typedef struct mempool_s mempool_t; 78 union __anonunion____missing_field_name_373 { struct list_head q_node; struct kmem_cache *__rcu_icq_cache; } ; 78 union __anonunion____missing_field_name_374 { struct hlist_node ioc_node; struct callback_head __rcu_head; } ; 78 struct io_cq { struct request_queue *q; struct io_context *ioc; union __anonunion____missing_field_name_373 __annonCompField86; union __anonunion____missing_field_name_374 __annonCompField87; unsigned int flags; } ; 92 struct io_context { atomic_long_t refcount; atomic_t active_ref; atomic_t nr_tasks; spinlock_t lock; unsigned short ioprio; int nr_batch_requests; unsigned long last_waited; struct radix_tree_root icq_tree; struct io_cq *icq_hint; struct hlist_head icq_list; struct work_struct release_work; } ; 320 struct bio_integrity_payload { struct bio *bip_bio; struct bvec_iter bip_iter; bio_end_io_t *bip_end_io; unsigned short bip_slab; unsigned short bip_vcnt; unsigned short bip_max_vcnt; unsigned short bip_flags; struct 
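/*
 * Illustrative sketch (not part of the trace): how a block driver of this
 * kernel vintage publishes the struct gendisk completed above. The major
 * number, fops and queue arguments are hypothetical.
 */
#include <linux/genhd.h>
#include <linux/blkdev.h>

static struct gendisk *example_add_disk(int major,
                                        const struct block_device_operations *fops,
                                        struct request_queue *q)
{
        struct gendisk *disk = alloc_disk(16);  /* 16 minors: disk + partitions */

        if (!disk)
                return NULL;
        disk->major       = major;
        disk->first_minor = 0;
        disk->fops        = fops;
        disk->queue       = q;
        snprintf(disk->disk_name, sizeof(disk->disk_name), "example0");
        set_capacity(disk, 0);                  /* in sectors; set after identify */
        add_disk(disk);
        return disk;
}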
work_struct bip_work; struct bio_vec *bip_vec; struct bio_vec bip_inline_vecs[0U]; } ; 551 struct bio_list { struct bio *head; struct bio *tail; } ; 672 struct bio_set { struct kmem_cache *bio_slab; unsigned int front_pad; mempool_t *bio_pool; mempool_t *bvec_pool; mempool_t *bio_integrity_pool; mempool_t *bvec_integrity_pool; spinlock_t rescue_lock; struct bio_list rescue_list; struct work_struct rescue_work; struct workqueue_struct *rescue_workqueue; } ; 64 struct bsg_class_device { struct device *class_dev; struct device *parent; int minor; struct request_queue *queue; struct kref ref; void (*release)(struct device *); } ; 22 struct elevator_queue ; 24 struct request ; 26 struct bsg_job ; 27 struct blkcg_gq ; 28 struct blk_flush_queue ; 29 struct pr_ops ; 51 typedef void rq_end_io_fn(struct request *, int); 52 struct request_list { struct request_queue *q; struct blkcg_gq *blkg; int count[2U]; int starved[2U]; mempool_t *rq_pool; wait_queue_head_t wait[2U]; unsigned int flags; } ; 77 union __anonunion____missing_field_name_375 { struct call_single_data csd; unsigned long fifo_time; } ; 77 struct blk_mq_ctx ; 77 union __anonunion____missing_field_name_376 { struct hlist_node hash; struct list_head ipi_list; } ; 77 union __anonunion____missing_field_name_377 { struct rb_node rb_node; void *completion_data; } ; 77 struct __anonstruct_elv_379 { struct io_cq *icq; void *priv[2U]; } ; 77 struct __anonstruct_flush_380 { unsigned int seq; struct list_head list; rq_end_io_fn *saved_end_io; } ; 77 union __anonunion____missing_field_name_378 { struct __anonstruct_elv_379 elv; struct __anonstruct_flush_380 flush; } ; 77 struct request { struct list_head queuelist; union __anonunion____missing_field_name_375 __annonCompField88; struct request_queue *q; struct blk_mq_ctx *mq_ctx; u64 cmd_flags; unsigned int cmd_type; unsigned long atomic_flags; int cpu; unsigned int __data_len; sector_t __sector; struct bio *bio; struct bio *biotail; union __anonunion____missing_field_name_376 __annonCompField89; union __anonunion____missing_field_name_377 __annonCompField90; union __anonunion____missing_field_name_378 __annonCompField91; struct gendisk *rq_disk; struct hd_struct *part; unsigned long start_time; struct request_list *rl; unsigned long long start_time_ns; unsigned long long io_start_time_ns; unsigned short nr_phys_segments; unsigned short nr_integrity_segments; unsigned short ioprio; void *special; int tag; int errors; unsigned char __cmd[16U]; unsigned char *cmd; unsigned short cmd_len; unsigned int extra_len; unsigned int sense_len; unsigned int resid_len; void *sense; unsigned long deadline; struct list_head timeout_list; unsigned int timeout; int retries; rq_end_io_fn *end_io; void *end_io_data; struct request *next_rq; } ; 117 struct elevator_type ; 12 typedef int elevator_merge_fn(struct request_queue *, struct request **, struct bio *); 15 typedef void elevator_merge_req_fn(struct request_queue *, struct request *, struct request *); 17 typedef void elevator_merged_fn(struct request_queue *, struct request *, int); 19 typedef int elevator_allow_merge_fn(struct request_queue *, struct request *, struct bio *); 21 typedef void elevator_bio_merged_fn(struct request_queue *, struct request *, struct bio *); 24 typedef int elevator_dispatch_fn(struct request_queue *, int); 26 typedef void elevator_add_req_fn(struct request_queue *, struct request *); 27 typedef struct request * elevator_request_list_fn(struct request_queue *, struct request *); 28 typedef void elevator_completed_req_fn(struct 
request_queue *, struct request *); 29 typedef int elevator_may_queue_fn(struct request_queue *, int); 31 typedef void elevator_init_icq_fn(struct io_cq *); 32 typedef void elevator_exit_icq_fn(struct io_cq *); 33 typedef int elevator_set_req_fn(struct request_queue *, struct request *, struct bio *, gfp_t ); 35 typedef void elevator_put_req_fn(struct request *); 36 typedef void elevator_activate_req_fn(struct request_queue *, struct request *); 37 typedef void elevator_deactivate_req_fn(struct request_queue *, struct request *); 39 typedef int elevator_init_fn(struct request_queue *, struct elevator_type *); 41 typedef void elevator_exit_fn(struct elevator_queue *); 42 typedef void elevator_registered_fn(struct request_queue *); 43 struct elevator_ops { elevator_merge_fn *elevator_merge_fn; elevator_merged_fn *elevator_merged_fn; elevator_merge_req_fn *elevator_merge_req_fn; elevator_allow_merge_fn *elevator_allow_merge_fn; elevator_bio_merged_fn *elevator_bio_merged_fn; elevator_dispatch_fn *elevator_dispatch_fn; elevator_add_req_fn *elevator_add_req_fn; elevator_activate_req_fn *elevator_activate_req_fn; elevator_deactivate_req_fn *elevator_deactivate_req_fn; elevator_completed_req_fn *elevator_completed_req_fn; elevator_request_list_fn *elevator_former_req_fn; elevator_request_list_fn *elevator_latter_req_fn; elevator_init_icq_fn *elevator_init_icq_fn; elevator_exit_icq_fn *elevator_exit_icq_fn; elevator_set_req_fn *elevator_set_req_fn; elevator_put_req_fn *elevator_put_req_fn; elevator_may_queue_fn *elevator_may_queue_fn; elevator_init_fn *elevator_init_fn; elevator_exit_fn *elevator_exit_fn; elevator_registered_fn *elevator_registered_fn; } ; 74 struct elv_fs_entry { struct attribute attr; ssize_t (*show)(struct elevator_queue *, char *); ssize_t (*store)(struct elevator_queue *, const char *, size_t ); } ; 82 struct elevator_type { struct kmem_cache *icq_cache; struct elevator_ops ops; size_t icq_size; size_t icq_align; struct elv_fs_entry *elevator_attrs; char elevator_name[16U]; struct module *elevator_owner; char icq_cache_name[21U]; struct list_head list; } ; 103 struct elevator_queue { struct elevator_type *type; void *elevator_data; struct kobject kobj; struct mutex sysfs_lock; unsigned char registered; struct hlist_head hash[64U]; } ; 212 typedef void request_fn_proc(struct request_queue *); 213 typedef blk_qc_t make_request_fn(struct request_queue *, struct bio *); 214 typedef int prep_rq_fn(struct request_queue *, struct request *); 215 typedef void unprep_rq_fn(struct request_queue *, struct request *); 218 typedef void softirq_done_fn(struct request *); 219 typedef int dma_drain_needed_fn(struct request *); 220 typedef int lld_busy_fn(struct request_queue *); 221 typedef int bsg_job_fn(struct bsg_job *); 222 enum blk_eh_timer_return { BLK_EH_NOT_HANDLED = 0, BLK_EH_HANDLED = 1, BLK_EH_RESET_TIMER = 2 } ; 229 typedef enum blk_eh_timer_return rq_timed_out_fn(struct request *); 235 struct blk_queue_tag { struct request **tag_index; unsigned long *tag_map; int busy; int max_depth; int real_max_depth; atomic_t refcnt; int alloc_policy; int next_tag; } ; 246 struct queue_limits { unsigned long bounce_pfn; unsigned long seg_boundary_mask; unsigned long virt_boundary_mask; unsigned int max_hw_sectors; unsigned int max_dev_sectors; unsigned int chunk_sectors; unsigned int max_sectors; unsigned int max_segment_size; unsigned int physical_block_size; unsigned int alignment_offset; unsigned int io_min; unsigned int io_opt; unsigned int max_discard_sectors; unsigned int 
max_hw_discard_sectors; unsigned int max_write_same_sectors; unsigned int discard_granularity; unsigned int discard_alignment; unsigned short logical_block_size; unsigned short max_segments; unsigned short max_integrity_segments; unsigned char misaligned; unsigned char discard_misaligned; unsigned char cluster; unsigned char discard_zeroes_data; unsigned char raid_partial_stripes_expensive; } ; 282 struct blk_mq_ops ; 282 struct blk_mq_hw_ctx ; 282 struct throtl_data ; 282 struct blk_mq_tag_set ; 282 struct request_queue { struct list_head queue_head; struct request *last_merge; struct elevator_queue *elevator; int nr_rqs[2U]; int nr_rqs_elvpriv; struct request_list root_rl; request_fn_proc *request_fn; make_request_fn *make_request_fn; prep_rq_fn *prep_rq_fn; unprep_rq_fn *unprep_rq_fn; softirq_done_fn *softirq_done_fn; rq_timed_out_fn *rq_timed_out_fn; dma_drain_needed_fn *dma_drain_needed; lld_busy_fn *lld_busy_fn; struct blk_mq_ops *mq_ops; unsigned int *mq_map; struct blk_mq_ctx *queue_ctx; unsigned int nr_queues; struct blk_mq_hw_ctx **queue_hw_ctx; unsigned int nr_hw_queues; sector_t end_sector; struct request *boundary_rq; struct delayed_work delay_work; struct backing_dev_info backing_dev_info; void *queuedata; unsigned long queue_flags; int id; gfp_t bounce_gfp; spinlock_t __queue_lock; spinlock_t *queue_lock; struct kobject kobj; struct kobject mq_kobj; struct blk_integrity integrity; struct device *dev; int rpm_status; unsigned int nr_pending; unsigned long nr_requests; unsigned int nr_congestion_on; unsigned int nr_congestion_off; unsigned int nr_batching; unsigned int dma_drain_size; void *dma_drain_buffer; unsigned int dma_pad_mask; unsigned int dma_alignment; struct blk_queue_tag *queue_tags; struct list_head tag_busy_list; unsigned int nr_sorted; unsigned int in_flight[2U]; unsigned int request_fn_active; unsigned int rq_timeout; struct timer_list timeout; struct work_struct timeout_work; struct list_head timeout_list; struct list_head icq_list; unsigned long blkcg_pols[1U]; struct blkcg_gq *root_blkg; struct list_head blkg_list; struct queue_limits limits; unsigned int sg_timeout; unsigned int sg_reserved_size; int node; unsigned int flush_flags; unsigned char flush_not_queueable; struct blk_flush_queue *fq; struct list_head requeue_list; spinlock_t requeue_lock; struct work_struct requeue_work; struct mutex sysfs_lock; int bypass_depth; atomic_t mq_freeze_depth; bsg_job_fn *bsg_job_fn; int bsg_job_size; struct bsg_class_device bsg_dev; struct throtl_data *td; struct callback_head callback_head; wait_queue_head_t mq_freeze_wq; struct percpu_ref q_usage_counter; struct list_head all_q_node; struct blk_mq_tag_set *tag_set; struct list_head tag_set_list; struct bio_set *bio_split; bool mq_sysfs_init_done; } ; 1029 struct blk_plug { struct list_head list; struct list_head mq_list; struct list_head cb_list; } ; 1446 struct blk_integrity_iter { void *prot_buf; void *data_buf; sector_t seed; unsigned int data_size; unsigned short interval; const char *disk_name; } ; 1475 typedef int integrity_processing_fn(struct blk_integrity_iter *); 1476 struct blk_integrity_profile { integrity_processing_fn *generate_fn; integrity_processing_fn *verify_fn; const char *name; } ; 1635 struct block_device_operations { int (*open)(struct block_device *, fmode_t ); void (*release)(struct gendisk *, fmode_t ); int (*rw_page)(struct block_device *, sector_t , struct page *, int); int (*ioctl)(struct block_device *, fmode_t , unsigned int, unsigned long); int (*compat_ioctl)(struct block_device *, 
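/*
 * Illustrative sketch (not part of the trace): struct blk_plug above lets a
 * task batch bio submissions before they reach the request_queue.
 * Hypothetical usage:
 */
#include <linux/blkdev.h>

static void example_submit_batch(struct bio **bios, int n)
{
        struct blk_plug plug;
        int i;

        blk_start_plug(&plug);                  /* hold back dispatch */
        for (i = 0; i < n; i++)
                generic_make_request(bios[i]);
        blk_finish_plug(&plug);                 /* unplug: flush the batch */
}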
fmode_t , unsigned int, unsigned long); long int (*direct_access)(struct block_device *, sector_t , void **, pfn_t *); unsigned int (*check_events)(struct gendisk *, unsigned int); int (*media_changed)(struct gendisk *); void (*unlock_native_capacity)(struct gendisk *); int (*revalidate_disk)(struct gendisk *); int (*getgeo)(struct block_device *, struct hd_geometry *); void (*swap_slot_free_notify)(struct block_device *, unsigned long); struct module *owner; const struct pr_ops *pr_ops; } ; 1663 struct blk_mq_tags ; 1664 struct blk_mq_cpu_notifier { struct list_head list; void *data; int (*notify)(void *, unsigned long, unsigned int); } ; 14 struct blk_align_bitmap ; 14 struct blk_mq_ctxmap { unsigned int size; unsigned int bits_per_word; struct blk_align_bitmap *map; } ; 20 struct __anonstruct____missing_field_name_382 { spinlock_t lock; struct list_head dispatch; } ; 20 struct blk_mq_hw_ctx { struct __anonstruct____missing_field_name_382 __annonCompField92; unsigned long state; struct delayed_work run_work; struct delayed_work delay_work; cpumask_var_t cpumask; int next_cpu; int next_cpu_batch; unsigned long flags; struct request_queue *queue; struct blk_flush_queue *fq; void *driver_data; struct blk_mq_ctxmap ctx_map; unsigned int nr_ctx; struct blk_mq_ctx **ctxs; atomic_t wait_index; struct blk_mq_tags *tags; unsigned long queued; unsigned long run; unsigned long dispatched[10U]; unsigned int numa_node; unsigned int queue_num; atomic_t nr_active; struct blk_mq_cpu_notifier cpu_notifier; struct kobject kobj; unsigned long poll_invoked; unsigned long poll_success; } ; 66 struct blk_mq_tag_set { struct blk_mq_ops *ops; unsigned int nr_hw_queues; unsigned int queue_depth; unsigned int reserved_tags; unsigned int cmd_size; int numa_node; unsigned int timeout; unsigned int flags; void *driver_data; struct blk_mq_tags **tags; struct mutex tag_list_lock; struct list_head tag_list; } ; 83 struct blk_mq_queue_data { struct request *rq; struct list_head *list; bool last; } ; 90 typedef int queue_rq_fn(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *); 91 typedef struct blk_mq_hw_ctx * map_queue_fn(struct request_queue *, const int); 92 typedef enum blk_eh_timer_return timeout_fn(struct request *, bool ); 93 typedef int init_hctx_fn(struct blk_mq_hw_ctx *, void *, unsigned int); 94 typedef void exit_hctx_fn(struct blk_mq_hw_ctx *, unsigned int); 95 typedef int init_request_fn(void *, struct request *, unsigned int, unsigned int, unsigned int); 97 typedef void exit_request_fn(void *, struct request *, unsigned int, unsigned int); 103 typedef int poll_fn(struct blk_mq_hw_ctx *, unsigned int); 104 struct blk_mq_ops { queue_rq_fn *queue_rq; map_queue_fn *map_queue; timeout_fn *timeout; poll_fn *poll; softirq_done_fn *complete; init_hctx_fn *init_hctx; exit_hctx_fn *exit_hctx; init_request_fn *init_request; exit_request_fn *exit_request; } ; 933 enum fwnode_type { FWNODE_INVALID = 0, FWNODE_OF = 1, FWNODE_ACPI = 2, FWNODE_ACPI_DATA = 3, FWNODE_PDATA = 4, FWNODE_IRQCHIP = 5 } ; 942 struct fwnode_handle { enum fwnode_type type; struct fwnode_handle *secondary; } ; 162 typedef u64 acpi_io_address; 450 typedef void *acpi_handle; 644 typedef u32 acpi_object_type; 901 struct __anonstruct_integer_389 { acpi_object_type type; u64 value; } ; 901 struct __anonstruct_string_390 { acpi_object_type type; u32 length; char *pointer; } ; 901 struct __anonstruct_buffer_391 { acpi_object_type type; u32 length; u8 *pointer; } ; 901 struct __anonstruct_package_392 { acpi_object_type type; u32 count; union 
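/*
 * Illustrative sketch (not part of the trace): the blk-mq hooks wired into
 * struct blk_mq_ops above (this driver embeds a struct blk_mq_tag_set as
 * driver_data.tags). Names are hypothetical, and the queue_rq below completes
 * requests immediately instead of issuing real hardware commands.
 */
#include <linux/blk-mq.h>

static int example_queue_rq(struct blk_mq_hw_ctx *hctx,
                            const struct blk_mq_queue_data *bd)
{
        struct request *rq = bd->rq;

        blk_mq_start_request(rq);
        /* ... build the command, map its sg list, ring the doorbell ... */
        blk_mq_end_request(rq, 0);              /* complete with success */
        return BLK_MQ_RQ_QUEUE_OK;
}

static struct blk_mq_ops example_mq_ops = {
        .queue_rq  = example_queue_rq,
        .map_queue = blk_mq_map_queue,          /* default cpu -> hctx mapping */
};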
acpi_object *elements; } ; 901 struct __anonstruct_reference_393 { acpi_object_type type; acpi_object_type actual_type; acpi_handle handle; } ; 901 struct __anonstruct_processor_394 { acpi_object_type type; u32 proc_id; acpi_io_address pblk_address; u32 pblk_length; } ; 901 struct __anonstruct_power_resource_395 { acpi_object_type type; u32 system_level; u32 resource_order; } ; 901 union acpi_object { acpi_object_type type; struct __anonstruct_integer_389 integer; struct __anonstruct_string_390 string; struct __anonstruct_buffer_391 buffer; struct __anonstruct_package_392 package; struct __anonstruct_reference_393 reference; struct __anonstruct_processor_394 processor; struct __anonstruct_power_resource_395 power_resource; } ; 108 struct acpi_driver ; 109 struct acpi_hotplug_profile { struct kobject kobj; int (*scan_dependent)(struct acpi_device *); void (*notify_online)(struct acpi_device *); bool enabled; bool demand_offline; } ; 131 struct acpi_scan_handler { const struct acpi_device_id *ids; struct list_head list_node; bool (*match)(const char *, const struct acpi_device_id **); int (*attach)(struct acpi_device *, const struct acpi_device_id *); void (*detach)(struct acpi_device *); void (*bind)(struct device *); void (*unbind)(struct device *); struct acpi_hotplug_profile hotplug; } ; 141 struct acpi_hotplug_context { struct acpi_device *self; int (*notify)(struct acpi_device *, u32 ); void (*uevent)(struct acpi_device *, u32 ); void (*fixup)(struct acpi_device *); } ; 162 struct acpi_device_ops { int (*add)(struct acpi_device *); int (*remove)(struct acpi_device *); void (*notify)(struct acpi_device *, u32 ); } ; 168 struct acpi_driver { char name[80U]; char class[80U]; const struct acpi_device_id *ids; unsigned int flags; struct acpi_device_ops ops; struct device_driver drv; struct module *owner; } ; 180 struct acpi_device_status { unsigned char present; unsigned char enabled; unsigned char show_in_ui; unsigned char functional; unsigned char battery_present; unsigned int reserved; } ; 196 struct acpi_device_flags { unsigned char dynamic_status; unsigned char removable; unsigned char ejectable; unsigned char power_manageable; unsigned char match_driver; unsigned char initialized; unsigned char visited; unsigned char hotplug_notify; unsigned char is_dock_station; unsigned char of_compatible_ok; unsigned char coherent_dma; unsigned char cca_seen; unsigned int reserved; } ; 214 struct acpi_device_dir { struct proc_dir_entry *entry; } ; 225 typedef char acpi_bus_id[8U]; 226 typedef unsigned long acpi_bus_address; 227 typedef char acpi_device_name[40U]; 228 typedef char acpi_device_class[20U]; 234 struct acpi_pnp_type { unsigned char hardware_id; unsigned char bus_address; unsigned char platform_id; unsigned int reserved; } ; 241 struct acpi_device_pnp { acpi_bus_id bus_id; struct acpi_pnp_type type; acpi_bus_address bus_address; char *unique_id; struct list_head ids; acpi_device_name device_name; acpi_device_class device_class; union acpi_object *str_obj; } ; 256 struct acpi_device_power_flags { unsigned char explicit_get; unsigned char power_resources; unsigned char inrush_current; unsigned char power_removed; unsigned char ignore_parent; unsigned char dsw_present; unsigned int reserved; } ; 271 struct __anonstruct_flags_396 { unsigned char valid; unsigned char explicit_set; unsigned char reserved; } ; 271 struct acpi_device_power_state { struct __anonstruct_flags_396 flags; int power; int latency; struct list_head resources; } ; 282 struct acpi_device_power { int state; struct 
acpi_device_power_flags flags; struct acpi_device_power_state states[5U]; } ; 288 struct acpi_device_perf_flags { u8 reserved; } ; 294 struct __anonstruct_flags_397 { unsigned char valid; unsigned char reserved; } ; 294 struct acpi_device_perf_state { struct __anonstruct_flags_397 flags; u8 power; u8 performance; int latency; } ; 304 struct acpi_device_perf { int state; struct acpi_device_perf_flags flags; int state_count; struct acpi_device_perf_state *states; } ; 311 struct acpi_device_wakeup_flags { unsigned char valid; unsigned char run_wake; unsigned char notifier_present; unsigned char enabled; } ; 319 struct acpi_device_wakeup_context { struct work_struct work; struct device *dev; } ; 324 struct acpi_device_wakeup { acpi_handle gpe_device; u64 gpe_number; u64 sleep_state; struct list_head resources; struct acpi_device_wakeup_flags flags; struct acpi_device_wakeup_context context; struct wakeup_source *ws; int prepare_count; } ; 342 struct acpi_device_data { const union acpi_object *pointer; const union acpi_object *properties; const union acpi_object *of_compatible; struct list_head subnodes; } ; 350 struct acpi_gpio_mapping ; 351 struct acpi_device { int device_type; acpi_handle handle; struct fwnode_handle fwnode; struct acpi_device *parent; struct list_head children; struct list_head node; struct list_head wakeup_list; struct list_head del_list; struct acpi_device_status status; struct acpi_device_flags flags; struct acpi_device_pnp pnp; struct acpi_device_power power; struct acpi_device_wakeup wakeup; struct acpi_device_perf performance; struct acpi_device_dir dir; struct acpi_device_data data; struct acpi_scan_handler *handler; struct acpi_hotplug_context *hp; struct acpi_driver *driver; const struct acpi_gpio_mapping *driver_gpios; void *driver_data; struct device dev; unsigned int physical_node_count; unsigned int dep_unmet; struct list_head physical_node_list; struct mutex physical_node_lock; void (*remove)(struct acpi_device *); } ; 769 struct acpi_gpio_params { unsigned int crs_entry_index; unsigned int line_index; bool active_low; } ; 822 struct acpi_gpio_mapping { const char *name; const struct acpi_gpio_params *data; unsigned int size; } ; 32 typedef u32 phandle; 34 struct property { char *name; int length; void *value; struct property *next; unsigned long _flags; unsigned int unique_id; struct bin_attribute attr; } ; 44 struct device_node { const char *name; const char *type; phandle phandle; const char *full_name; struct fwnode_handle fwnode; struct property *properties; struct property *deadprops; struct device_node *parent; struct device_node *child; struct device_node *sibling; struct kobject kobj; unsigned long _flags; void *data; } ; 77 struct smart_attr { u8 attr_id; u16 flags; u8 cur; u8 worst; u32 data; u8 res[3U]; } ; 173 struct mtip_work { struct work_struct work; void *port; int cpu_binding; u32 completed; } ; 180 struct mtip_trim_entry { u32 lba; u16 rsvd; u16 range; } ; 202 union __anonunion____missing_field_name_407 { unsigned char lba_low; unsigned char sector; } ; 202 union __anonunion____missing_field_name_408 { unsigned char lba_mid; unsigned char cyl_low; } ; 202 union __anonunion____missing_field_name_409 { unsigned char lba_hi; unsigned char cyl_hi; } ; 202 union __anonunion____missing_field_name_410 { unsigned char device; unsigned char head; } ; 202 union __anonunion____missing_field_name_411 { unsigned char lba_low_ex; unsigned char sector_ex; } ; 202 union __anonunion____missing_field_name_412 { unsigned char lba_mid_ex; unsigned char 
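/*
 * Illustrative sketch (not part of the trace): struct mtip_work above embeds
 * a work_struct so the interrupt handler can defer per-slot-group completion
 * processing to a bound CPU. A hypothetical shape of that deferral:
 */
#include <linux/workqueue.h>

static void example_work_fn(struct work_struct *w)
{
        struct mtip_work *mw = container_of(w, struct mtip_work, work);

        /* ... handle the completion bits latched in mw->completed
         *     for the port recorded in mw->port ... */
}

/* setup: INIT_WORK(&mw->work, example_work_fn);
 * from the ISR: queue_work_on(mw->cpu_binding, wq, &mw->work); */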
cyl_low_ex; } ; 202 union __anonunion____missing_field_name_413 { unsigned char lba_hi_ex; unsigned char cyl_hi_ex; } ; 202 struct host_to_dev_fis { unsigned char type; unsigned char opts; unsigned char command; unsigned char features; union __anonunion____missing_field_name_407 __annonCompField97; union __anonunion____missing_field_name_408 __annonCompField98; union __anonunion____missing_field_name_409 __annonCompField99; union __anonunion____missing_field_name_410 __annonCompField100; union __anonunion____missing_field_name_411 __annonCompField101; union __anonunion____missing_field_name_412 __annonCompField102; union __anonunion____missing_field_name_413 __annonCompField103; unsigned char features_ex; unsigned char sect_count; unsigned char sect_cnt_ex; unsigned char res2; unsigned char control; unsigned int res3; } ; 259 union __anonunion____missing_field_name_414 { unsigned int byte_count; unsigned int status; } ; 259 struct mtip_cmd_hdr { unsigned int opts; union __anonunion____missing_field_name_414 __annonCompField104; unsigned int ctba; unsigned int ctbau; unsigned int res[4U]; } ; 290 struct mtip_cmd_sg { unsigned int dba; unsigned int dba_upper; unsigned int reserved; unsigned int info; } ; 314 struct mtip_port ; 315 struct mtip_cmd { struct mtip_cmd_hdr *command_header; dma_addr_t command_header_dma; void *command; dma_addr_t command_dma; void *comp_data; void (*comp_func)(struct mtip_port *, int, struct mtip_cmd *, int); int scatter_ents; int unaligned; struct scatterlist sg[504U]; int retries; int direction; } ; 347 struct driver_data ; 347 struct mtip_port { struct driver_data *dd; unsigned long identify_valid; void *mmio; void *s_active[8U]; void *completed[8U]; void *cmd_issue[8U]; void *command_list; dma_addr_t command_list_dma; void *rxfis; dma_addr_t rxfis_dma; void *block1; dma_addr_t block1_dma; u16 *identify; dma_addr_t identify_dma; u16 *sector_buffer; dma_addr_t sector_buffer_dma; u16 *log_buf; dma_addr_t log_buf_dma; u8 *smart_buf; dma_addr_t smart_buf_dma; unsigned long cmds_to_issue[4U]; wait_queue_head_t svc_wait; unsigned long flags; unsigned long ic_pause_timer; struct semaphore cmd_slot_unal; spinlock_t cmd_issue_lock[8U]; } ; 443 struct driver_data { void *mmio; int major; int instance; struct gendisk *disk; struct pci_dev *pdev; struct request_queue *queue; struct blk_mq_tag_set tags; struct mtip_port *port; unsigned int product_type; unsigned int slot_groups; unsigned long index; unsigned long dd_flag; struct task_struct *mtip_svc_handler; struct dentry *dfs_node; bool trim_supp; bool sr; int numa_node; char workq_name[32U]; struct workqueue_struct *isr_workq; atomic_t irq_workers_active; struct mtip_work work[8U]; int isr_binding; struct block_device *bdev; struct list_head online_list; struct list_head remove_list; int unal_qdepth; } ; 126 struct mtip_compat_ide_task_request_s { __u8 io_ports[8U]; __u8 hob_ports[8U]; ide_reg_valid_t out_flags; ide_reg_valid_t in_flags; int data_phase; int req_cmd; compat_ulong_t out_size; compat_ulong_t in_size; } ; 38 typedef int Set; 1 long int __builtin_expect(long, long); 1 void __builtin_prefetch(const void *, ...); 1 void * __builtin_memcpy(void *, const void *, unsigned long); 1 unsigned long int __builtin_object_size(void *, int); 216 void __read_once_size(const volatile void *p, void *res, int size); 241 void __write_once_size(volatile void *p, void *res, int size); 7 dma_addr_t ldv_dma_map_single_attrs(struct device *dev, void *ptr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs); 12 void * 
memdup_user(const void *, size_t ); 31 void * __memcpy(void *, const void *, size_t ); 56 void * __memset(void *, int, size_t ); 60 void * __memmove(void *, const void *, size_t ); 63 size_t strlen(const char *); 27 size_t strlcpy(char *, const char *, size_t ); 33 extern struct module __this_module; 72 void set_bit(long nr, volatile unsigned long *addr); 110 void clear_bit(long nr, volatile unsigned long *addr); 308 int constant_test_bit(long nr, const volatile unsigned long *addr); 314 int variable_test_bit(long nr, const volatile unsigned long *addr); 14 unsigned long int find_next_bit(const unsigned long *, unsigned long, unsigned long); 42 unsigned long int find_first_bit(const unsigned long *, unsigned long); 46 __u16 __fswab16(__u16 val); 154 __u16 __swab16p(const __u16 *p); 223 void __swab16s(__u16 *p); 142 int printk(const char *, ...); 53 void __dynamic_dev_dbg(struct _ddebug *, const struct device *, const char *, ...); 178 void __might_sleep(const char *, int, int); 248 void __might_fault(const char *, int); 403 int sprintf(char *, const char *, ...); 406 int snprintf(char *, size_t , const char *, ...); 25 void INIT_LIST_HEAD(struct list_head *list); 48 void __list_add(struct list_head *, struct list_head *, struct list_head *); 61 void list_add(struct list_head *new, struct list_head *head); 112 void __list_del_entry(struct list_head *); 154 void list_del_init(struct list_head *entry); 28 long int PTR_ERR(const void *ptr); 33 bool IS_ERR(const void *ptr); 38 bool IS_ERR_OR_NULL(const void *ptr); 87 void __bad_percpu_size(); 295 void __bad_size_call_parameter(); 18 extern unsigned long __per_cpu_offset[8192U]; 15 void __xadd_wrong_size(); 24 int atomic_read(const atomic_t *v); 36 void atomic_set(atomic_t *v, int i); 154 int atomic_add_return(int i, atomic_t *v); 166 int atomic_sub_return(int i, atomic_t *v); 71 void warn_slowpath_null(const char *, const int); 280 void lockdep_init_map(struct lockdep_map *, const char *, struct lock_class_key *, int); 112 int __bitmap_weight(const unsigned long *, unsigned int); 282 int bitmap_empty(const unsigned long *src, unsigned int nbits); 298 int bitmap_weight(const unsigned long *src, unsigned int nbits); 37 extern int nr_cpu_ids; 90 extern struct cpumask __cpu_present_mask; 118 unsigned int cpumask_check(unsigned int cpu); 176 unsigned int cpumask_first(const struct cpumask *srcp); 188 unsigned int cpumask_next(int n, const struct cpumask *srcp); 460 bool cpumask_empty(const struct cpumask *srcp); 478 unsigned int cpumask_weight(const struct cpumask *srcp); 793 extern const unsigned long cpu_bit_bitmap[65U][128U]; 795 const struct cpumask * get_cpu_mask(unsigned int cpu); 163 extern struct cpuinfo_x86 cpu_info; 93 void __raw_spin_lock_init(raw_spinlock_t *, const char *, struct lock_class_key *); 22 void _raw_spin_lock(raw_spinlock_t *); 34 unsigned long int _raw_spin_lock_irqsave(raw_spinlock_t *); 41 void _raw_spin_unlock(raw_spinlock_t *); 45 void _raw_spin_unlock_irqrestore(raw_spinlock_t *, unsigned long); 289 raw_spinlock_t * spinlock_check(spinlock_t *lock); 300 void spin_lock(spinlock_t *lock); 345 void spin_unlock(spinlock_t *lock); 360 void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags); 72 void __init_waitqueue_head(wait_queue_head_t *, const char *, struct lock_class_key *); 200 void __wake_up(wait_queue_head_t *, unsigned int, int, void *); 977 long int prepare_to_wait_event(wait_queue_head_t *, wait_queue_t *, int); 978 void finish_wait(wait_queue_head_t *, wait_queue_t *); 73 void init_completion(struct 
completion *x); 99 long int wait_for_completion_interruptible_timeout(struct completion *, unsigned long); 106 void complete(struct completion *); 78 extern volatile unsigned long jiffies; 284 unsigned int jiffies_to_msecs(const unsigned long); 292 unsigned long int __msecs_to_jiffies(const unsigned int); 354 unsigned long int msecs_to_jiffies(const unsigned int m); 176 int ida_pre_get(struct ida *, gfp_t ); 177 int ida_get_new_above(struct ida *, int, int *); 178 void ida_remove(struct ida *, int); 193 int ida_get_new(struct ida *ida, int *p_id); 228 int sysfs_create_file_ns(struct kobject *, const struct attribute *, const void *); 235 void sysfs_remove_file_ns(struct kobject *, const struct attribute *, const void *); 491 int sysfs_create_file(struct kobject *kobj, const struct attribute *attr); 497 void sysfs_remove_file(struct kobject *kobj, const struct attribute *attr); 181 void __init_work(struct work_struct *, int); 361 struct workqueue_struct * __alloc_workqueue_key(const char *, unsigned int, int, struct lock_class_key *, const char *, ...); 421 void destroy_workqueue(struct workqueue_struct *); 429 bool queue_work_on(int, struct workqueue_struct *, struct work_struct *); 436 void flush_workqueue(struct workqueue_struct *); 87 const char * kobject_name(const struct kobject *kobj); 110 struct kobject * kobject_get(struct kobject *); 111 void kobject_put(struct kobject *); 251 int __first_node(const nodemask_t *srcp); 257 int __next_node(int n, const nodemask_t *srcp); 395 extern nodemask_t node_states[5U]; 398 int node_state(int node, enum node_states state); 423 int next_online_node(int nid); 57 int __cpu_to_node(int); 76 const struct cpumask * cpumask_of_node(int); 58 unsigned int readl(const volatile void *addr); 66 void writel(unsigned int val, volatile void *addr); 29 extern int cpu_number; 850 const char * dev_name(const struct device *dev); 863 int dev_to_node(struct device *dev); 897 void * dev_get_drvdata(const struct device *dev); 902 void dev_set_drvdata(struct device *dev, void *data); 1099 const char * dev_driver_string(const struct device *); 1120 void dev_err(const struct device *, const char *, ...); 1122 void dev_warn(const struct device *, const char *, ...); 1126 void _dev_info(const struct device *, const char *, ...); 209 bool capable(int); 430 void schedule(); 2470 int wake_up_process(struct task_struct *); 148 void kfree(const void *); 312 void * __kmalloc(size_t , gfp_t ); 327 void * __kmalloc_node(size_t , gfp_t , int); 451 void * kmalloc(size_t size, gfp_t flags); 491 void * kmalloc_node(size_t size, gfp_t flags, int node); 605 void * kzalloc(size_t size, gfp_t flags); 616 void * kzalloc_node(size_t size, gfp_t flags, int node); 858 int pci_find_capability(struct pci_dev *, int); 884 int pci_bus_read_config_word(struct pci_bus *, unsigned int, int, u16 *); 890 int pci_bus_write_config_word(struct pci_bus *, unsigned int, int, u16 ); 910 int pci_read_config_word(const struct pci_dev *dev, int where, u16 *val); 923 int pci_write_config_word(const struct pci_dev *dev, int where, u16 val); 978 int pcim_enable_device(struct pci_dev *); 1008 void pci_disable_device(struct pci_dev *); 1011 void pci_set_master(struct pci_dev *); 1066 int pci_save_state(struct pci_dev *); 1067 void pci_restore_state(struct pci_dev *); 1080 int pci_set_power_state(struct pci_dev *, pci_power_t ); 1182 int __pci_register_driver(struct pci_driver *, struct module *, const char *); 1191 void pci_unregister_driver(struct pci_driver *); 926 void * lowmem_page_address(const struct page 
*page); 120 struct page * sg_page(struct scatterlist *sg); 239 void * sg_virt(struct scatterlist *sg); 246 struct scatterlist * sg_next(struct scatterlist *); 248 void sg_init_table(struct scatterlist *, unsigned int); 1263 void pci_disable_msi(struct pci_dev *); 1270 int pci_enable_msi_range(struct pci_dev *, int, int); 1271 int pci_enable_msi_exact(struct pci_dev *dev, int nvec); 44 void debug_dma_unmap_page(struct device *, dma_addr_t , size_t , int, bool ); 47 void debug_dma_map_sg(struct device *, struct scatterlist *, int, int, int); 50 void debug_dma_unmap_sg(struct device *, struct scatterlist *, int, int); 131 void kmemcheck_mark_initialized(void *address, unsigned int n); 77 int valid_dma_direction(int dma_direction); 29 extern struct dma_map_ops *dma_ops; 31 struct dma_map_ops * get_dma_ops(struct device *dev); 47 int dma_supported(struct device *, u64 ); 143 dma_addr_t ldv_dma_map_single_attrs_1(struct device *dev, void *ptr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs); 144 void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs); 161 int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, struct dma_attrs *attrs); 179 void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, struct dma_attrs *attrs); 458 int dma_set_mask(struct device *dev, u64 mask); 482 int dma_set_coherent_mask(struct device *dev, u64 mask); 617 void * dmam_alloc_coherent(struct device *, size_t , dma_addr_t *, gfp_t ); 619 void dmam_free_coherent(struct device *, size_t , void *, dma_addr_t ); 32 dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction); 38 void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int direction); 107 int pci_set_dma_mask(struct pci_dev *dev, u64 mask); 112 int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask); 116 int __pcibus_to_node(const struct pci_bus *bus); 1535 void * pci_get_drvdata(struct pci_dev *pdev); 1540 void pci_set_drvdata(struct pci_dev *pdev, void *data); 1684 const void ** pcim_iomap_table(struct pci_dev *); 1685 int pcim_iomap_regions(struct pci_dev *, int, const char *); 1688 void pcim_iounmap_regions(struct pci_dev *, int); 667 unsigned long int _copy_from_user(void *, const void *, unsigned int); 669 unsigned long int _copy_to_user(void *, const void *, unsigned int); 689 void __copy_from_user_overflow(); 694 void __copy_to_user_overflow(); 710 unsigned long int copy_from_user(void *to, const void *from, unsigned long n); 745 unsigned long int copy_to_user(void *to, const void *from, unsigned long n); 11 void synchronize_irq(unsigned int); 154 int devm_request_threaded_irq(struct device *, unsigned int, irqreturn_t (*)(int, void *), irqreturn_t (*)(int, void *), unsigned long, const char *, void *); 160 int devm_request_irq(struct device *dev, unsigned int irq, irqreturn_t (*handler)(int, void *), unsigned long irqflags, const char *devname, void *dev_id); 172 void devm_free_irq(struct device *, unsigned int, void *); 266 int irq_set_affinity_hint(unsigned int, const struct cpumask *); 10 void __const_udelay(unsigned long); 46 void msleep(unsigned int); 50 void ssleep(unsigned int seconds); 32 void sema_init(struct semaphore *sem, int val); 42 int down_trylock(struct semaphore *); 44 void up(struct semaphore *); 2297 int register_blkdev(unsigned int, const char *); 2298 void 
unregister_blkdev(unsigned int, const char *); 2303 void bdput(struct block_device *); 2709 loff_t no_llseek(struct file *, loff_t , int); 2849 int simple_open(struct inode *, struct file *); 434 void add_disk(struct gendisk *); 435 void del_gendisk(struct gendisk *); 437 struct block_device * bdget_disk(struct gendisk *, int); 464 void set_capacity(struct gendisk *disk, sector_t size); 633 struct gendisk * alloc_disk_node(int, int); 636 void put_disk(struct gendisk *); 770 void blk_put_request(struct request *); 838 sector_t blk_rq_pos(const struct request *rq); 843 unsigned int blk_rq_bytes(const struct request *rq); 855 unsigned int blk_rq_sectors(const struct request *rq); 963 void blk_cleanup_queue(struct request_queue *); 965 void blk_queue_bounce_limit(struct request_queue *, u64 ); 966 void blk_queue_max_hw_sectors(struct request_queue *, unsigned int); 968 void blk_queue_max_segments(struct request_queue *, unsigned short); 969 void blk_queue_max_segment_size(struct request_queue *, unsigned int); 970 void blk_queue_max_discard_sectors(struct request_queue *, unsigned int); 975 void blk_queue_physical_block_size(struct request_queue *, unsigned int); 979 void blk_queue_io_min(struct request_queue *, unsigned int); 1006 void blk_queue_flush(struct request_queue *, unsigned int); 1010 int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *); 176 struct request_queue * blk_mq_init_queue(struct blk_mq_tag_set *); 182 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *); 183 void blk_mq_free_tag_set(struct blk_mq_tag_set *); 197 struct request * blk_mq_alloc_request(struct request_queue *, int, unsigned int); 199 struct request * blk_mq_tag_to_rq(struct blk_mq_tags *, unsigned int); 219 struct blk_mq_hw_ctx * blk_mq_map_queue(struct request_queue *, const int); 223 void blk_mq_start_request(struct request *); 224 void blk_mq_end_request(struct request *, int); 236 void blk_mq_stop_hw_queues(struct request_queue *); 238 void blk_mq_start_stopped_hw_queues(struct request_queue *, bool ); 251 struct request * blk_mq_rq_from_pdu(void *pdu); 255 void * blk_mq_rq_to_pdu(struct request *rq); 8 struct task_struct * kthread_create_on_node(int (*)(void *), void *, int, const char *, ...); 42 int kthread_stop(struct task_struct *); 43 bool kthread_should_stop(); 49 struct dentry * debugfs_create_file(const char *, umode_t , struct dentry *, void *, const struct file_operations *); 58 struct dentry * debugfs_create_dir(const char *, struct dentry *); 68 void debugfs_remove(struct dentry *); 69 void debugfs_remove_recursive(struct dentry *); 106 int instance = 0; 108 struct list_head online_list = { }; 109 struct list_head removing_list = { }; 110 struct spinlock dev_lock = { }; 116 int mtip_major = 0; 117 struct dentry *dfs_parent = 0; 118 struct dentry *dfs_device_status = 0; 120 u32 cpu_use[8192U] = { }; 122 struct spinlock rssd_index_lock = { { { { { 0 } }, 3735899821U, 4294967295U, (void *)-1, { 0, { 0, 0 }, "rssd_index_lock", 0, 0UL } } } }; 123 struct ida rssd_index_ida = { { 0, 0, 0, 0, { { { { { 0 } }, 3735899821U, 4294967295U, (void *)-1, { 0, { 0, 0 }, "(rssd_index_ida).idr.lock", 0, 0UL } } } }, 0, 0 }, (struct ida_bitmap *)0 }; 125 int mtip_block_initialize(struct driver_data *dd); 150 bool mtip_check_surprise_removal(struct pci_dev *pdev); 173 struct mtip_cmd * mtip_get_int_command(struct driver_data *dd); 181 void mtip_put_int_command(struct driver_data *dd, struct mtip_cmd *cmd); 189 struct request * mtip_rq_from_tag(struct driver_data *dd, unsigned int tag); 
197 struct mtip_cmd * mtip_cmd_from_tag(struct driver_data *dd, unsigned int tag); 223 void mtip_async_complete(struct mtip_port *port, int tag, struct mtip_cmd *cmd, int status); 257 int mtip_hba_reset(struct driver_data *dd); 298 void mtip_issue_ncq_command(struct mtip_port *port, int tag); 320 int mtip_enable_fis(struct mtip_port *port, int enable); 346 int mtip_enable_engine(struct mtip_port *port, int enable); 367 void mtip_start_port(struct mtip_port *port); 385 void mtip_deinit_port(struct mtip_port *port); 410 void mtip_init_port(struct mtip_port *port); 453 void mtip_restart_port(struct mtip_port *port); 523 int mtip_device_reset(struct driver_data *dd); 546 void print_tags(struct driver_data *dd, char *msg, unsigned long *tagbits, int cnt); 577 void mtip_completion(struct mtip_port *port, int tag, struct mtip_cmd *command, int status); 588 void mtip_null_completion(struct mtip_port *port, int tag, struct mtip_cmd *command, int status); 593 int mtip_read_log_page(struct mtip_port *port, u8 page, u16 *buffer, dma_addr_t buffer_dma, unsigned int sectors); 595 int mtip_get_smart_attr(struct mtip_port *port, unsigned int id, struct smart_attr *attrib); 605 void mtip_handle_tfe(struct driver_data *dd); 785 void mtip_workq_sdbfx(struct mtip_port *port, int group, u32 completed); 833 void mtip_process_legacy(struct driver_data *dd, u32 port_stat); 853 void mtip_process_errors(struct driver_data *dd, u32 port_stat); 881 irqreturn_t mtip_handle_irq(struct driver_data *data); 973 irqreturn_t mtip_irq_handler(int irq, void *instance___0); 980 void mtip_issue_non_ncq_command(struct mtip_port *port, int tag); 986 bool mtip_pause_ncq(struct mtip_port *port, struct host_to_dev_fis *fis); 1030 int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout); 1089 int mtip_exec_internal_command(struct mtip_port *port, struct host_to_dev_fis *fis, int fis_len, dma_addr_t buffer, int buf_len, u32 opts, gfp_t atomic, unsigned long timeout); 1276 void ata_swap_string(u16 *buf, unsigned int len); 1283 void mtip_set_timeout(struct driver_data *dd, struct host_to_dev_fis *fis, unsigned int *timeout, u8 erasemode); 1333 int mtip_get_identify(struct mtip_port *port, void *user_buffer); 1424 int mtip_standby_immediate(struct mtip_port *port); 1506 int mtip_get_smart_data(struct mtip_port *port, u8 *buffer, dma_addr_t buffer_dma); 1599 int mtip_send_trim(struct driver_data *dd, unsigned int lba, unsigned int len); 1673 bool mtip_hw_get_capacity(struct driver_data *dd, sector_t *sectors); 1694 void mtip_dump_identify(struct mtip_port *port); 1749 void fill_command_sg(struct driver_data *dd, struct mtip_cmd *command, int nents); 1782 int exec_drive_task(struct mtip_port *port, u8 *command); 1852 int exec_drive_command(struct mtip_port *port, u8 *command, void *user_buffer); 1962 unsigned int implicit_sector(unsigned char command, unsigned char features); 2001 int exec_drive_taskfile(struct driver_data *dd, void *buf, ide_task_request_t *req_task, int outtotal); 2259 int mtip_hw_ioctl(struct driver_data *dd, unsigned int cmd, unsigned long arg); 2363 void mtip_hw_submit_io(struct driver_data *dd, struct request *rq, struct mtip_cmd *command, int nents, struct blk_mq_hw_ctx *hctx); 2451 ssize_t mtip_hw_show_status(struct device *dev, struct device_attribute *attr, char *buf); 2468 struct device_attribute dev_attr_status = { { "status", 292U, (_Bool)0, 0, { { { 0 }, { 0 }, { 0 }, { 0 }, { 0 }, { 0 }, { 0 }, { 0 } } } }, &mtip_hw_show_status, (ssize_t (*)(struct device *, struct device_attribute *, const char *, 
size_t ))0 }; 2472 ssize_t show_device_status(struct device_driver *drv, char *buf); 2545 ssize_t mtip_hw_read_device_status(struct file *f, char *ubuf, size_t len, loff_t *offset); 2574 ssize_t mtip_hw_read_registers(struct file *f, char *ubuf, size_t len, loff_t *offset); 2641 ssize_t mtip_hw_read_flags(struct file *f, char *ubuf, size_t len, loff_t *offset); 2673 const struct file_operations mtip_device_status_fops = { &__this_module, &no_llseek, &mtip_hw_read_device_status, 0, 0, 0, 0, 0, 0, 0, 0, &simple_open, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; 2680 const struct file_operations mtip_regs_fops = { &__this_module, &no_llseek, &mtip_hw_read_registers, 0, 0, 0, 0, 0, 0, 0, 0, &simple_open, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; 2687 const struct file_operations mtip_flags_fops = { &__this_module, &no_llseek, &mtip_hw_read_flags, 0, 0, 0, 0, 0, 0, 0, 0, &simple_open, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; 2704 int mtip_hw_sysfs_init(struct driver_data *dd, struct kobject *kobj); 2725 int mtip_hw_sysfs_exit(struct driver_data *dd, struct kobject *kobj); 2735 int mtip_hw_debugfs_init(struct driver_data *dd); 2757 void mtip_hw_debugfs_exit(struct driver_data *dd); 2771 void hba_setup(struct driver_data *dd); 2783 int mtip_device_unaligned_constrained(struct driver_data *dd); 2798 void mtip_detect_product(struct driver_data *dd); 2848 int mtip_ftl_rebuild_poll(struct driver_data *dd); 2903 int mtip_service_thread(void *data); 2994 void mtip_dma_free(struct driver_data *dd); 3016 int mtip_dma_alloc(struct driver_data *dd); 3054 int mtip_hw_get_identify(struct driver_data *dd); 3118 int mtip_hw_init(struct driver_data *dd); 3267 void mtip_standby_drive(struct driver_data *dd); 3291 int mtip_hw_exit(struct driver_data *dd); 3332 int mtip_hw_shutdown(struct driver_data *dd); 3356 int mtip_hw_suspend(struct driver_data *dd); 3388 int mtip_hw_resume(struct driver_data *dd); 3418 int rssd_disk_name_format(char *prefix, int index, char *buf, int buflen); 3458 int mtip_block_ioctl(struct block_device *dev, fmode_t mode, unsigned int cmd, unsigned long arg); 3496 int mtip_block_compat_ioctl(struct block_device *dev, fmode_t mode, unsigned int cmd, unsigned long arg); 3578 int mtip_block_getgeo(struct block_device *dev, struct hd_geometry *geo); 3606 const struct block_device_operations mtip_block_ops = { 0, 0, 0, &mtip_block_ioctl, &mtip_block_compat_ioctl, 0, 0, 0, 0, 0, &mtip_block_getgeo, 0, &__this_module, 0 }; 3615 bool is_se_active(struct driver_data *dd); 3646 int mtip_submit_request(struct blk_mq_hw_ctx *hctx, struct request *rq); 3690 bool mtip_check_unal_depth(struct blk_mq_hw_ctx *hctx, struct request *rq); 3714 int mtip_queue_rq(struct blk_mq_hw_ctx *hctx, const struct blk_mq_queue_data *bd); 3733 void mtip_free_cmd(void *data, struct request *rq, unsigned int hctx_idx, unsigned int request_idx); 3746 int mtip_init_cmd(void *data, struct request *rq, unsigned int hctx_idx, unsigned int request_idx, unsigned int numa_node___0); 3783 struct blk_mq_ops mtip_mq_ops = { &mtip_queue_rq, &blk_mq_map_queue, 0, 0, 0, 0, 0, &mtip_init_cmd, &mtip_free_cmd }; 4008 int mtip_block_remove(struct driver_data *dd); 4076 int mtip_block_shutdown(struct driver_data *dd); 4101 int mtip_block_suspend(struct driver_data *dd); 4109 int mtip_block_resume(struct driver_data *dd); 4117 void drop_cpu(int cpu); 4122 int get_least_used_cpu_on_node(int node); 4143 int mtip_get_next_rr_node(); 4158 void mtip_workq_sdbf0(struct work_struct *work); 4159 void 
mtip_workq_sdbf1(struct work_struct *work); 4160 void mtip_workq_sdbf2(struct work_struct *work); 4161 void mtip_workq_sdbf3(struct work_struct *work); 4162 void mtip_workq_sdbf4(struct work_struct *work); 4163 void mtip_workq_sdbf5(struct work_struct *work); 4164 void mtip_workq_sdbf6(struct work_struct *work); 4165 void mtip_workq_sdbf7(struct work_struct *work); 4167 void mtip_disable_link_opts(struct driver_data *dd, struct pci_dev *pdev); 4191 void mtip_fix_ero_nosnoop(struct driver_data *dd, struct pci_dev *pdev); 4227 int mtip_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent); 4425 void mtip_pci_remove(struct pci_dev *pdev); 4483 int mtip_pci_suspend(struct pci_dev *pdev, pm_message_t mesg); 4524 int mtip_pci_resume(struct pci_dev *pdev); 4571 void mtip_pci_shutdown(struct pci_dev *pdev); 4579 const struct pci_device_id mtip_pci_tbl[8U] = { { 4932U, 20816U, 4294967295U, 4294967295U, 0U, 0U, 0UL }, { 4932U, 20817U, 4294967295U, 4294967295U, 0U, 0U, 0UL }, { 4932U, 20818U, 4294967295U, 4294967295U, 0U, 0U, 0UL }, { 4932U, 20819U, 4294967295U, 4294967295U, 0U, 0U, 0UL }, { 4932U, 20832U, 4294967295U, 4294967295U, 0U, 0U, 0UL }, { 4932U, 20833U, 4294967295U, 4294967295U, 0U, 0U, 0UL }, { 4932U, 20835U, 4294967295U, 4294967295U, 0U, 0U, 0UL }, { 0U, 0U, 0U, 0U, 0U, 0U, 0UL } }; 4591 struct pci_driver mtip_pci_driver = { { 0, 0 }, "mtip32xx", (const struct pci_device_id *)(&mtip_pci_tbl), &mtip_pci_probe, &mtip_pci_remove, &mtip_pci_suspend, 0, 0, &mtip_pci_resume, &mtip_pci_shutdown, 0, 0, { 0, 0, 0, 0, (_Bool)0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { { { { { { 0 } }, 0U, 0U, 0, { 0, { 0, 0 }, 0, 0, 0UL } } } }, { 0, 0 } } }; 4601 const struct pci_device_id __mod_pci__mtip_pci_tbl_device_table[8U] = { }; 4613 int mtip_init(); 4668 void mtip_exit(); 4703 void ldv_check_final_state(); 4706 void ldv_check_return_value(int); 4709 void ldv_check_return_value_probe(int); 4712 void ldv_initialize(); 4715 void ldv_handler_precall(); 4718 int nondet_int(); 4721 int LDV_IN_INTERRUPT = 0; 4724 void ldv_main0_sequence_infinite_withcheck_stateful(); 10 void ldv_error(); 25 int ldv_undef_int(); 26 void * ldv_undef_ptr(); 8 int LDV_DMA_MAP_CALLS = 0; 11 dma_addr_t ldv_dma_map_page(struct device *dev, struct page *page, size_t offset, size_t size, enum dma_data_direction dir); 25 int ldv_dma_mapping_error(struct device *dev, dma_addr_t dma_addr); 41 dma_addr_t ldv_dma_map_single(struct device *dev, void *cpu_addr, size_t size, enum dma_data_direction dir); return ; } { 4726 struct file *var_group1; 4727 char *var_mtip_hw_read_device_status_47_p1; 4728 unsigned long var_mtip_hw_read_device_status_47_p2; 4729 loff_t *var_mtip_hw_read_device_status_47_p3; 4730 long res_mtip_hw_read_device_status_47; 4731 char *var_mtip_hw_read_registers_48_p1; 4732 unsigned long var_mtip_hw_read_registers_48_p2; 4733 loff_t *var_mtip_hw_read_registers_48_p3; 4734 long res_mtip_hw_read_registers_48; 4735 char *var_mtip_hw_read_flags_49_p1; 4736 unsigned long var_mtip_hw_read_flags_49_p2; 4737 loff_t *var_mtip_hw_read_flags_49_p3; 4738 long res_mtip_hw_read_flags_49; 4739 struct block_device *var_group2; 4740 unsigned int var_mtip_block_ioctl_69_p1; 4741 unsigned int var_mtip_block_ioctl_69_p2; 4742 unsigned long var_mtip_block_ioctl_69_p3; 4743 unsigned int var_mtip_block_compat_ioctl_70_p1; 4744 unsigned int var_mtip_block_compat_ioctl_70_p2; 4745 unsigned long var_mtip_block_compat_ioctl_70_p3; 4746 struct hd_geometry *var_group3; 4747 struct blk_mq_hw_ctx *var_group4; 4748 const struct blk_mq_queue_data 
*var_mtip_queue_rq_75_p1; 4749 void *var_mtip_init_cmd_77_p0; 4750 struct request *var_group5; 4751 unsigned int var_mtip_init_cmd_77_p2; 4752 unsigned int var_mtip_init_cmd_77_p3; 4753 unsigned int var_mtip_init_cmd_77_p4; 4754 void *var_mtip_free_cmd_76_p0; 4755 unsigned int var_mtip_free_cmd_76_p2; 4756 unsigned int var_mtip_free_cmd_76_p3; 4757 struct pci_dev *var_group6; 4758 const struct pci_device_id *var_mtip_pci_probe_88_p1; 4759 int res_mtip_pci_probe_88; 4760 struct pm_message var_mtip_pci_suspend_90_p1; 4761 int var_mtip_irq_handler_23_p0; 4762 void *var_mtip_irq_handler_23_p1; 4763 int ldv_s_mtip_device_status_fops_file_operations; 4764 int ldv_s_mtip_regs_fops_file_operations; 4765 int ldv_s_mtip_flags_fops_file_operations; 4766 int ldv_s_mtip_pci_driver_pci_driver; 4767 int tmp; 4768 int tmp___0; 4769 int tmp___1; 5782 ldv_s_mtip_device_status_fops_file_operations = 0; 5784 ldv_s_mtip_regs_fops_file_operations = 0; 5786 ldv_s_mtip_flags_fops_file_operations = 0; 5792 ldv_s_mtip_pci_driver_pci_driver = 0; 5708 LDV_IN_INTERRUPT = 1; 5779 ldv_handler_precall() { /* Function call is skipped due to function is undefined */} { 4615 int error; 4616 struct lock_class_key __key; 4617 _Bool tmp; 4618 _Bool tmp___0; 4617 printk("\016mtip32xx Version 1.3.1\n") { /* Function call is skipped due to function is undefined */} 4619 __raw_spin_lock_init(&(dev_lock.__annonCompField8.rlock), "&(&dev_lock)->rlock", &__key) { /* Function call is skipped due to function is undefined */} { 27 union __anonunion___u_19 __u; 27 __u.__val = list; 28 list->prev = list; } { 27 union __anonunion___u_19 __u; 27 __u.__val = list; 28 list->prev = list; } 4625 error = register_blkdev(0U, "mtip32xx") { /* Function call is skipped due to function is undefined */} 4631 mtip_major = error; 4633 dfs_parent = debugfs_create_dir("rssd", (struct dentry *)0) { /* Function call is skipped due to function is undefined */} { 40 long tmp; 41 long tmp___0; 42 int tmp___1; 40 tmp = __builtin_expect(((unsigned long)ptr) == ((unsigned long)((const void *)0)), 0L) { /* Function call is skipped due to function is undefined */} 40 tmp___1 = 1; } 4635 printk("\fError creating debugfs parent\n") { /* Function call is skipped due to function is undefined */} 4636 dfs_parent = (struct dentry *)0; 4649 error = __pci_register_driver(&mtip_pci_driver, &__this_module, "mtip32xx") { /* Function call is skipped due to function is undefined */} } 5798 goto ldv_48047; 5798 tmp___1 = nondet_int() { /* Function call is skipped due to function is undefined */} 5804 goto ldv_48046; 5799 ldv_48046:; 5805 tmp___0 = nondet_int() { /* Function call is skipped due to function is undefined */} 5805 switch (tmp___0); 6177 ldv_handler_precall() { /* Function call is skipped due to function is undefined */} 6178 -mtip_block_compat_ioctl(var_group2, var_mtip_block_compat_ioctl_70_p1, var_mtip_block_compat_ioctl_70_p2, var_mtip_block_compat_ioctl_70_p3) { } 3499 struct driver_data *dd; 3500 _Bool tmp; 3501 int tmp___0; 3502 int tmp___1; 3503 long tmp___2; 3504 struct mtip_compat_ide_task_request_s *compat_req_task; 3505 struct ide_task_request_s req_task; 3506 int compat_tasksize; 3507 int outtotal; 3508 int ret; 3509 unsigned long tmp___3; 3510 int __ret_gu; 3511 unsigned long __val_gu; 3512 long tmp___4; 3513 int __ret_gu___0; 3514 unsigned long __val_gu___0; 3515 long tmp___5; 3516 unsigned long tmp___6; 3517 int __ret_pu; 3518 unsigned int __pu_val; 3519 long tmp___7; 3520 int __ret_pu___0; 3521 unsigned int __pu_val___0; 3522 long tmp___8; 3523 int 
tmp___9; 3501 struct driver_data *__CPAchecker_TMP_0 = (struct driver_data *)(dev->bd_disk->private_data); 3501 dd = __CPAchecker_TMP_0; 3503 tmp = capable(21) { /* Function call is skipped due to function is undefined */} 3503 tmp___0 = 0; { 310 return ((int)(((unsigned long)(*(addr + ((unsigned long)(nr >> 6))))) >> (((int)nr) & 63))) & 1;; } 3509 tmp___2 = __builtin_expect(tmp___1 != 0, 0L) { /* Function call is skipped due to function is undefined */} 3512 switch (cmd); 3520 compat_tasksize = 40; 3523 compat_req_task = (struct mtip_compat_ide_task_request_s *)arg; 3526 -copy_from_user((void *)(&req_task), (const void *)arg, ((unsigned long)compat_tasksize) - 8UL) { 712 int sz; 713 unsigned long tmp; 714 long tmp___0; 715 long tmp___1; 712 tmp = __builtin_object_size((const void *)to, 0) { /* Function call is skipped due to function is undefined */} 712 sz = (int)tmp; 714 __might_fault("./arch/x86/include/asm/uaccess.h", 714) { /* Function call is skipped due to function is undefined */} 734 tmp___0 = __builtin_expect(sz < 0, 1L) { /* Function call is skipped due to function is undefined */} 734 assume(tmp___0 != 0L); 735 n = _copy_from_user(to, from, (unsigned int)n) { /* Function call is skipped due to function is undefined */} 741 return n;; } 3530 __might_fault("/home/cluser/ldv/ref_launch/work/current--X--drivers--X--defaultlinux-4.5-rc1.tar.xz--X--331_1a--X--cpachecker/linux-4.5-rc1.tar.xz/csd_deg_dscv/11248/dscv_tempdir/dscv/ri/331_1a/drivers/block/mtip32xx/mtip32xx.c", 3530) { /* Function call is skipped due to function is undefined */} 3530 Ignored inline assembler code 3530 req_task.out_size = (unsigned long)((unsigned int)__val_gu); 3530 tmp___4 = __builtin_expect((long)__ret_gu, 0L) { /* Function call is skipped due to function is undefined */} 3533 __might_fault("/home/cluser/ldv/ref_launch/work/current--X--drivers--X--defaultlinux-4.5-rc1.tar.xz--X--331_1a--X--cpachecker/linux-4.5-rc1.tar.xz/csd_deg_dscv/11248/dscv_tempdir/dscv/ri/331_1a/drivers/block/mtip32xx/mtip32xx.c", 3533) { /* Function call is skipped due to function is undefined */} 3533 Ignored inline assembler code 3533 req_task.in_size = (unsigned long)((unsigned int)__val_gu___0); 3533 tmp___5 = __builtin_expect((long)__ret_gu___0, 0L) { /* Function call is skipped due to function is undefined */} 3536 outtotal = 40; { } 2004 struct host_to_dev_fis fis; 2005 struct host_to_dev_fis *reply; 2006 u8 *outbuf; 2007 u8 *inbuf; 2008 unsigned long long outbuf_dma; 2009 unsigned long long inbuf_dma; 2010 unsigned long long dma_buffer; 2011 int err; 2012 unsigned int taskin; 2013 unsigned int taskout; 2014 unsigned char nsect; 2015 unsigned int timeout; 2016 unsigned int force_single_sector; 2017 unsigned int transfer_size; 2018 unsigned long task_file_data; 2019 int intotal; 2020 int erasemode; 2021 void *tmp; 2022 long tmp___0; 2023 _Bool tmp___1; 2024 void *tmp___2; 2025 long tmp___3; 2026 _Bool tmp___4; 2027 int tmp___5; 2028 unsigned int tmp___6; 2029 unsigned long tmp___7; 2030 unsigned long tmp___8; 2008 outbuf = (u8 *)0U; 2009 inbuf = (u8 *)0U; 2010 outbuf_dma = 0ULL; 2011 inbuf_dma = 0ULL; 2012 dma_buffer = 0ULL; 2013 err = 0; 2014 taskin = 0U; 2015 taskout = 0U; 2016 nsect = 0U; 2021 unsigned int __CPAchecker_TMP_0 = (unsigned int)(req_task->out_size); 2021 intotal = (int)(__CPAchecker_TMP_0 + ((unsigned int)outtotal)); 2022 erasemode = 0; 2024 unsigned int __CPAchecker_TMP_1 = (unsigned int)(req_task->out_size); 2024 taskout = __CPAchecker_TMP_1; 2025 unsigned int __CPAchecker_TMP_2 = (unsigned 
int)(req_task->in_size); 2025 taskin = __CPAchecker_TMP_2; 2033 tmp = memdup_user(((const void *)buf) + ((unsigned long)outtotal), (size_t )taskout) { /* Function call is skipped due to function is undefined */} 2033 outbuf = (u8 *)tmp; { 35 long tmp; 35 tmp = __builtin_expect(((unsigned long)ptr) > 18446744073709547520UL, 0L) { /* Function call is skipped due to function is undefined */} } { 35 unsigned long long tmp; 34 struct device *__CPAchecker_TMP_0; 34 __CPAchecker_TMP_0 = &(hwdev->dev); 34 -ldv_dma_map_single_attrs_1(__CPAchecker_TMP_0, ptr, size, (enum dma_data_direction )direction, (struct dma_attrs *)0) { } 14 unsigned long long tmp; { } 58 unsigned long long nonedetermined; 59 void *tmp; 58 tmp = ldv_undef_ptr() { /* Function call is skipped due to function is undefined */} 58 nonedetermined = (dma_addr_t )tmp; 63 LDV_DMA_MAP_CALLS = LDV_DMA_MAP_CALLS + 1; } 2047 dma_buffer = outbuf_dma; 2051 tmp___2 = memdup_user(((const void *)buf) + ((unsigned long)intotal), (size_t )taskin) { /* Function call is skipped due to function is undefined */} 2051 inbuf = (u8 *)tmp___2; { 35 long tmp; 35 tmp = __builtin_expect(((unsigned long)ptr) > 18446744073709547520UL, 0L) { /* Function call is skipped due to function is undefined */} } { } 35 unsigned long long tmp; 34 struct device *__CPAchecker_TMP_0; 34 __CPAchecker_TMP_0 = &(hwdev->dev); 34 -ldv_dma_map_single_attrs_1(__CPAchecker_TMP_0, ptr, size, (enum dma_data_direction )direction, (struct dma_attrs *)0) { } 14 unsigned long long tmp; { } 58 unsigned long long nonedetermined; 59 void *tmp; 58 tmp = ldv_undef_ptr() { /* Function call is skipped due to function is undefined */} 58 nonedetermined = (dma_addr_t )tmp; }
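The trace ends inside the verifier's DMA model: exec_drive_taskfile() maps the taskfile output buffer and then the input buffer through pci_map_single(), each call reaches ldv_dma_map_single_attrs_1(), and LDV_DMA_MAP_CALLS is incremented with the returned handle left unchecked. Rule 331_1a requires every such mapping to be validated with dma_mapping_error()/pci_dma_mapping_error() before use, which this path never does. Below is a minimal sketch of the missing check, assuming only the identifiers visible in the trace (pci_map_single(), the taskout length); the helper name example_map_taskout and its error path are hypothetical:

static int example_map_taskout(struct pci_dev *pdev, void *outbuf,
			       size_t taskout, dma_addr_t *dma)
{
	/* Map the buffer the way the trace shows the driver doing it... */
	*dma = pci_map_single(pdev, outbuf, taskout, PCI_DMA_TODEVICE);
	/* ...then verify the handle -- the step the trace never reaches. */
	if (pci_dma_mapping_error(pdev, *dma))
		return -ENOMEM;
	return 0;
}

The same check applies to the second mapping of the input buffer.

Source code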
1 #ifndef _ASM_X86_BITOPS_H
2 #define _ASM_X86_BITOPS_H
3
4 /*
5 * Copyright 1992, Linus Torvalds.
6 *
7 * Note: inlines with more than a single statement should be marked
8 * __always_inline to avoid problems with older gcc's inlining heuristics.
9 */
10
11 #ifndef _LINUX_BITOPS_H
12 #error only <linux/bitops.h> can be included directly
13 #endif
14
15 #include <linux/compiler.h>
16 #include <asm/alternative.h>
17 #include <asm/rmwcc.h>
18 #include <asm/barrier.h>
19
20 #if BITS_PER_LONG == 32
21 # define _BITOPS_LONG_SHIFT 5
22 #elif BITS_PER_LONG == 64
23 # define _BITOPS_LONG_SHIFT 6
24 #else
25 # error "Unexpected BITS_PER_LONG"
26 #endif
27
28 #define BIT_64(n) (U64_C(1) << (n))
29
30 /*
31 * These have to be done with inline assembly: that way the bit-setting
32 * is guaranteed to be atomic. All bit operations return 0 if the bit
33 * was cleared before the operation and != 0 if it was not.
34 *
35 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
36 */
37
38 #if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1)
39 /* Technically wrong, but this avoids compilation errors on some gcc
40 versions. */
41 #define BITOP_ADDR(x) "=m" (*(volatile long *) (x))
42 #else
43 #define BITOP_ADDR(x) "+m" (*(volatile long *) (x))
44 #endif
45
46 #define ADDR BITOP_ADDR(addr)
47
48 /*
49 * We do the locked ops that don't return the old value as
50 * a mask operation on a byte.
51 */
52 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
53 #define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
54 #define CONST_MASK(nr) (1 << ((nr) & 7))
55
56 /**
57 * set_bit - Atomically set a bit in memory
58 * @nr: the bit to set
59 * @addr: the address to start counting from
60 *
61 * This function is atomic and may not be reordered. See __set_bit()
62 * if you do not require the atomic guarantees.
63 *
64 * Note: there are no guarantees that this function will not be reordered
65 * on non x86 architectures, so if you are writing portable code,
66 * make sure not to rely on its reordering guarantees.
67 *
68 * Note that @nr may be almost arbitrarily large; this function is not
69 * restricted to acting on a single-word quantity.
70 */
71 static __always_inline void
72 set_bit(long nr, volatile unsigned long *addr)
73 {
74 if (IS_IMMEDIATE(nr)) {
75 asm volatile(LOCK_PREFIX "orb %1,%0"
76 : CONST_MASK_ADDR(nr, addr)
77 : "iq" ((u8)CONST_MASK(nr))
78 : "memory");
79 } else {
80 asm volatile(LOCK_PREFIX "bts %1,%0"
81 : BITOP_ADDR(addr) : "Ir" (nr) : "memory");
82 }
83 }
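/*
 * Hypothetical usage sketch (not part of this header): because set_bit()
 * above is atomic, two CPUs setting different bits in the same word
 * cannot lose each other's update.
 */
static inline void example_mark_busy(unsigned long *flags)
{
	set_bit(0, flags);	/* atomic RMW; no external locking needed */
}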
84
85 /**
86 * __set_bit - Set a bit in memory
87 * @nr: the bit to set
88 * @addr: the address to start counting from
89 *
90 * Unlike set_bit(), this function is non-atomic and may be reordered.
91 * If it's called on the same region of memory simultaneously, the effect
92 * may be that only one operation succeeds.
93 */
94 static inline void __set_bit(long nr, volatile unsigned long *addr)
95 {
96 asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory");
97 }
98
99 /**
100 * clear_bit - Clears a bit in memory
101 * @nr: Bit to clear
102 * @addr: Address to start counting from
103 *
104 * clear_bit() is atomic and may not be reordered. However, it does
105 * not contain a memory barrier, so if it is used for locking purposes,
106 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
107 * in order to ensure changes are visible on other processors.
108 */
109 static __always_inline void
110 clear_bit(long nr, volatile unsigned long *addr)
111 {
112 if (IS_IMMEDIATE(nr)) {
113 asm volatile(LOCK_PREFIX "andb %1,%0"
114 : CONST_MASK_ADDR(nr, addr)
115 : "iq" ((u8)~CONST_MASK(nr)));
116 } else {
117 asm volatile(LOCK_PREFIX "btr %1,%0"
118 : BITOP_ADDR(addr)
119 : "Ir" (nr));
120 }
121 }
122
123 /*
124 * clear_bit_unlock - Clears a bit in memory
125 * @nr: Bit to clear
126 * @addr: Address to start counting from
127 *
128 * clear_bit() is atomic and implies release semantics before the memory
129 * operation. It can be used for an unlock.
130 */
131 static inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
132 {
133 barrier();
134 clear_bit(nr, addr);
135 }
136
137 static inline void __clear_bit(long nr, volatile unsigned long *addr)
138 {
139 asm volatile("btr %1,%0" : ADDR : "Ir" (nr));
140 }
141
142 /*
143 * __clear_bit_unlock - Clears a bit in memory
144 * @nr: Bit to clear
145 * @addr: Address to start counting from
146 *
147 * __clear_bit() is non-atomic and implies release semantics before the memory
148 * operation. It can be used for an unlock if no other CPUs can concurrently
149 * modify other bits in the word.
150 *
151 * No memory barrier is required here, because x86 cannot reorder stores past
152 * older loads. Same principle as spin_unlock.
153 */
154 static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
155 {
156 barrier();
157 __clear_bit(nr, addr);
158 }
159
160 /**
161 * __change_bit - Toggle a bit in memory
162 * @nr: the bit to change
163 * @addr: the address to start counting from
164 *
165 * Unlike change_bit(), this function is non-atomic and may be reordered.
166 * If it's called on the same region of memory simultaneously, the effect
167 * may be that only one operation succeeds.
168 */
169 static inline void __change_bit(long nr, volatile unsigned long *addr)
170 {
171 asm volatile("btc %1,%0" : ADDR : "Ir" (nr));
172 }
173
174 /**
175 * change_bit - Toggle a bit in memory
176 * @nr: Bit to change
177 * @addr: Address to start counting from
178 *
179 * change_bit() is atomic and may not be reordered.
180 * Note that @nr may be almost arbitrarily large; this function is not
181 * restricted to acting on a single-word quantity.
182 */
183 static inline void change_bit(long nr, volatile unsigned long *addr)
184 {
185 if (IS_IMMEDIATE(nr)) {
186 asm volatile(LOCK_PREFIX "xorb %1,%0"
187 : CONST_MASK_ADDR(nr, addr)
188 : "iq" ((u8)CONST_MASK(nr)));
189 } else {
190 asm volatile(LOCK_PREFIX "btc %1,%0"
191 : BITOP_ADDR(addr)
192 : "Ir" (nr));
193 }
194 }
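/*
 * Hypothetical sketch: change_bit() above atomically flips a bit, e.g. a
 * polarity flag shared between an interrupt handler and process context.
 */
static inline void example_toggle_polarity(unsigned long *flags)
{
	change_bit(1, flags);	/* atomic toggle of bit 1 */
}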
195
196 /**
197 * test_and_set_bit - Set a bit and return its old value
198 * @nr: Bit to set
199 * @addr: Address to count from
200 *
201 * This operation is atomic and cannot be reordered.
202 * It also implies a memory barrier.
203 */
204 static inline int test_and_set_bit(long nr, volatile unsigned long *addr)
205 {
206 GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
207 }
208
209 /**
210 * test_and_set_bit_lock - Set a bit and return its old value for lock
211 * @nr: Bit to set
212 * @addr: Address to count from
213 *
214 * This is the same as test_and_set_bit on x86.
215 */
216 static __always_inline int
217 test_and_set_bit_lock(long nr, volatile unsigned long *addr)
218 {
219 return test_and_set_bit(nr, addr);
220 }
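/*
 * Hypothetical sketch: test_and_set_bit_lock() pairs with
 * clear_bit_unlock() (line 131 above) to form a simple bit lock --
 * acquire by atomically setting the bit, release by clearing it with
 * release ordering.
 */
static inline void example_bit_lock(long nr, unsigned long *word)
{
	while (test_and_set_bit_lock(nr, word))
		cpu_relax();		/* old value was 1: spin until free */
}

static inline void example_bit_unlock(long nr, unsigned long *word)
{
	clear_bit_unlock(nr, word);	/* orders prior stores before release */
}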
221
222 /**
223 * __test_and_set_bit - Set a bit and return its old value
224 * @nr: Bit to set
225 * @addr: Address to count from
226 *
227 * This operation is non-atomic and can be reordered.
228 * If two examples of this operation race, one can appear to succeed
229 * but actually fail. You must protect multiple accesses with a lock.
230 */
231 static inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
232 {
233 int oldbit;
234
235 asm("bts %2,%1\n\t"
236 "sbb %0,%0"
237 : "=r" (oldbit), ADDR
238 : "Ir" (nr));
239 return oldbit;
240 }
241
242 /**
243 * test_and_clear_bit - Clear a bit and return its old value
244 * @nr: Bit to clear
245 * @addr: Address to count from
246 *
247 * This operation is atomic and cannot be reordered.
248 * It also implies a memory barrier.
249 */
250 static inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
251 {
252 GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
253 }
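/*
 * Hypothetical sketch: test_and_clear_bit() above lets exactly one caller
 * consume a "work pending" flag, since only one atomic RMW can observe
 * the 1 -> 0 transition.
 */
static inline bool example_take_pending(unsigned long *flags)
{
	return test_and_clear_bit(2, flags);	/* true for a single winner */
}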
254
255 /**
256 * __test_and_clear_bit - Clear a bit and return its old value
257 * @nr: Bit to clear
258 * @addr: Address to count from
259 *
260 * This operation is non-atomic and can be reordered.
261 * If two examples of this operation race, one can appear to succeed
262 * but actually fail. You must protect multiple accesses with a lock.
263 *
264 * Note: the operation is performed atomically with respect to
265 * the local CPU, but not other CPUs. Portable code should not
266 * rely on this behaviour.
267 * KVM relies on this behaviour on x86 for modifying memory that is also
268 * accessed from a hypervisor on the same CPU if running in a VM: don't change
269 * this without also updating arch/x86/kernel/kvm.c
270 */
271 static inline int __test_and_clear_bit(long nr, volatile unsigned long *addr)
272 {
273 int oldbit;
274
275 asm volatile("btr %2,%1\n\t"
276 "sbb %0,%0"
277 : "=r" (oldbit), ADDR
278 : "Ir" (nr));
279 return oldbit;
280 }
281
282 /* WARNING: non atomic and it can be reordered! */
283 static inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
284 {
285 int oldbit;
286
287 asm volatile("btc %2,%1\n\t"
288 "sbb %0,%0"
289 : "=r" (oldbit), ADDR
290 : "Ir" (nr) : "memory");
291
292 return oldbit;
293 }
294
295 /**
296 * test_and_change_bit - Change a bit and return its old value
297 * @nr: Bit to change
298 * @addr: Address to count from
299 *
300 * This operation is atomic and cannot be reordered.
301 * It also implies a memory barrier.
302 */
303 static inline int test_and_change_bit(long nr, volatile unsigned long *addr)
304 {
305 GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
306 }
307
308 static __always_inline int constant_test_bit(long nr, const volatile unsigned long *addr)
309 {
310 return ((1UL << (nr & (BITS_PER_LONG-1))) &
311 (addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
312 }
313
314 static inline int variable_test_bit(long nr, volatile const unsigned long *addr)
315 {
316 int oldbit;
317
318 asm volatile("bt %2,%1\n\t"
319 "sbb %0,%0"
320 : "=r" (oldbit)
321 : "m" (*(unsigned long *)addr), "Ir" (nr));
322
323 return oldbit;
324 }
325
326 #if 0 /* Fool kernel-doc since it doesn't do macros yet */
327 /**
328 * test_bit - Determine whether a bit is set
329 * @nr: bit number to test
330 * @addr: Address to start counting from
331 */
332 static int test_bit(int nr, const volatile unsigned long *addr);
333 #endif
334
335 #define test_bit(nr, addr) \
336 (__builtin_constant_p((nr)) \
337 ? constant_test_bit((nr), (addr)) \
338 : variable_test_bit((nr), (addr)))
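/*
 * Hypothetical sketch of the dispatch in the test_bit() macro above: a
 * compile-time constant bit number resolves to constant_test_bit() (plain
 * C), a runtime value to variable_test_bit() (inline "bt").
 */
static inline int example_probe_bits(const unsigned long *addr, int nr)
{
	int fixed = test_bit(5, addr);	/* constant_test_bit() path */
	int dyn = test_bit(nr, addr);	/* variable_test_bit() path */
	return fixed | dyn;
}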
339
340 /**
341 * __ffs - find first set bit in word
342 * @word: The word to search
343 *
344 * Undefined if no bit exists, so code should check against 0 first.
345 */
346 static inline unsigned long __ffs(unsigned long word)
347 {
348 asm("rep; bsf %1,%0"
349 : "=r" (word)
350 : "rm" (word));
351 return word;
352 }
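/*
 * Hypothetical sketch: __ffs() above is undefined for a zero word, so the
 * caller guards first; BITS_PER_LONG serves as the "no bit" sentinel here.
 */
static inline unsigned long example_first_set(unsigned long mask)
{
	return mask ? __ffs(mask) : BITS_PER_LONG;
}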
353
354 /**
355 * ffz - find first zero bit in word
356 * @word: The word to search
357 *
358 * Undefined if no zero exists, so code should check against ~0UL first.
359 */
360 static inline unsigned long ffz(unsigned long word)
361 {
362 asm("rep; bsf %1,%0"
363 : "=r" (word)
364 : "r" (~word));
365 return word;
366 }
367
368 /*
369 * __fls: find last set bit in word
370 * @word: The word to search
371 *
372 * Undefined if no set bit exists, so code should check against 0 first.
373 */
374 static inline unsigned long __fls(unsigned long word)
375 {
376 asm("bsr %1,%0"
377 : "=r" (word)
378 : "rm" (word));
379 return word;
380 }
381
382 #undef ADDR
383
384 #ifdef __KERNEL__
385 /**
386 * ffs - find first set bit in word
387 * @x: the word to search
388 *
389 * This is defined the same way as the libc and compiler builtin ffs
390 * routines, therefore differs in spirit from the other bitops.
391 *
392 * ffs(value) returns 0 if value is 0 or the position of the first
393 * set bit if value is nonzero. The first (least significant) bit
394 * is at position 1.
395 */
396 static inline int ffs(int x)
397 {
398 int r;
399
400 #ifdef CONFIG_X86_64
401 /*
402 * AMD64 says BSFL won't clobber the dest reg if x==0; Intel64 says the
403 * dest reg is undefined if x==0, but their CPU architect says its
404 * value is written to set it to the same as before, except that the
405 * top 32 bits will be cleared.
406 *
407 * We cannot do this on 32 bits because at the very least some
408 * 486 CPUs did not behave this way.
409 */
410 asm("bsfl %1,%0"
411 : "=r" (r)
412 : "rm" (x), "0" (-1));
413 #elif defined(CONFIG_X86_CMOV)
414 asm("bsfl %1,%0\n\t"
415 "cmovzl %2,%0"
416 : "=&r" (r) : "rm" (x), "r" (-1));
417 #else
418 asm("bsfl %1,%0\n\t"
419 "jnz 1f\n\t"
420 "movl $-1,%0\n"
421 "1:" : "=r" (r) : "rm" (x));
422 #endif
423 return r + 1;
424 }
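/*
 * Hypothetical illustration of the 1-based convention above:
 * ffs(0) == 0, ffs(1) == 1, ffs(0x10) == 5 (bit 4 set), whereas
 * __ffs(0x10) == 4.
 */
static inline int example_ffs_value(void)
{
	return ffs(0x10);	/* returns 5 */
}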
425
426 /**
427 * fls - find last set bit in word
428 * @x: the word to search
429 *
430 * This is defined in a similar way as the libc and compiler builtin
431 * ffs, but returns the position of the most significant set bit.
432 *
433 * fls(value) returns 0 if value is 0 or the position of the last
434 * set bit if value is nonzero. The last (most significant) bit is
435 * at position 32.
436 */
437 static inline int fls(int x)
438 {
439 int r;
440
441 #ifdef CONFIG_X86_64
442 /*
443 * AMD64 says BSRL won't clobber the dest reg if x==0; Intel64 says the
444 * dest reg is undefined if x==0, but their CPU architect says its
445 * value is written to set it to the same as before, except that the
446 * top 32 bits will be cleared.
447 *
448 * We cannot do this on 32 bits because at the very least some
449 * 486 CPUs did not behave this way.
450 */
451 asm("bsrl %1,%0"
452 : "=r" (r)
453 : "rm" (x), "0" (-1));
454 #elif defined(CONFIG_X86_CMOV)
455 asm("bsrl %1,%0\n\t"
456 "cmovzl %2,%0"
457 : "=&r" (r) : "rm" (x), "rm" (-1));
458 #else
459 asm("bsrl %1,%0\n\t"
460 "jnz 1f\n\t"
461 "movl $-1,%0\n"
462 "1:" : "=r" (r) : "rm" (x));
463 #endif
464 return r + 1;
465 }
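/*
 * Hypothetical sketch: for x > 0, fls(x) - 1 is floor(log2(x)); e.g.
 * fls(16) == 5, so the result is 4.
 */
static inline int example_floor_log2(int x)
{
	return fls(x) - 1;	/* caller must ensure x != 0 */
}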
466
467 /**
468 * fls64 - find last set bit in a 64-bit word
469 * @x: the word to search
470 *
471 * This is defined in a similar way as the libc and compiler builtin
472 * ffsll, but returns the position of the most significant set bit.
473 *
474 * fls64(value) returns 0 if value is 0 or the position of the last
475 * set bit if value is nonzero. The last (most significant) bit is
476 * at position 64.
477 */
478 #ifdef CONFIG_X86_64
479 static __always_inline int fls64(__u64 x)
480 {
481 int bitpos = -1;
482 /*
483 * AMD64 says BSRQ won't clobber the dest reg if x==0; Intel64 says the
484 * dest reg is undefined if x==0, but their CPU architect says its
485 * value is written to set it to the same as before.
486 */
487 asm("bsrq %1,%q0"
488 : "+r" (bitpos)
489 : "rm" (x));
490 return bitpos + 1;
491 }
492 #else
493 #include <asm-generic/bitops/fls64.h>
494 #endif
495
496 #include <asm-generic/bitops/find.h>
497
498 #include <asm-generic/bitops/sched.h>
499
500 #include <asm/arch_hweight.h>
501
502 #include <asm-generic/bitops/const_hweight.h>
503
504 #include <asm-generic/bitops/le.h>
505
506 #include <asm-generic/bitops/ext2-atomic-setbit.h>
507
508 #endif /* __KERNEL__ */
509 #endif /* _ASM_X86_BITOPS_H */
1 #ifndef _ASM_X86_UACCESS_H
2 #define _ASM_X86_UACCESS_H
3 /*
4 * User space memory access functions
5 */
6 #include <linux/errno.h>
7 #include <linux/compiler.h>
8 #include <linux/thread_info.h>
9 #include <linux/string.h>
10 #include <asm/asm.h>
11 #include <asm/page.h>
12 #include <asm/smap.h>
13
14 #define VERIFY_READ 0
15 #define VERIFY_WRITE 1
16
17 /*
18 * The fs value determines whether argument validity checking should be
19 * performed or not. If get_fs() == USER_DS, checking is performed, with
20 * get_fs() == KERNEL_DS, checking is bypassed.
21 *
22 * For historical reasons, these macros are grossly misnamed.
23 */
24
25 #define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
26
27 #define KERNEL_DS MAKE_MM_SEG(-1UL)
28 #define USER_DS MAKE_MM_SEG(TASK_SIZE_MAX)
29
30 #define get_ds() (KERNEL_DS)
31 #define get_fs() (current_thread_info()->addr_limit)
32 #define set_fs(x) (current_thread_info()->addr_limit = (x))
33
34 #define segment_eq(a, b) ((a).seg == (b).seg)
35
36 #define user_addr_max() (current_thread_info()->addr_limit.seg)
37 #define __addr_ok(addr) \
38 ((unsigned long __force)(addr) < user_addr_max())
39
40 /*
41 * Test whether a block of memory is a valid user space address.
42 * Returns 0 if the range is valid, nonzero otherwise.
43 */
44 static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
45 {
46 /*
47 * If we have used "sizeof()" for the size,
48 * we know it won't overflow the limit (but
49 * it might overflow the 'addr', so it's
50 * important to subtract the size from the
51 * limit, not add it to the address).
52 */
53 if (__builtin_constant_p(size))
54 return unlikely(addr > limit - size);
55
56 /* Arbitrary sizes? Be careful about overflow */
57 addr += size;
58 if (unlikely(addr < size))
59 return true;
60 return unlikely(addr > limit);
61 }
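/*
 * Worked example of the overflow note above (hypothetical 64-bit values):
 * addr = 0xffffffffffff0000, size = 0x20000. A naive "addr + size > limit"
 * test wraps to 0x10000 and passes. The constant-size branch compares
 * addr > limit - size instead, which cannot wrap for a sizeof()-derived
 * size; the variable-size branch detects the wrap via "addr < size" after
 * the addition.
 */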
62
63 #define __range_not_ok(addr, size, limit) \
64 ({ \
65 __chk_user_ptr(addr); \
66 __chk_range_not_ok((unsigned long __force)(addr), size, limit); \
67 })
68
69 /**
70 * access_ok: - Checks if a user space pointer is valid
71 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that
72 * %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
73 * to write to a block, it is always safe to read from it.
74 * @addr: User space pointer to start of block to check
75 * @size: Size of block to check
76 *
77 * Context: User context only. This function may sleep if pagefaults are
78 * enabled.
79 *
80 * Checks if a pointer to a block of memory in user space is valid.
81 *
82 * Returns true (nonzero) if the memory block may be valid, false (zero)
83 * if it is definitely invalid.
84 *
85 * Note that, depending on architecture, this function probably just
86 * checks that the pointer is in the user space range - after calling
87 * this function, memory access functions may still return -EFAULT.
88 */
89 #define access_ok(type, addr, size) \
90 likely(!__range_not_ok(addr, size, user_addr_max()))
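/*
 * Hypothetical usage sketch: range-check a user pointer once before a
 * series of __copy_* calls. access_ok() only validates the range; the
 * actual access can still fault and yield -EFAULT.
 */
static inline int example_check_user_buf(void __user *ubuf, size_t len)
{
	return access_ok(VERIFY_WRITE, ubuf, len) ? 0 : -EFAULT;
}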
91
92 /*
93 * The exception table consists of pairs of addresses relative to the
94 * exception table entry itself: the first is the address of an
95 * instruction that is allowed to fault, and the second is the address
96 * at which the program should continue. No registers are modified,
97 * so it is entirely up to the continuation code to figure out what to
98 * do.
99 *
100 * All the routines below use bits of fixup code that are out of line
101 * with the main instruction path. This means when everything is well,
102 * we don't even have to jump over them. Further, they do not intrude
103 * on our cache or tlb entries.
104 */
105
106 struct exception_table_entry {
107 int insn, fixup;
108 };
109 /* This is not the generic standard exception_table_entry format */
110 #define ARCH_HAS_SORT_EXTABLE
111 #define ARCH_HAS_SEARCH_EXTABLE
112
113 extern int fixup_exception(struct pt_regs *regs);
114 extern int early_fixup_exception(unsigned long *ip);
115
116 /*
117 * These are the main single-value transfer routines. They automatically
118 * use the right size if we just have the right pointer type.
119 *
120 * This gets kind of ugly. We want to return _two_ values in "get_user()"
121 * and yet we don't want to do any pointers, because that is too much
122 * of a performance impact. Thus we have a few rather ugly macros here,
123 * and hide all the ugliness from the user.
124 *
125 * The "__xxx" versions of the user access functions are versions that
126 * do not verify the address space, that must have been done previously
127 * with a separate "access_ok()" call (this is used when we do multiple
128 * accesses to the same area of user memory).
129 */
130
131 extern int __get_user_1(void);
132 extern int __get_user_2(void);
133 extern int __get_user_4(void);
134 extern int __get_user_8(void);
135 extern int __get_user_bad(void);
136
137 #define __uaccess_begin() stac()
138 #define __uaccess_end() clac()
139
140 /*
141 * This is a type: either unsigned long, if the argument fits into
142 * that type, or otherwise unsigned long long.
143 */
144 #define __inttype(x) \
145 __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
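/*
 * For example, __inttype(char) and __inttype(long) both select unsigned
 * long, while __inttype(u64) on 32-bit (where sizeof(u64) > sizeof(0UL))
 * selects unsigned long long.
 */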
146
147 /**
148 * get_user: - Get a simple variable from user space.
149 * @x: Variable to store result.
150 * @ptr: Source address, in user space.
151 *
152 * Context: User context only. This function may sleep if pagefaults are
153 * enabled.
154 *
155 * This macro copies a single simple variable from user space to kernel
156 * space. It supports simple types like char and int, but not larger
157 * data types like structures or arrays.
158 *
159 * @ptr must have pointer-to-simple-variable type, and the result of
160 * dereferencing @ptr must be assignable to @x without a cast.
161 *
162 * Returns zero on success, or -EFAULT on error.
163 * On error, the variable @x is set to zero.
164 */
165 /*
166 * Careful: we have to cast the result to the type of the pointer
167 * for sign reasons.
168 *
169 * The use of _ASM_DX as the register specifier is a bit of a
170 * simplification, as gcc only cares about it as the starting point
171 * and not size: for a 64-bit value it will use %ecx:%edx on 32 bits
172 * (%ecx being the next register in gcc's x86 register sequence), and
173 * %rdx on 64 bits.
174 *
175 * Clang/LLVM cares about the size of the register, but still wants
176 * the base register for something that ends up being a pair.
177 */
178 #define get_user(x, ptr) \
179 ({ \
180 int __ret_gu; \
181 register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX); \
182 __chk_user_ptr(ptr); \
183 might_fault(); \
184 asm volatile("call __get_user_%P3" \
185 : "=a" (__ret_gu), "=r" (__val_gu) \
186 : "0" (ptr), "i" (sizeof(*(ptr)))); \
187 (x) = (__force __typeof__(*(ptr))) __val_gu; \
188 __builtin_expect(__ret_gu, 0); \
189 })
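/*
 * A minimal usage sketch (hypothetical caller): get_user() both checks
 * the pointer and fetches the value, returning 0 or -EFAULT.
 */
static inline int example_fetch_flags(const unsigned int __user *uptr,
				      unsigned int *flags)
{
	return get_user(*flags, uptr);	/* *flags is zeroed on fault */
}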
190
191 #define __put_user_x(size, x, ptr, __ret_pu) \
192 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
193 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
194
195
196
197 #ifdef CONFIG_X86_32
198 #define __put_user_asm_u64(x, addr, err, errret) \
199 asm volatile("\n" \
200 "1: movl %%eax,0(%2)\n" \
201 "2: movl %%edx,4(%2)\n" \
202 "3:" \
203 ".section .fixup,\"ax\"\n" \
204 "4: movl %3,%0\n" \
205 " jmp 3b\n" \
206 ".previous\n" \
207 _ASM_EXTABLE(1b, 4b) \
208 _ASM_EXTABLE(2b, 4b) \
209 : "=r" (err) \
210 : "A" (x), "r" (addr), "i" (errret), "0" (err))
211
212 #define __put_user_asm_ex_u64(x, addr) \
213 asm volatile("\n" \
214 "1: movl %%eax,0(%1)\n" \
215 "2: movl %%edx,4(%1)\n" \
216 "3:" \
217 _ASM_EXTABLE_EX(1b, 2b) \
218 _ASM_EXTABLE_EX(2b, 3b) \
219 : : "A" (x), "r" (addr))
220
221 #define __put_user_x8(x, ptr, __ret_pu) \
222 asm volatile("call __put_user_8" : "=a" (__ret_pu) \
223 : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
224 #else
225 #define __put_user_asm_u64(x, ptr, retval, errret) \
226 __put_user_asm(x, ptr, retval, "q", "", "er", errret)
227 #define __put_user_asm_ex_u64(x, addr) \
228 __put_user_asm_ex(x, addr, "q", "", "er")
229 #define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
230 #endif
231
232 extern void __put_user_bad(void);
233
234 /*
235 * Strange magic calling convention: pointer in %ecx,
236 * value in %eax(:%edx), return value in %eax. clobbers %rbx
237 */
238 extern void __put_user_1(void);
239 extern void __put_user_2(void);
240 extern void __put_user_4(void);
241 extern void __put_user_8(void);
242
243 /**
244 * put_user: - Write a simple value into user space.
245 * @x: Value to copy to user space.
246 * @ptr: Destination address, in user space.
247 *
248 * Context: User context only. This function may sleep if pagefaults are
249 * enabled.
250 *
251 * This macro copies a single simple value from kernel space to user
252 * space. It supports simple types like char and int, but not larger
253 * data types like structures or arrays.
254 *
255 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
256 * to the result of dereferencing @ptr.
257 *
258 * Returns zero on success, or -EFAULT on error.
259 */
260 #define put_user(x, ptr) \
261 ({ \
262 int __ret_pu; \
263 __typeof__(*(ptr)) __pu_val; \
264 __chk_user_ptr(ptr); \
265 might_fault(); \
266 __pu_val = x; \
267 switch (sizeof(*(ptr))) { \
268 case 1: \
269 __put_user_x(1, __pu_val, ptr, __ret_pu); \
270 break; \
271 case 2: \
272 __put_user_x(2, __pu_val, ptr, __ret_pu); \
273 break; \
274 case 4: \
275 __put_user_x(4, __pu_val, ptr, __ret_pu); \
276 break; \
277 case 8: \
278 __put_user_x8(__pu_val, ptr, __ret_pu); \
279 break; \
280 default: \
281 __put_user_x(X, __pu_val, ptr, __ret_pu); \
282 break; \
283 } \
284 __builtin_expect(__ret_pu, 0); \
285 })
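/*
 * A minimal usage sketch (hypothetical caller): the mirror image of the
 * get_user() example above.
 */
static inline int example_report_count(int __user *uptr, int count)
{
	return put_user(count, uptr);	/* 0 on success, -EFAULT on fault */
}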
286
287 #define __put_user_size(x, ptr, size, retval, errret) \
288 do { \
289 retval = 0; \
290 __chk_user_ptr(ptr); \
291 switch (size) { \
292 case 1: \
293 __put_user_asm(x, ptr, retval, "b", "b", "iq", errret); \
294 break; \
295 case 2: \
296 __put_user_asm(x, ptr, retval, "w", "w", "ir", errret); \
297 break; \
298 case 4: \
299 __put_user_asm(x, ptr, retval, "l", "k", "ir", errret); \
300 break; \
301 case 8: \
302 __put_user_asm_u64((__typeof__(*ptr))(x), ptr, retval, \
303 errret); \
304 break; \
305 default: \
306 __put_user_bad(); \
307 } \
308 } while (0)
309
310 /*
311 * This doesn't do __uaccess_begin/end - the exception handling
312 * around it must do that.
313 */
314 #define __put_user_size_ex(x, ptr, size) \
315 do { \
316 __chk_user_ptr(ptr); \
317 switch (size) { \
318 case 1: \
319 __put_user_asm_ex(x, ptr, "b", "b", "iq"); \
320 break; \
321 case 2: \
322 __put_user_asm_ex(x, ptr, "w", "w", "ir"); \
323 break; \
324 case 4: \
325 __put_user_asm_ex(x, ptr, "l", "k", "ir"); \
326 break; \
327 case 8: \
328 __put_user_asm_ex_u64((__typeof__(*ptr))(x), ptr); \
329 break; \
330 default: \
331 __put_user_bad(); \
332 } \
333 } while (0)
334
335 #ifdef CONFIG_X86_32
336 #define __get_user_asm_u64(x, ptr, retval, errret) (x) = __get_user_bad()
337 #define __get_user_asm_ex_u64(x, ptr) (x) = __get_user_bad()
338 #else
339 #define __get_user_asm_u64(x, ptr, retval, errret) \
340 __get_user_asm(x, ptr, retval, "q", "", "=r", errret)
341 #define __get_user_asm_ex_u64(x, ptr) \
342 __get_user_asm_ex(x, ptr, "q", "", "=r")
343 #endif
344
345 #define __get_user_size(x, ptr, size, retval, errret) \
346 do { \
347 retval = 0; \
348 __chk_user_ptr(ptr); \
349 switch (size) { \
350 case 1: \
351 __get_user_asm(x, ptr, retval, "b", "b", "=q", errret); \
352 break; \
353 case 2: \
354 __get_user_asm(x, ptr, retval, "w", "w", "=r", errret); \
355 break; \
356 case 4: \
357 __get_user_asm(x, ptr, retval, "l", "k", "=r", errret); \
358 break; \
359 case 8: \
360 __get_user_asm_u64(x, ptr, retval, errret); \
361 break; \
362 default: \
363 (x) = __get_user_bad(); \
364 } \
365 } while (0)
366
367 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
368 asm volatile("\n" \
369 "1: mov"itype" %2,%"rtype"1\n" \
370 "2:\n" \
371 ".section .fixup,\"ax\"\n" \
372 "3: mov %3,%0\n" \
373 " xor"itype" %"rtype"1,%"rtype"1\n" \
374 " jmp 2b\n" \
375 ".previous\n" \
376 _ASM_EXTABLE(1b, 3b) \
377 : "=r" (err), ltype(x) \
378 : "m" (__m(addr)), "i" (errret), "0" (err))
379
380 /*
381 * This doesn't do __uaccess_begin/end - the exception handling
382 * around it must do that.
383 */
384 #define __get_user_size_ex(x, ptr, size) \
385 do { \
386 __chk_user_ptr(ptr); \
387 switch (size) { \
388 case 1: \
389 __get_user_asm_ex(x, ptr, "b", "b", "=q"); \
390 break; \
391 case 2: \
392 __get_user_asm_ex(x, ptr, "w", "w", "=r"); \
393 break; \
394 case 4: \
395 __get_user_asm_ex(x, ptr, "l", "k", "=r"); \
396 break; \
397 case 8: \
398 __get_user_asm_ex_u64(x, ptr); \
399 break; \
400 default: \
401 (x) = __get_user_bad(); \
402 } \
403 } while (0)
404
405 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
406 asm volatile("1: mov"itype" %1,%"rtype"0\n" \
407 "2:\n" \
408 _ASM_EXTABLE_EX(1b, 2b) \
409 : ltype(x) : "m" (__m(addr)))
410
411 #define __put_user_nocheck(x, ptr, size) \
412 ({ \
413 int __pu_err; \
414 __uaccess_begin(); \
415 __put_user_size((x), (ptr), (size), __pu_err, -EFAULT); \
416 __uaccess_end(); \
417 __builtin_expect(__pu_err, 0); \
418 })
419
420 #define __get_user_nocheck(x, ptr, size) \
421 ({ \
422 int __gu_err; \
423 unsigned long __gu_val; \
424 __uaccess_begin(); \
425 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
426 __uaccess_end(); \
427 (x) = (__force __typeof__(*(ptr)))__gu_val; \
428 __builtin_expect(__gu_err, 0); \
429 })
430
431 /* FIXME: this hack is definitely wrong -AK */
432 struct __large_struct { unsigned long buf[100]; };
433 #define __m(x) (*(struct __large_struct __user *)(x))
434
435 /*
436 * Tell gcc we read from memory instead of writing: this is because
437 * we do not write to any memory gcc knows about, so there are no
438 * aliasing issues.
439 */
440 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
441 asm volatile("\n" \
442 "1: mov"itype" %"rtype"1,%2\n" \
443 "2:\n" \
444 ".section .fixup,\"ax\"\n" \
445 "3: mov %3,%0\n" \
446 " jmp 2b\n" \
447 ".previous\n" \
448 _ASM_EXTABLE(1b, 3b) \
449 : "=r"(err) \
450 : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
451
452 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
453 asm volatile("1: mov"itype" %"rtype"0,%1\n" \
454 "2:\n" \
455 _ASM_EXTABLE_EX(1b, 2b) \
456 : : ltype(x), "m" (__m(addr)))
457
458 /*
459 * uaccess_try and catch
460 */
461 #define uaccess_try do { \
462 current_thread_info()->uaccess_err = 0; \
463 __uaccess_begin(); \
464 barrier();
465
466 #define uaccess_catch(err) \
467 __uaccess_end(); \
468 (err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0); \
469 } while (0)
470
471 /**
472 * __get_user: - Get a simple variable from user space, with less checking.
473 * @x: Variable to store result.
474 * @ptr: Source address, in user space.
475 *
476 * Context: User context only. This function may sleep if pagefaults are
477 * enabled.
478 *
479 * This macro copies a single simple variable from user space to kernel
480 * space. It supports simple types like char and int, but not larger
481 * data types like structures or arrays.
482 *
483 * @ptr must have pointer-to-simple-variable type, and the result of
484 * dereferencing @ptr must be assignable to @x without a cast.
485 *
486 * Caller must check the pointer with access_ok() before calling this
487 * function.
488 *
489 * Returns zero on success, or -EFAULT on error.
490 * On error, the variable @x is set to zero.
491 */
492
493 #define __get_user(x, ptr) \
494 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
495
496 /**
497 * __put_user: - Write a simple value into user space, with less checking.
498 * @x: Value to copy to user space.
499 * @ptr: Destination address, in user space.
500 *
501 * Context: User context only. This function may sleep if pagefaults are
502 * enabled.
503 *
504 * This macro copies a single simple value from kernel space to user
505 * space. It supports simple types like char and int, but not larger
506 * data types like structures or arrays.
507 *
508 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
509 * to the result of dereferencing @ptr.
510 *
511 * Caller must check the pointer with access_ok() before calling this
512 * function.
513 *
514 * Returns zero on success, or -EFAULT on error.
515 */
516
517 #define __put_user(x, ptr) \
518 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
519
520 #define __get_user_unaligned __get_user
521 #define __put_user_unaligned __put_user
522
523 /*
524 * {get|put}_user_try and catch
525 *
526 * get_user_try {
527 * get_user_ex(...);
528 * } get_user_catch(err)
529 */
530 #define get_user_try uaccess_try
531 #define get_user_catch(err) uaccess_catch(err)
532
533 #define get_user_ex(x, ptr) do { \
534 unsigned long __gue_val; \
535 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
536 (x) = (__force __typeof__(*(ptr)))__gue_val; \
537 } while (0)
538
539 #define put_user_try uaccess_try
540 #define put_user_catch(err) uaccess_catch(err)
541
542 #define put_user_ex(x, ptr) \
543 __put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
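/*
 * A minimal sketch of the try/catch pattern (hypothetical frame layout;
 * the real users are e.g. the signal frame setup paths):
 *
 *	int err = 0;
 *
 *	put_user_try {
 *		put_user_ex(sig, &frame->sig);
 *		put_user_ex(0, &frame->flags);
 *	} put_user_catch(err);
 *
 * err ends up -EFAULT if any access in the block faulted.
 */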
544
545 extern unsigned long
546 copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
547 extern __must_check long
548 strncpy_from_user(char *dst, const char __user *src, long count);
549
550 extern __must_check long strlen_user(const char __user *str);
551 extern __must_check long strnlen_user(const char __user *str, long n);
552
553 unsigned long __must_check clear_user(void __user *mem, unsigned long len);
554 unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
555
556 extern void __cmpxchg_wrong_size(void)
557 __compiletime_error("Bad argument size for cmpxchg");
558
559 #define __user_atomic_cmpxchg_inatomic(uval, ptr, old, new, size) \
560 ({ \
561 int __ret = 0; \
562 __typeof__(ptr) __uval = (uval); \
563 __typeof__(*(ptr)) __old = (old); \
564 __typeof__(*(ptr)) __new = (new); \
565 __uaccess_begin(); \
566 switch (size) { \
567 case 1: \
568 { \
569 asm volatile("\n" \
570 "1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n" \
571 "2:\n" \
572 "\t.section .fixup, \"ax\"\n" \
573 "3:\tmov %3, %0\n" \
574 "\tjmp 2b\n" \
575 "\t.previous\n" \
576 _ASM_EXTABLE(1b, 3b) \
577 : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
578 : "i" (-EFAULT), "q" (__new), "1" (__old) \
579 : "memory" \
580 ); \
581 break; \
582 } \
583 case 2: \
584 { \
585 asm volatile("\n" \
586 "1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n" \
587 "2:\n" \
588 "\t.section .fixup, \"ax\"\n" \
589 "3:\tmov %3, %0\n" \
590 "\tjmp 2b\n" \
591 "\t.previous\n" \
592 _ASM_EXTABLE(1b, 3b) \
593 : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
594 : "i" (-EFAULT), "r" (__new), "1" (__old) \
595 : "memory" \
596 ); \
597 break; \
598 } \
599 case 4: \
600 { \
601 asm volatile("\n" \
602 "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" \
603 "2:\n" \
604 "\t.section .fixup, \"ax\"\n" \
605 "3:\tmov %3, %0\n" \
606 "\tjmp 2b\n" \
607 "\t.previous\n" \
608 _ASM_EXTABLE(1b, 3b) \
609 : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
610 : "i" (-EFAULT), "r" (__new), "1" (__old) \
611 : "memory" \
612 ); \
613 break; \
614 } \
615 case 8: \
616 { \
617 if (!IS_ENABLED(CONFIG_X86_64)) \
618 __cmpxchg_wrong_size(); \
619 \
620 asm volatile("\n" \
621 "1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n" \
622 "2:\n" \
623 "\t.section .fixup, \"ax\"\n" \
624 "3:\tmov %3, %0\n" \
625 "\tjmp 2b\n" \
626 "\t.previous\n" \
627 _ASM_EXTABLE(1b, 3b) \
628 : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
629 : "i" (-EFAULT), "r" (__new), "1" (__old) \
630 : "memory" \
631 ); \
632 break; \
633 } \
634 default: \
635 __cmpxchg_wrong_size(); \
636 } \
637 __uaccess_end(); \
638 *__uval = __old; \
639 __ret; \
640 })
641
642 #define user_atomic_cmpxchg_inatomic(uval, ptr, old, new) \
643 ({ \
644 access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) ? \
645 __user_atomic_cmpxchg_inatomic((uval), (ptr), \
646 (old), (new), sizeof(*(ptr))) : \
647 -EFAULT; \
648 })
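/*
 * A minimal usage sketch (hypothetical, futex-style): if *uaddr still
 * holds 'oldval', atomically store 'newval'; 'cur' receives the value
 * actually observed in user memory.
 */
static inline int example_user_cmpxchg(u32 __user *uaddr, u32 oldval,
				       u32 newval)
{
	u32 cur;
	int ret;

	ret = user_atomic_cmpxchg_inatomic(&cur, uaddr, oldval, newval);
	if (ret)
		return ret;			/* -EFAULT: bad pointer or fault */
	return cur == oldval ? 0 : -EAGAIN;	/* -EAGAIN: lost the race */
}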
649
650 /*
651 * movsl can be slow when source and dest are not both 8-byte aligned
652 */
653 #ifdef CONFIG_X86_INTEL_USERCOPY
654 extern struct movsl_mask {
655 int mask;
656 } ____cacheline_aligned_in_smp movsl_mask;
657 #endif
658
659 #define ARCH_HAS_NOCACHE_UACCESS 1
660
661 #ifdef CONFIG_X86_32
662 # include <asm/uaccess_32.h>
663 #else
664 # include <asm/uaccess_64.h>
665 #endif
666
667 unsigned long __must_check _copy_from_user(void *to, const void __user *from,
668 unsigned n);
669 unsigned long __must_check _copy_to_user(void __user *to, const void *from,
670 unsigned n);
671
672 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
673 # define copy_user_diag __compiletime_error
674 #else
675 # define copy_user_diag __compiletime_warning
676 #endif
677
678 extern void copy_user_diag("copy_from_user() buffer size is too small")
679 copy_from_user_overflow(void);
680 extern void copy_user_diag("copy_to_user() buffer size is too small")
681 copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
682
683 #undef copy_user_diag
684
685 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
686
687 extern void
688 __compiletime_warning("copy_from_user() buffer size is not provably correct")
689 __copy_from_user_overflow(void) __asm__("copy_from_user_overflow");
690 #define __copy_from_user_overflow(size, count) __copy_from_user_overflow()
691
692 extern void
693 __compiletime_warning("copy_to_user() buffer size is not provably correct")
694 __copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
695 #define __copy_to_user_overflow(size, count) __copy_to_user_overflow()
696
697 #else
698
699 static inline void
700 __copy_from_user_overflow(int size, unsigned long count)
701 {
702 WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
703 }
704
705 #define __copy_to_user_overflow __copy_from_user_overflow
706
707 #endif
708
709 static inline unsigned long __must_check
710 copy_from_user(void *to, const void __user *from, unsigned long n)
711 {
712 int sz = __compiletime_object_size(to);
713
714 might_fault();
715
716 /*
717 * While we would like to have the compiler do the checking for us
718 * even in the non-constant size case, any false positives there are
719 * a problem (especially when DEBUG_STRICT_USER_COPY_CHECKS, but even
720 * without - the [hopefully] dangerous looking nature of the warning
721 * would make people go look at the respective call sites over and
722 * over again just to find that there's no problem).
723 *
724 * And there are cases where it's just not realistic for the compiler
725 * to prove the count to be in range. For example, when multiple call
726 * sites of a helper function - perhaps in different source files -
727 * all do proper range checking, yet the helper function does not
728 * repeat it.
729 *
730 * Therefore limit the compile time checking to the constant size
731 * case, and do only runtime checking for non-constant sizes.
732 */
733
734 if (likely(sz < 0 || sz >= n))
735 n = _copy_from_user(to, from, n);
736 else if (__builtin_constant_p(n))
737 copy_from_user_overflow();
738 else
739 __copy_from_user_overflow(sz, n);
740
741 return n;
742 }
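/*
 * A minimal usage sketch (hypothetical ioctl-style handler): a nonzero
 * return means some bytes could not be copied.
 */
struct example_args { u32 op; u32 len; };

static inline int example_get_args(struct example_args *a,
				   const void __user *uarg)
{
	if (copy_from_user(a, uarg, sizeof(*a)))
		return -EFAULT;
	return 0;
}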
743
744 static inline unsigned long __must_check
745 copy_to_user(void __user *to, const void *from, unsigned long n)
746 {
747 int sz = __compiletime_object_size(from);
748
749 might_fault();
750
751 /* See the comment in copy_from_user() above. */
752 if (likely(sz < 0 || sz >= n))
753 n = _copy_to_user(to, from, n);
755 else if (__builtin_constant_p(n))
755 copy_to_user_overflow();
756 else
757 __copy_to_user_overflow(sz, n);
758
759 return n;
760 }
761
762 #undef __copy_from_user_overflow
763 #undef __copy_to_user_overflow
764
765 /*
766 * We rely on the nested NMI work to allow atomic faults from the NMI path; the
767 * nested NMI paths are careful to preserve CR2.
768 *
769 * Caller must use pagefault_enable/disable, or run in interrupt context,
770 * and also do an access_ok() check.
771 */
772 #define __copy_from_user_nmi __copy_from_user_inatomic
773
774 /*
775 * The "unsafe" user accesses aren't really "unsafe", but the naming
776 * is a big fat warning: you have to not only do the access_ok()
777 * checking before using them, but you have to surround them with the
778 * user_access_begin/end() pair.
779 */
780 #define user_access_begin() __uaccess_begin()
781 #define user_access_end() __uaccess_end()
782
783 #define unsafe_put_user(x, ptr) \
784 ({ \
785 int __pu_err; \
786 __put_user_size((x), (ptr), sizeof(*(ptr)), __pu_err, -EFAULT); \
787 __builtin_expect(__pu_err, 0); \
788 })
789
790 #define unsafe_get_user(x, ptr) \
791 ({ \
792 int __gu_err; \
793 unsigned long __gu_val; \
794 __get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err, -EFAULT); \
795 (x) = (__force __typeof__(*(ptr)))__gu_val; \
796 __builtin_expect(__gu_err, 0); \
797 })
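/*
 * A minimal sketch of the required calling pattern for the unsafe
 * accessors (hypothetical caller): access_ok() first, then bracket the
 * accesses with user_access_begin()/user_access_end().
 */
static inline int example_unsafe_store(int __user *uptr, int val)
{
	int err;

	if (!access_ok(VERIFY_WRITE, uptr, sizeof(*uptr)))
		return -EFAULT;
	user_access_begin();
	err = unsafe_put_user(val, uptr);
	user_access_end();
	return err;
}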
798
799 #endif /* _ASM_X86_UACCESS_H */
1
2 /*
3 * Driver for the Micron P320 SSD
4 * Copyright (C) 2011 Micron Technology, Inc.
5 *
6 * Portions of this code were derived from works subjected to the
7 * following copyright:
8 * Copyright (C) 2009 Integrated Device Technology, Inc.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 */
21
22 #include <linux/pci.h>
23 #include <linux/interrupt.h>
24 #include <linux/ata.h>
25 #include <linux/delay.h>
26 #include <linux/hdreg.h>
27 #include <linux/uaccess.h>
28 #include <linux/random.h>
29 #include <linux/smp.h>
30 #include <linux/compat.h>
31 #include <linux/fs.h>
32 #include <linux/module.h>
33 #include <linux/genhd.h>
34 #include <linux/blkdev.h>
35 #include <linux/blk-mq.h>
36 #include <linux/bio.h>
37 #include <linux/dma-mapping.h>
38 #include <linux/idr.h>
39 #include <linux/kthread.h>
40 #include <../drivers/ata/ahci.h>
41 #include <linux/export.h>
42 #include <linux/debugfs.h>
43 #include <linux/prefetch.h>
44 #include "mtip32xx.h"
45
46 #define HW_CMD_SLOT_SZ (MTIP_MAX_COMMAND_SLOTS * 32)
47
48 /* DMA region containing RX Fis, Identify, RLE10, and SMART buffers */
49 #define AHCI_RX_FIS_SZ 0x100
50 #define AHCI_RX_FIS_OFFSET 0x0
51 #define AHCI_IDFY_SZ ATA_SECT_SIZE
52 #define AHCI_IDFY_OFFSET 0x400
53 #define AHCI_SECTBUF_SZ ATA_SECT_SIZE
54 #define AHCI_SECTBUF_OFFSET 0x800
55 #define AHCI_SMARTBUF_SZ ATA_SECT_SIZE
56 #define AHCI_SMARTBUF_OFFSET 0xC00
57 /* 0x100 + 0x200 + 0x200 + 0x200 is smaller than 4k but we pad it out */
58 #define BLOCK_DMA_ALLOC_SZ 4096
59
60 /* DMA region containing command table (should be 8192 bytes) */
61 #define AHCI_CMD_SLOT_SZ sizeof(struct mtip_cmd_hdr)
62 #define AHCI_CMD_TBL_SZ (MTIP_MAX_COMMAND_SLOTS * AHCI_CMD_SLOT_SZ)
63 #define AHCI_CMD_TBL_OFFSET 0x0
64
65 /* DMA region per command (contains header and SGL) */
66 #define AHCI_CMD_TBL_HDR_SZ 0x80
67 #define AHCI_CMD_TBL_HDR_OFFSET 0x0
68 #define AHCI_CMD_TBL_SGL_SZ (MTIP_MAX_SG * sizeof(struct mtip_cmd_sg))
69 #define AHCI_CMD_TBL_SGL_OFFSET AHCI_CMD_TBL_HDR_SZ
70 #define CMD_DMA_ALLOC_SZ (AHCI_CMD_TBL_SGL_SZ + AHCI_CMD_TBL_HDR_SZ)
71
72
73 #define HOST_CAP_NZDMA (1 << 19)
74 #define HOST_HSORG 0xFC
75 #define HSORG_DISABLE_SLOTGRP_INTR (1<<24)
76 #define HSORG_DISABLE_SLOTGRP_PXIS (1<<16)
77 #define HSORG_HWREV 0xFF00
78 #define HSORG_STYLE 0x8
79 #define HSORG_SLOTGROUPS 0x7
80
81 #define PORT_COMMAND_ISSUE 0x38
82 #define PORT_SDBV 0x7C
83
84 #define PORT_OFFSET 0x100
85 #define PORT_MEM_SIZE 0x80
86
87 #define PORT_IRQ_ERR \
88 (PORT_IRQ_HBUS_ERR | PORT_IRQ_IF_ERR | PORT_IRQ_CONNECT | \
89 PORT_IRQ_PHYRDY | PORT_IRQ_UNK_FIS | PORT_IRQ_BAD_PMP | \
90 PORT_IRQ_TF_ERR | PORT_IRQ_HBUS_DATA_ERR | PORT_IRQ_IF_NONFATAL | \
91 PORT_IRQ_OVERFLOW)
92 #define PORT_IRQ_LEGACY \
93 (PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS)
94 #define PORT_IRQ_HANDLED \
95 (PORT_IRQ_SDB_FIS | PORT_IRQ_LEGACY | \
96 PORT_IRQ_TF_ERR | PORT_IRQ_IF_ERR | \
97 PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)
98 #define DEF_PORT_IRQ \
99 (PORT_IRQ_ERR | PORT_IRQ_LEGACY | PORT_IRQ_SDB_FIS)
100
101 /* product numbers */
102 #define MTIP_PRODUCT_UNKNOWN 0x00
103 #define MTIP_PRODUCT_ASICFPGA 0x11
104
105 /* Device instance number, incremented each time a device is probed. */
106 static int instance;
107
108 static struct list_head online_list;
109 static struct list_head removing_list;
110 static spinlock_t dev_lock;
111
112 /*
113 * Global variable used to hold the major block device number
114 * allocated in mtip_init().
115 */
116 static int mtip_major;
117 static struct dentry *dfs_parent;
118 static struct dentry *dfs_device_status;
119
120 static u32 cpu_use[NR_CPUS];
121
122 static DEFINE_SPINLOCK(rssd_index_lock);
123 static DEFINE_IDA(rssd_index_ida);
124
125 static int mtip_block_initialize(struct driver_data *dd);
126
127 #ifdef CONFIG_COMPAT
128 struct mtip_compat_ide_task_request_s {
129 __u8 io_ports[8];
130 __u8 hob_ports[8];
131 ide_reg_valid_t out_flags;
132 ide_reg_valid_t in_flags;
133 int data_phase;
134 int req_cmd;
135 compat_ulong_t out_size;
136 compat_ulong_t in_size;
137 };
138 #endif
139
140 /*
141 * This function is called when the card may have been removed from
142 * the system; it reads the vendor ID from the configuration space
143 * to detect a surprise removal.
144 *
145 * @pdev Pointer to the pci_dev structure.
146 *
147 * return value
148 * true if device removed, else false
149 */
150 static bool mtip_check_surprise_removal(struct pci_dev *pdev)
151 {
152 u16 vendor_id = 0;
153 struct driver_data *dd = pci_get_drvdata(pdev);
154
155 if (dd->sr)
156 return true;
157
158 /* Read the vendorID from the configuration space */
159 pci_read_config_word(pdev, 0x00, &vendor_id);
160 if (vendor_id == 0xFFFF) {
161 dd->sr = true;
162 if (dd->queue)
163 set_bit(QUEUE_FLAG_DEAD, &dd->queue->queue_flags);
164 else
165 dev_warn(&dd->pdev->dev,
166 "%s: dd->queue is NULL\n", __func__);
167 return true; /* device removed */
168 }
169
170 return false; /* device present */
171 }
172
173 static struct mtip_cmd *mtip_get_int_command(struct driver_data *dd)
174 {
175 struct request *rq;
176
177 rq = blk_mq_alloc_request(dd->queue, 0, BLK_MQ_REQ_RESERVED);
178 return blk_mq_rq_to_pdu(rq);
179 }
180
181 static void mtip_put_int_command(struct driver_data *dd, struct mtip_cmd *cmd)
182 {
183 blk_put_request(blk_mq_rq_from_pdu(cmd));
184 }
185
186 /*
187 * Once we add support for one hctx per mtip group, this will change a bit
188 */
189 static struct request *mtip_rq_from_tag(struct driver_data *dd,
190 unsigned int tag)
191 {
192 struct blk_mq_hw_ctx *hctx = dd->queue->queue_hw_ctx[0];
193
194 return blk_mq_tag_to_rq(hctx->tags, tag);
195 }
196
197 static struct mtip_cmd *mtip_cmd_from_tag(struct driver_data *dd,
198 unsigned int tag)
199 {
200 struct request *rq = mtip_rq_from_tag(dd, tag);
201
202 return blk_mq_rq_to_pdu(rq);
203 }
204
205 /*
206 * IO completion function.
207 *
208 * This completion function is called by the driver ISR when a
209 * command that was issued by the kernel completes. It first calls the
210 * asynchronous completion function which normally calls back into the block
211 * layer passing the asynchronous callback data, then unmaps the
212 * scatter list associated with the completed command, and finally
213 * clears the allocated bit associated with the completed command.
214 *
215 * @port Pointer to the port data structure.
216 * @tag Tag of the command.
217 * @data Pointer to driver_data.
218 * @status Completion status.
219 *
220 * return value
221 * None
222 */
223 static void mtip_async_complete(struct mtip_port *port,
224 int tag, struct mtip_cmd *cmd, int status)
225 {
226 struct driver_data *dd = port->dd;
227 struct request *rq;
228
229 if (unlikely(!dd) || unlikely(!port))
230 return;
231
232 if (unlikely(status == PORT_IRQ_TF_ERR)) {
233 dev_warn(&port->dd->pdev->dev,
234 "Command tag %d failed due to TFE\n", tag);
235 }
236
237 /* Unmap the DMA scatter list entries */
238 dma_unmap_sg(&dd->pdev->dev, cmd->sg, cmd->scatter_ents, cmd->direction);
239
240 rq = mtip_rq_from_tag(dd, tag);
241
242 if (unlikely(cmd->unaligned))
243 up(&port->cmd_slot_unal);
244
245 blk_mq_end_request(rq, status ? -EIO : 0);
246 }
247
248 /*
249 * Reset the HBA (without sleeping)
250 *
251 * @dd Pointer to the driver data structure.
252 *
253 * return value
254 * 0 The reset was successful.
255 * -1 The HBA Reset bit did not clear.
256 */
257 static int mtip_hba_reset(struct driver_data *dd)
258 {
259 unsigned long timeout;
260
261 /* Set the reset bit */
262 writel(HOST_RESET, dd->mmio + HOST_CTL);
263
264 /* Flush */
265 readl(dd->mmio + HOST_CTL);
266
267 /*
268 * Spin for up to 10 seconds waiting for reset acknowledgement. The spec
269 * allows 1 sec, but in LUN failure conditions up to 10 secs are required
270 */
271 timeout = jiffies + msecs_to_jiffies(10000);
272 do {
273 mdelay(10);
274 if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))
275 return -1;
276
277 } while ((readl(dd->mmio + HOST_CTL) & HOST_RESET)
278 && time_before(jiffies, timeout));
279
280 if (readl(dd->mmio + HOST_CTL) & HOST_RESET)
281 return -1;
282
283 return 0;
284 }
285
286 /*
287 * Issue a command to the hardware.
288 *
289 * Set the appropriate bit in the s_active and Command Issue hardware
290 * registers, causing hardware command processing to begin.
291 *
292 * @port Pointer to the port structure.
293 * @tag The tag of the command to be issued.
294 *
295 * return value
296 * None
297 */
298 static inline void mtip_issue_ncq_command(struct mtip_port *port, int tag)
299 {
300 int group = tag >> 5;
301
302 /* guard SACT and CI registers */
303 spin_lock(&port->cmd_issue_lock[group]);
304 writel((1 << MTIP_TAG_BIT(tag)),
305 port->s_active[MTIP_TAG_INDEX(tag)]);
306 writel((1 << MTIP_TAG_BIT(tag)),
307 port->cmd_issue[MTIP_TAG_INDEX(tag)]);
308 spin_unlock(&port->cmd_issue_lock[group]);
309 }
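/*
 * Worked example of the tag math (assuming the usual 32-slots-per-group
 * layout behind MTIP_TAG_INDEX/MTIP_TAG_BIT): tag 37 gives group
 * 37 >> 5 = 1 and bit 37 & 0x1f = 5, so bit 5 is set in s_active[1]
 * and cmd_issue[1].
 */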
310
311 /*
312 * Enable/disable the reception of FIS
313 *
314 * @port Pointer to the port data structure
315 * @enable 1 to enable, 0 to disable
316 *
317 * return value
318 * Previous state: 1 enabled, 0 disabled
319 */
320 static int mtip_enable_fis(struct mtip_port *port, int enable)
321 {
322 u32 tmp;
323
324 /* enable FIS reception */
325 tmp = readl(port->mmio + PORT_CMD);
326 if (enable)
327 writel(tmp | PORT_CMD_FIS_RX, port->mmio + PORT_CMD);
328 else
329 writel(tmp & ~PORT_CMD_FIS_RX, port->mmio + PORT_CMD);
330
331 /* Flush */
332 readl(port->mmio + PORT_CMD);
333
334 return (((tmp & PORT_CMD_FIS_RX) == PORT_CMD_FIS_RX));
335 }
336
337 /*
338 * Enable/disable the DMA engine
339 *
340 * @port Pointer to the port data structure
341 * @enable 1 to enable, 0 to disable
342 *
343 * return value
344 * Previous state: 1 enabled, 0 disabled.
345 */
346 static int mtip_enable_engine(struct mtip_port *port, int enable)
347 {
348 u32 tmp;
349
350 /* enable/disable the DMA engine */
351 tmp = readl(port->mmio + PORT_CMD);
352 if (enable)
353 writel(tmp | PORT_CMD_START, port->mmio + PORT_CMD);
354 else
355 writel(tmp & ~PORT_CMD_START, port->mmio + PORT_CMD);
356
357 readl(port->mmio + PORT_CMD);
358 return (((tmp & PORT_CMD_START) == PORT_CMD_START));
359 }
360
361 /*
362 * Enables the port DMA engine and FIS reception.
363 *
364 * return value
365 * None
366 */
367 static inline void mtip_start_port(struct mtip_port *port)
368 {
369 /* Enable FIS reception */
370 mtip_enable_fis(port, 1);
371
372 /* Enable the DMA engine */
373 mtip_enable_engine(port, 1);
374 }
375
376 /*
377 * Deinitialize a port by disabling port interrupts, the DMA engine,
378 * and FIS reception.
379 *
380 * @port Pointer to the port structure
381 *
382 * return value
383 * None
384 */
385 static inline void mtip_deinit_port(struct mtip_port *port)
386 {
387 /* Disable interrupts on this port */
388 writel(0, port->mmio + PORT_IRQ_MASK);
389
390 /* Disable the DMA engine */
391 mtip_enable_engine(port, 0);
392
393 /* Disable FIS reception */
394 mtip_enable_fis(port, 0);
395 }
396
397 /*
398 * Initialize a port.
399 *
400 * This function deinitializes the port by calling mtip_deinit_port() and
401 * then initializes it by setting the command header and RX FIS addresses,
402 * clearing the SError register and any pending port interrupts before
403 * re-enabling the default set of port interrupts.
404 *
405 * @port Pointer to the port structure.
406 *
407 * return value
408 * None
409 */
410 static void mtip_init_port(struct mtip_port *port)
411 {
412 int i;
413 mtip_deinit_port(port);
414
415 /* Program the command list base and FIS base addresses */
416 if (readl(port->dd->mmio + HOST_CAP) & HOST_CAP_64) {
417 writel((port->command_list_dma >> 16) >> 16,
418 port->mmio + PORT_LST_ADDR_HI);
419 writel((port->rxfis_dma >> 16) >> 16,
420 port->mmio + PORT_FIS_ADDR_HI);
421 }
422
423 writel(port->command_list_dma & 0xFFFFFFFF,
424 port->mmio + PORT_LST_ADDR);
425 writel(port->rxfis_dma & 0xFFFFFFFF, port->mmio + PORT_FIS_ADDR);
426
427 /* Clear SError */
428 writel(readl(port->mmio + PORT_SCR_ERR), port->mmio + PORT_SCR_ERR);
429
430 /* reset the completed registers.*/
431 for (i = 0; i < port->dd->slot_groups; i++)
432 writel(0xFFFFFFFF, port->completed[i]);
433
434 /* Clear any pending interrupts for this port */
435 writel(readl(port->mmio + PORT_IRQ_STAT), port->mmio + PORT_IRQ_STAT);
436
437 /* Clear any pending interrupts on the HBA. */
438 writel(readl(port->dd->mmio + HOST_IRQ_STAT),
439 port->dd->mmio + HOST_IRQ_STAT);
440
441 /* Enable port interrupts */
442 writel(DEF_PORT_IRQ, port->mmio + PORT_IRQ_MASK);
443 }
444
445 /*
446 * Restart a port
447 *
448 * @port Pointer to the port data structure.
449 *
450 * return value
451 * None
452 */
453 static void mtip_restart_port(struct mtip_port *port)
454 {
455 unsigned long timeout;
456
457 /* Disable the DMA engine */
458 mtip_enable_engine(port, 0);
459
460 /* Chip quirk: wait up to 500ms for PxCMD.CR == 0 */
461 timeout = jiffies + msecs_to_jiffies(500);
462 while ((readl(port->mmio + PORT_CMD) & PORT_CMD_LIST_ON)
463 && time_before(jiffies, timeout))
464 ;
465
466 if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
467 return;
468
469 /*
470 * Chip quirk: escalate to hba reset if
471 * PxCMD.CR not clear after 500 ms
472 */
473 if (readl(port->mmio + PORT_CMD) & PORT_CMD_LIST_ON) {
474 dev_warn(&port->dd->pdev->dev,
475 "PxCMD.CR not clear, escalating reset\n");
476
477 if (mtip_hba_reset(port->dd))
478 dev_err(&port->dd->pdev->dev,
479 "HBA reset escalation failed.\n");
480
481 /* 30 ms delay before com reset to quiesce chip */
482 mdelay(30);
483 }
484
485 dev_warn(&port->dd->pdev->dev, "Issuing COM reset\n");
486
487 /* Set PxSCTL.DET */
488 writel(readl(port->mmio + PORT_SCR_CTL) |
489 1, port->mmio + PORT_SCR_CTL);
490 readl(port->mmio + PORT_SCR_CTL);
491
492 /* Wait 1 ms to quiesce chip function */
493 timeout = jiffies + msecs_to_jiffies(1);
494 while (time_before(jiffies, timeout))
495 ;
496
497 if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
498 return;
499
500 /* Clear PxSCTL.DET */
501 writel(readl(port->mmio + PORT_SCR_CTL) & ~1,
502 port->mmio + PORT_SCR_CTL);
503 readl(port->mmio + PORT_SCR_CTL);
504
505 /* Wait 500 ms for bit 0 of PORT_SCR_STS to be set */
506 timeout = jiffies + msecs_to_jiffies(500);
507 while (((readl(port->mmio + PORT_SCR_STAT) & 0x01) == 0)
508 && time_before(jiffies, timeout))
509 ;
510
511 if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
512 return;
513
514 if ((readl(port->mmio + PORT_SCR_STAT) & 0x01) == 0)
515 dev_warn(&port->dd->pdev->dev,
516 "COM reset failed\n");
517
518 mtip_init_port(port);
519 mtip_start_port(port);
520
521 }
522
523 static int mtip_device_reset(struct driver_data *dd)
524 {
525 int rv = 0;
526
527 if (mtip_check_surprise_removal(dd->pdev))
528 return 0;
529
530 if (mtip_hba_reset(dd) < 0)
531 rv = -EFAULT;
532
533 mdelay(1);
534 mtip_init_port(dd->port);
535 mtip_start_port(dd->port);
536
537 /* Enable interrupts on the HBA. */
538 writel(readl(dd->mmio + HOST_CTL) | HOST_IRQ_EN,
539 dd->mmio + HOST_CTL);
540 return rv;
541 }
542
543 /*
544 * Helper function for tag logging
545 */
546 static void print_tags(struct driver_data *dd,
547 char *msg,
548 unsigned long *tagbits,
549 int cnt)
550 {
551 unsigned char tagmap[128];
552 int group, tagmap_len = 0;
553
554 memset(tagmap, 0, sizeof(tagmap));
555 for (group = SLOTBITS_IN_LONGS; group > 0; group--)
556 tagmap_len += sprintf(tagmap + tagmap_len, "%016lX ",
557 tagbits[group-1]);
558 dev_warn(&dd->pdev->dev,
559 "%d command(s) %s: tagmap [%s]", cnt, msg, tagmap);
560 }
561
562 /*
563 * Internal command completion callback function.
564 *
565 * This function is normally called by the driver ISR when an internal
566 * command completed. This function signals the command completion by
567 * calling complete().
568 *
569 * @port Pointer to the port data structure.
570 * @tag Tag of the command that has completed.
571 * @data Pointer to a completion structure.
572 * @status Completion status.
573 *
574 * return value
575 * None
576 */
577 static void mtip_completion(struct mtip_port *port,
578 int tag, struct mtip_cmd *command, int status)
579 {
580 struct completion *waiting = command->comp_data;
581 if (unlikely(status == PORT_IRQ_TF_ERR))
582 dev_warn(&port->dd->pdev->dev,
583 "Internal command %d completed with TFE\n", tag);
584
585 complete(waiting);
586 }
587
588 static void mtip_null_completion(struct mtip_port *port,
589 int tag, struct mtip_cmd *command, int status)
590 {
591 }
592
593 static int mtip_read_log_page(struct mtip_port *port, u8 page, u16 *buffer,
594 dma_addr_t buffer_dma, unsigned int sectors);
595 static int mtip_get_smart_attr(struct mtip_port *port, unsigned int id,
596 struct smart_attr *attrib);
597 /*
598 * Handle an error.
599 *
600 * @dd Pointer to the DRIVER_DATA structure.
601 *
602 * return value
603 * None
604 */
605 static void mtip_handle_tfe(struct driver_data *dd)
606 {
607 int group, tag, bit, reissue, rv;
608 struct mtip_port *port;
609 struct mtip_cmd *cmd;
610 u32 completed;
611 struct host_to_dev_fis *fis;
612 unsigned long tagaccum[SLOTBITS_IN_LONGS];
613 unsigned int cmd_cnt = 0;
614 unsigned char *buf;
615 char *fail_reason = NULL;
616 int fail_all_ncq_write = 0, fail_all_ncq_cmds = 0;
617
618 dev_warn(&dd->pdev->dev, "Taskfile error\n");
619
620 port = dd->port;
621
622 set_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
623
624 if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) {
625 cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL);
626 dbg_printk(MTIP_DRV_NAME " TFE for the internal command\n");
627
628 if (cmd->comp_data && cmd->comp_func) {
629 cmd->comp_func(port, MTIP_TAG_INTERNAL,
630 cmd, PORT_IRQ_TF_ERR);
631 }
632 goto handle_tfe_exit;
633 }
634
635 /* clear the tag accumulator */
636 memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long));
637
638 /* Loop through all the groups */
639 for (group = 0; group < dd->slot_groups; group++) {
640 completed = readl(port->completed[group]);
641
642 dev_warn(&dd->pdev->dev, "g=%u, comp=%x\n", group, completed);
643
644 /* clear completed status register in the hardware.*/
645 writel(completed, port->completed[group]);
646
647 /* Process successfully completed commands */
648 for (bit = 0; bit < 32 && completed; bit++) {
649 if (!(completed & (1<<bit)))
650 continue;
651 tag = (group << 5) + bit;
652
653 /* Skip the internal command slot */
654 if (tag == MTIP_TAG_INTERNAL)
655 continue;
656
657 cmd = mtip_cmd_from_tag(dd, tag);
658 if (likely(cmd->comp_func)) {
659 set_bit(tag, tagaccum);
660 cmd_cnt++;
661 cmd->comp_func(port, tag, cmd, 0);
662 } else {
663 dev_err(&port->dd->pdev->dev,
664 "Missing completion func for tag %d",
665 tag);
666 if (mtip_check_surprise_removal(dd->pdev)) {
667 /* don't proceed further */
668 return;
669 }
670 }
671 }
672 }
673
674 print_tags(dd, "completed (TFE)", tagaccum, cmd_cnt);
675
676 /* Restart the port */
677 mdelay(20);
678 mtip_restart_port(port);
679
680 /* Trying to determine the cause of the error */
681 rv = mtip_read_log_page(dd->port, ATA_LOG_SATA_NCQ,
682 dd->port->log_buf,
683 dd->port->log_buf_dma, 1);
684 if (rv) {
685 dev_warn(&dd->pdev->dev,
686 "Error in READ LOG EXT (10h) command\n");
687 /* non-critical error, don't fail the load */
688 } else {
689 buf = (unsigned char *)dd->port->log_buf;
690 if (buf[259] & 0x1) {
691 dev_info(&dd->pdev->dev,
692 "Write protect bit is set.\n");
693 set_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag);
694 fail_all_ncq_write = 1;
695 fail_reason = "write protect";
696 }
697 if (buf[288] == 0xF7) {
698 dev_info(&dd->pdev->dev,
699 "Exceeded Tmax, drive in thermal shutdown.\n");
700 set_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag);
701 fail_all_ncq_cmds = 1;
702 fail_reason = "thermal shutdown";
703 }
704 if (buf[288] == 0xBF) {
705 set_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag);
706 dev_info(&dd->pdev->dev,
707 "Drive indicates rebuild has failed. Secure erase required.\n");
708 fail_all_ncq_cmds = 1;
709 fail_reason = "rebuild failed";
710 }
711 }
712
713 /* clear the tag accumulator */
714 memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long));
715
716 /* Loop through all the groups */
717 for (group = 0; group < dd->slot_groups; group++) {
718 for (bit = 0; bit < 32; bit++) {
719 reissue = 1;
720 tag = (group << 5) + bit;
721 cmd = mtip_cmd_from_tag(dd, tag);
722
723 fis = (struct host_to_dev_fis *)cmd->command;
724
725 /* Should re-issue? */
726 if (tag == MTIP_TAG_INTERNAL ||
727 fis->command == ATA_CMD_SET_FEATURES)
728 reissue = 0;
729 else {
730 if (fail_all_ncq_cmds ||
731 (fail_all_ncq_write &&
732 fis->command == ATA_CMD_FPDMA_WRITE)) {
733 dev_warn(&dd->pdev->dev,
734 " Fail: %s w/tag %d [%s].\n",
735 fis->command == ATA_CMD_FPDMA_WRITE ?
736 "write" : "read",
737 tag,
738 fail_reason != NULL ?
739 fail_reason : "unknown");
740 if (cmd->comp_func) {
741 cmd->comp_func(port, tag,
742 cmd, -ENODATA);
743 }
744 continue;
745 }
746 }
747
748 /*
749 * First check if this command has
750 * exceeded its retries.
751 */
752 if (reissue && (cmd->retries-- > 0)) {
753
754 set_bit(tag, tagaccum);
755
756 /* Re-issue the command. */
757 mtip_issue_ncq_command(port, tag);
758
759 continue;
760 }
761
762 /* Retire a command that will not be reissued */
763 dev_warn(&port->dd->pdev->dev,
764 "retiring tag %d\n", tag);
765
766 if (cmd->comp_func)
767 cmd->comp_func(port, tag, cmd, PORT_IRQ_TF_ERR);
768 else
769 dev_warn(&port->dd->pdev->dev,
770 "Bad completion for tag %d\n",
771 tag);
772 }
773 }
774 print_tags(dd, "reissued (TFE)", tagaccum, cmd_cnt);
775
776 handle_tfe_exit:
777 /* clear eh_active */
778 clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
779 wake_up_interruptible(&port->svc_wait);
780 }
781
782 /*
783 * Handle a set device bits interrupt
784 */
785 static inline void mtip_workq_sdbfx(struct mtip_port *port, int group,
786 u32 completed)
787 {
788 struct driver_data *dd = port->dd;
789 int tag, bit;
790 struct mtip_cmd *command;
791
792 if (!completed) {
793 WARN_ON_ONCE(!completed);
794 return;
795 }
796 /* clear completed status register in the hardware.*/
797 writel(completed, port->completed[group]);
798
799 /* Process completed commands. */
800 for (bit = 0; (bit < 32) && completed; bit++) {
801 if (completed & 0x01) {
802 tag = (group << 5) | bit;
803
804 /* skip internal command slot. */
805 if (unlikely(tag == MTIP_TAG_INTERNAL))
806 continue;
807
808 command = mtip_cmd_from_tag(dd, tag);
809 if (likely(command->comp_func))
810 command->comp_func(port, tag, command, 0);
811 else {
812 dev_dbg(&dd->pdev->dev,
813 "Null completion for tag %d",
814 tag);
815
816 if (mtip_check_surprise_removal(
817 dd->pdev)) {
818 return;
819 }
820 }
821 }
822 completed >>= 1;
823 }
824
825 /* If last, re-enable interrupts */
826 if (atomic_dec_return(&dd->irq_workers_active) == 0)
827 writel(0xffffffff, dd->mmio + HOST_IRQ_STAT);
828 }
829
830 /*
831 * Process legacy pio and d2h interrupts
832 */
833 static inline void mtip_process_legacy(struct driver_data *dd, u32 port_stat)
834 {
835 struct mtip_port *port = dd->port;
836 struct mtip_cmd *cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL);
837
838 if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags) &&
839 (cmd != NULL) && !(readl(port->cmd_issue[MTIP_TAG_INTERNAL])
840 & (1 << MTIP_TAG_INTERNAL))) {
841 if (cmd->comp_func) {
842 cmd->comp_func(port, MTIP_TAG_INTERNAL, cmd, 0);
843 return;
844 }
845 }
846
847 return;
848 }
849
850 /*
851 * Demux and handle errors
852 */
853 static inline void mtip_process_errors(struct driver_data *dd, u32 port_stat)
854 {
855
856 if (unlikely(port_stat & PORT_IRQ_CONNECT)) {
857 dev_warn(&dd->pdev->dev,
858 "Clearing PxSERR.DIAG.x\n");
859 writel((1 << 26), dd->port->mmio + PORT_SCR_ERR);
860 }
861
862 if (unlikely(port_stat & PORT_IRQ_PHYRDY)) {
863 dev_warn(&dd->pdev->dev,
864 "Clearing PxSERR.DIAG.n\n");
865 writel((1 << 16), dd->port->mmio + PORT_SCR_ERR);
866 }
867
868 if (unlikely(port_stat & ~PORT_IRQ_HANDLED)) {
869 dev_warn(&dd->pdev->dev,
870 "Port stat errors %x unhandled\n",
871 (port_stat & ~PORT_IRQ_HANDLED));
872 if (mtip_check_surprise_removal(dd->pdev))
873 return;
874 }
875 if (likely(port_stat & (PORT_IRQ_TF_ERR | PORT_IRQ_IF_ERR))) {
876 set_bit(MTIP_PF_EH_ACTIVE_BIT, &dd->port->flags);
877 wake_up_interruptible(&dd->port->svc_wait);
878 }
879 }
880
881 static inline irqreturn_t mtip_handle_irq(struct driver_data *data)
882 {
883 struct driver_data *dd = (struct driver_data *) data;
884 struct mtip_port *port = dd->port;
885 u32 hba_stat, port_stat;
886 int rv = IRQ_NONE;
887 int do_irq_enable = 1, i, workers;
888 struct mtip_work *twork;
889
890 hba_stat = readl(dd->mmio + HOST_IRQ_STAT);
891 if (hba_stat) {
892 rv = IRQ_HANDLED;
893
894 /* Acknowledge the interrupt status on the port.*/
895 port_stat = readl(port->mmio + PORT_IRQ_STAT);
896 if (unlikely(port_stat == 0xFFFFFFFF)) {
897 mtip_check_surprise_removal(dd->pdev);
898 return IRQ_HANDLED;
899 }
900 writel(port_stat, port->mmio + PORT_IRQ_STAT);
901
902 /* Demux port status */
903 if (likely(port_stat & PORT_IRQ_SDB_FIS)) {
904 do_irq_enable = 0;
905 WARN_ON_ONCE(atomic_read(&dd->irq_workers_active) != 0);
906
907 /* Start at 1: group zero is always local? */
908 for (i = 0, workers = 0; i < MTIP_MAX_SLOT_GROUPS;
909 i++) {
910 twork = &dd->work[i];
911 twork->completed = readl(port->completed[i]);
912 if (twork->completed)
913 workers++;
914 }
915
916 atomic_set(&dd->irq_workers_active, workers);
917 if (workers) {
918 for (i = 1; i < MTIP_MAX_SLOT_GROUPS; i++) {
919 twork = &dd->work[i];
920 if (twork->completed)
921 queue_work_on(
922 twork->cpu_binding,
923 dd->isr_workq,
924 &twork->work);
925 }
926
927 if (likely(dd->work[0].completed))
928 mtip_workq_sdbfx(port, 0,
929 dd->work[0].completed);
930
931 } else {
932 /*
933 * Chip quirk: SDB interrupt but nothing
934 * to complete
935 */
936 do_irq_enable = 1;
937 }
938 }
939
940 if (unlikely(port_stat & PORT_IRQ_ERR)) {
941 if (unlikely(mtip_check_surprise_removal(dd->pdev))) {
942 /* don't proceed further */
943 return IRQ_HANDLED;
944 }
945 if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
946 &dd->dd_flag))
947 return rv;
948
949 mtip_process_errors(dd, port_stat & PORT_IRQ_ERR);
950 }
951
952 if (unlikely(port_stat & PORT_IRQ_LEGACY))
953 mtip_process_legacy(dd, port_stat & PORT_IRQ_LEGACY);
954 }
955
956 /* acknowledge interrupt */
957 if (unlikely(do_irq_enable))
958 writel(hba_stat, dd->mmio + HOST_IRQ_STAT);
959
960 return rv;
961 }
962
963 /*
964 * HBA interrupt subroutine.
965 *
966 * @irq IRQ number.
967 * @instance Pointer to the driver data structure.
968 *
969 * return value
970 * IRQ_HANDLED An HBA interrupt was pending and handled.
971 * IRQ_NONE This interrupt was not for the HBA.
972 */
973 static irqreturn_t mtip_irq_handler(int irq, void *instance)
974 {
975 struct driver_data *dd = instance;
976
977 return mtip_handle_irq(dd);
978 }
979
980 static void mtip_issue_non_ncq_command(struct mtip_port *port, int tag)
981 {
982 writel(1 << MTIP_TAG_BIT(tag),
983 port->cmd_issue[MTIP_TAG_INDEX(tag)]);
984 }
985
986 static bool mtip_pause_ncq(struct mtip_port *port,
987 struct host_to_dev_fis *fis)
988 {
989 struct host_to_dev_fis *reply;
990 unsigned long task_file_data;
991
992 reply = port->rxfis + RX_FIS_D2H_REG;
993 task_file_data = readl(port->mmio+PORT_TFDATA);
994
995 if ((task_file_data & 1))
996 return false;
997
998 if (fis->command == ATA_CMD_SEC_ERASE_PREP) {
999 port->ic_pause_timer = jiffies;
1000 return true;
1001 } else if ((fis->command == ATA_CMD_DOWNLOAD_MICRO) &&
1002 (fis->features == 0x03)) {
1003 set_bit(MTIP_PF_DM_ACTIVE_BIT, &port->flags);
1004 port->ic_pause_timer = jiffies;
1005 return true;
1006 } else if ((fis->command == ATA_CMD_SEC_ERASE_UNIT) ||
1007 ((fis->command == 0xFC) &&
1008 (fis->features == 0x27 || fis->features == 0x72 ||
1009 fis->features == 0x62 || fis->features == 0x26))) {
1010 clear_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag);
1011 /* Com reset after secure erase or lowlevel format */
1012 mtip_restart_port(port);
1013 clear_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags);
1014 return false;
1015 }
1016
1017 return false;
1018 }
1019
1020 /*
1021 * Wait for port to quiesce
1022 *
1023 * @port Pointer to port data structure
1024 * @timeout Max duration to wait (ms)
1025 *
1026 * return value
1027 * 0 Success
1028 * -EBUSY Commands still active
1029 */
1030 static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout)
1031 {
1032 unsigned long to;
1033 unsigned int n;
1034 unsigned int active = 1;
1035
1036 blk_mq_stop_hw_queues(port->dd->queue);
1037
1038 to = jiffies + msecs_to_jiffies(timeout);
1039 do {
1040 if (test_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags) &&
1041 test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags)) {
1042 msleep(20);
1043 continue; /* svc thd is actively issuing commands */
1044 }
1045
1046 msleep(100);
1047 if (mtip_check_surprise_removal(port->dd->pdev))
1048 goto err_fault;
1049 if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
1050 goto err_fault;
1051
1052 /*
1053 * Ignore s_active bit 0 of array element 0.
1054 * This bit will always be set
1055 */
1056 active = readl(port->s_active[0]) & 0xFFFFFFFE;
1057 for (n = 1; n < port->dd->slot_groups; n++)
1058 active |= readl(port->s_active[n]);
1059
1060 if (!active)
1061 break;
1062 } while (time_before(jiffies, to));
1063
1064 blk_mq_start_stopped_hw_queues(port->dd->queue, true);
1065 return active ? -EBUSY : 0;
1066 err_fault:
1067 blk_mq_start_stopped_hw_queues(port->dd->queue, true);
1068 return -EFAULT;
1069 }
1070
1071 /*
1072 * Execute an internal command and wait for the completion.
1073 *
1074 * @port Pointer to the port data structure.
1075 * @fis Pointer to the FIS that describes the command.
1076 * @fis_len Length in WORDS of the FIS.
1077 * @buffer DMA accessible for command data.
1078 * @buf_len Length, in bytes, of the data buffer.
1079 * @opts Command header options, excluding the FIS length
1080 * and the number of PRD entries.
1081 * @timeout Time in ms to wait for the command to complete.
1082 *
1083 * return value
1084 * 0 Command completed successfully.
1085 * -EFAULT The buffer address is not correctly aligned.
1086 * -EBUSY Internal command or other IO in progress.
1087 * -EAGAIN Time out waiting for command to complete.
1088 */
1089 static int mtip_exec_internal_command(struct mtip_port *port,
1090 struct host_to_dev_fis *fis,
1091 int fis_len,
1092 dma_addr_t buffer,
1093 int buf_len,
1094 u32 opts,
1095 gfp_t atomic,
1096 unsigned long timeout)
1097 {
1098 struct mtip_cmd_sg *command_sg;
1099 DECLARE_COMPLETION_ONSTACK(wait);
1100 struct mtip_cmd *int_cmd;
1101 struct driver_data *dd = port->dd;
1102 int rv = 0;
1103
1104 /* Make sure the buffer is 8-byte aligned. This is ASIC-specific. */
1105 if (buffer & 0x00000007) {
1106 dev_err(&dd->pdev->dev, "SG buffer is not 8 byte aligned\n");
1107 return -EFAULT;
1108 }
1109
1110 int_cmd = mtip_get_int_command(dd);
1111
1112 set_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
1113
1114 if (fis->command == ATA_CMD_SEC_ERASE_PREP)
1115 set_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags);
1116
1117 clear_bit(MTIP_PF_DM_ACTIVE_BIT, &port->flags);
1118
1119 if (atomic == GFP_KERNEL) {
1120 if (fis->command != ATA_CMD_STANDBYNOW1) {
1121 /* wait for io to complete if non atomic */
1122 if (mtip_quiesce_io(port,
1123 MTIP_QUIESCE_IO_TIMEOUT_MS) < 0) {
1124 dev_warn(&dd->pdev->dev,
1125 "Failed to quiesce IO\n");
1126 mtip_put_int_command(dd, int_cmd);
1127 clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
1128 wake_up_interruptible(&port->svc_wait);
1129 return -EBUSY;
1130 }
1131 }
1132
1133 /* Set the completion function and data for the command. */
1134 int_cmd->comp_data = &wait;
1135 int_cmd->comp_func = mtip_completion;
1136
1137 } else {
1138 /* Clear completion - we're going to poll */
1139 int_cmd->comp_data = NULL;
1140 int_cmd->comp_func = mtip_null_completion;
1141 }
1142
1143 /* Copy the command to the command table */
1144 memcpy(int_cmd->command, fis, fis_len*4);
1145
1146 /* Populate the SG list */
1147 int_cmd->command_header->opts =
1148 __force_bit2int cpu_to_le32(opts | fis_len);
1149 if (buf_len) {
1150 command_sg = int_cmd->command + AHCI_CMD_TBL_HDR_SZ;
1151
1152 command_sg->info =
1153 __force_bit2int cpu_to_le32((buf_len-1) & 0x3FFFFF);
1154 command_sg->dba =
1155 __force_bit2int cpu_to_le32(buffer & 0xFFFFFFFF);
1156 command_sg->dba_upper =
1157 __force_bit2int cpu_to_le32((buffer >> 16) >> 16);
1158
1159 int_cmd->command_header->opts |=
1160 __force_bit2int cpu_to_le32((1 << 16));
1161 }
1162
1163 /* Populate the command header */
1164 int_cmd->command_header->byte_count = 0;
1165
1166 /* Issue the command to the hardware */
1167 mtip_issue_non_ncq_command(port, MTIP_TAG_INTERNAL);
1168
1169 if (atomic == GFP_KERNEL) {
1170 /* Wait for the command to complete or timeout. */
1171 if ((rv = wait_for_completion_interruptible_timeout(
1172 &wait,
1173 msecs_to_jiffies(timeout))) <= 0) {
1174 if (rv == -ERESTARTSYS) { /* interrupted */
1175 dev_err(&dd->pdev->dev,
1176 "Internal command [%02X] was interrupted after %lu ms\n",
1177 fis->command, timeout);
1178 rv = -EINTR;
1179 goto exec_ic_exit;
1180 } else if (rv == 0) /* timeout */
1181 dev_err(&dd->pdev->dev,
1182 "Internal command did not complete [%02X] within timeout of %lu ms\n",
1183 fis->command, timeout);
1184 else
1185 dev_err(&dd->pdev->dev,
1186 "Internal command [%02X] wait returned code [%d] after %lu ms - unhandled\n",
1187 fis->command, rv, timeout);
1188
1189 if (mtip_check_surprise_removal(dd->pdev) ||
1190 test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
1191 &dd->dd_flag)) {
1192 dev_err(&dd->pdev->dev,
1193 "Internal command [%02X] wait returned due to SR\n",
1194 fis->command);
1195 rv = -ENXIO;
1196 goto exec_ic_exit;
1197 }
1198 mtip_device_reset(dd); /* recover from timeout issue */
1199 rv = -EAGAIN;
1200 goto exec_ic_exit;
1201 }
1202 } else {
1203 u32 hba_stat, port_stat;
1204
1205 /* Spin for <timeout> checking if command still outstanding */
1206 timeout = jiffies + msecs_to_jiffies(timeout);
1207 while ((readl(port->cmd_issue[MTIP_TAG_INTERNAL])
1208 & (1 << MTIP_TAG_INTERNAL))
1209 && time_before(jiffies, timeout)) {
1210 if (mtip_check_surprise_removal(dd->pdev)) {
1211 rv = -ENXIO;
1212 goto exec_ic_exit;
1213 }
1214 if ((fis->command != ATA_CMD_STANDBYNOW1) &&
1215 test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
1216 &dd->dd_flag)) {
1217 rv = -ENXIO;
1218 goto exec_ic_exit;
1219 }
1220 port_stat = readl(port->mmio + PORT_IRQ_STAT);
1221 if (!port_stat)
1222 continue;
1223
1224 if (port_stat & PORT_IRQ_ERR) {
1225 dev_err(&dd->pdev->dev,
1226 "Internal command [%02X] failed\n",
1227 fis->command);
1228 mtip_device_reset(dd);
1229 rv = -EIO;
1230 goto exec_ic_exit;
1231 } else {
1232 writel(port_stat, port->mmio + PORT_IRQ_STAT);
1233 hba_stat = readl(dd->mmio + HOST_IRQ_STAT);
1234 if (hba_stat)
1235 writel(hba_stat,
1236 dd->mmio + HOST_IRQ_STAT);
1237 }
1238 break;
1239 }
1240 }
1241
1242 if (readl(port->cmd_issue[MTIP_TAG_INTERNAL])
1243 & (1 << MTIP_TAG_INTERNAL)) {
1244 rv = -ENXIO;
1245 if (!test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)) {
1246 mtip_device_reset(dd);
1247 rv = -EAGAIN;
1248 }
1249 }
1250 exec_ic_exit:
1251 /* Clear the allocated and active bits for the internal command. */
1252 mtip_put_int_command(dd, int_cmd);
1253 clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
1254 if (rv >= 0 && mtip_pause_ncq(port, fis)) {
1255 /* NCQ paused */
1256 return rv;
1257 }
1258 wake_up_interruptible(&port->svc_wait);
1259
1260 return rv;
1261 }
1262
1263 /*
1264 * Byte-swap ATA ID strings.
1265 *
1266 * ATA identify data contains strings in byte-swapped 16-bit words.
1267 * They must be swapped (on all architectures) to be usable as C strings.
1268 * This function swaps bytes in-place.
1269 *
1270 * @buf The buffer location of the string
1271 * @len The number of bytes to swap
1272 *
1273 * return value
1274 * None
1275 */
1276 static inline void ata_swap_string(u16 *buf, unsigned int len)
1277 {
1278 int i;
1279 for (i = 0; i < (len/2); i++)
1280 be16_to_cpus(&buf[i]);
1281 }
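/*
 * For example, an identify model field holding the raw bytes "iMrcno"
 * (each 16-bit word stored byte-swapped) reads "Micron" after the
 * in-place swap.
 */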
1282
1283 static void mtip_set_timeout(struct driver_data *dd,
1284 struct host_to_dev_fis *fis,
1285 unsigned int *timeout, u8 erasemode)
1286 {
1287 switch (fis->command) {
1288 case ATA_CMD_DOWNLOAD_MICRO:
1289 *timeout = 120000; /* 2 minutes */
1290 break;
1291 case ATA_CMD_SEC_ERASE_UNIT:
1292 case 0xFC:
1293 if (erasemode)
1294 *timeout = ((*(dd->port->identify + 90) * 2) * 60000);
1295 else
1296 *timeout = ((*(dd->port->identify + 89) * 2) * 60000);
1297 break;
1298 case ATA_CMD_STANDBYNOW1:
1299 *timeout = 120000; /* 2 minutes */
1300 break;
1301 case 0xF7:
1302 case 0xFA:
1303 *timeout = 60000; /* 60 seconds */
1304 break;
1305 case ATA_CMD_SMART:
1306 *timeout = 15000; /* 15 seconds */
1307 break;
1308 default:
1309 *timeout = MTIP_IOCTL_CMD_TIMEOUT_MS;
1310 break;
1311 }
1312 }
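
The secure-erase arm above is the only data-dependent case: identify words 89 (normal) and 90 (enhanced) advertise the erase time in 2-minute units, which the driver converts to milliseconds with the * 2 * 60000 factor. A worked example (the word value is hypothetical):

#include <stdio.h>

int main(void)
{
	unsigned short id_word89 = 60;	/* hypothetical identify word 89 */
	unsigned int timeout_ms = id_word89 * 2 * 60000;

	/* prints: 7200000 ms (120 minutes) */
	printf("%u ms (%u minutes)\n", timeout_ms, timeout_ms / 60000);
	return 0;
}
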
1313
1314 /*
1315 * Request the device identity information.
1316 *
1317 * If a user space buffer is not specified, i.e. is NULL, the
1318 * identify information is still read from the drive and placed
1319 * into the identify data buffer (@e port->identify) in the
1320 * port data structure.
1321 * When the identify buffer contains valid identify information @e
1322 * port->identify_valid is non-zero.
1323 *
1324 * @port Pointer to the port structure.
1325 * @user_buffer A user space buffer where the identify data should be
1326 * copied.
1327 *
1328 * return value
1329 * 0 Command completed successfully.
1330 * -EFAULT An error occurred while copying data to the user buffer.
1331 * -1 Command failed.
1332 */
1333 static int mtip_get_identify(struct mtip_port *port, void __user *user_buffer)
1334 {
1335 int rv = 0;
1336 struct host_to_dev_fis fis;
1337
1338 if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
1339 return -EFAULT;
1340
1341 /* Build the FIS. */
1342 memset(&fis, 0, sizeof(struct host_to_dev_fis));
1343 fis.type = 0x27;
1344 fis.opts = 1 << 7;
1345 fis.command = ATA_CMD_ID_ATA;
1346
1347 /* Set the identify information as invalid. */
1348 port->identify_valid = 0;
1349
1350 /* Clear the identify information. */
1351 memset(port->identify, 0, sizeof(u16) * ATA_ID_WORDS);
1352
1353 /* Execute the command. */
1354 if (mtip_exec_internal_command(port,
1355 &fis,
1356 5,
1357 port->identify_dma,
1358 sizeof(u16) * ATA_ID_WORDS,
1359 0,
1360 GFP_KERNEL,
1361 MTIP_INT_CMD_TIMEOUT_MS)
1362 < 0) {
1363 rv = -1;
1364 goto out;
1365 }
1366
1367 /*
1368 * Perform any necessary byte-swapping. Yes, the kernel does in fact
1369 * perform field-sensitive swapping on the string fields.
1370 * See the kernel use of ata_id_string() for proof of this.
1371 */
1372 #ifdef __LITTLE_ENDIAN
1373 ata_swap_string(port->identify + 27, 40); /* model string*/
1374 ata_swap_string(port->identify + 23, 8); /* firmware string*/
1375 ata_swap_string(port->identify + 10, 20); /* serial# string*/
1376 #else
1377 {
1378 int i;
1379 for (i = 0; i < ATA_ID_WORDS; i++)
1380 port->identify[i] = le16_to_cpu(port->identify[i]);
1381 }
1382 #endif
1383
1384 /* Check security locked state */
1385 if (port->identify[128] & 0x4)
1386 set_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag);
1387 else
1388 clear_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag);
1389
1390 #ifdef MTIP_TRIM /* Disabling TRIM support temporarily */
1391 /* Demux ID.DRAT & ID.RZAT to determine trim support */
1392 if (port->identify[69] & (1 << 14) && port->identify[69] & (1 << 5))
1393 port->dd->trim_supp = true;
1394 else
1395 #endif
1396 port->dd->trim_supp = false;
1397
1398 /* Set the identify buffer as valid. */
1399 port->identify_valid = 1;
1400
1401 if (user_buffer) {
1402 if (copy_to_user(
1403 user_buffer,
1404 port->identify,
1405 ATA_ID_WORDS * sizeof(u16))) {
1406 rv = -EFAULT;
1407 goto out;
1408 }
1409 }
1410
1411 out:
1412 return rv;
1413 }
1414
1415 /*
1416 * Issue a standby immediate command to the device.
1417 *
1418 * @port Pointer to the port structure.
1419 *
1420 * return value
1421 * 0 Command was executed successfully.
1422 * -1 An error occurred while executing the command.
1423 */
1424 static int mtip_standby_immediate(struct mtip_port *port)
1425 {
1426 int rv;
1427 struct host_to_dev_fis fis;
1428 unsigned long start;
1429 unsigned int timeout;
1430
1431 /* Build the FIS. */
1432 memset(&fis, 0, sizeof(struct host_to_dev_fis));
1433 fis.type = 0x27;
1434 fis.opts = 1 << 7;
1435 fis.command = ATA_CMD_STANDBYNOW1;
1436
1437 mtip_set_timeout(port->dd, &fis, &timeout, 0);
1438
1439 start = jiffies;
1440 rv = mtip_exec_internal_command(port,
1441 &fis,
1442 5,
1443 0,
1444 0,
1445 0,
1446 GFP_ATOMIC,
1447 timeout);
1448 dbg_printk(MTIP_DRV_NAME "Time taken to complete standby cmd: %d ms\n",
1449 jiffies_to_msecs(jiffies - start));
1450 if (rv)
1451 dev_warn(&port->dd->pdev->dev,
1452 "STANDBY IMMEDIATE command failed.\n");
1453
1454 return rv;
1455 }
1456
1457 /*
1458 * Issue a READ LOG EXT command to the device.
1459 *
1460 * @port pointer to the port structure.
1461 * @page page number to fetch
1462 * @buffer pointer to buffer
1463 * @buffer_dma dma address corresponding to @buffer
1464 * @sectors page length to fetch, in sectors
1465 *
1466 * return value
1467 * @rv return value from mtip_exec_internal_command()
1468 */
1469 static int mtip_read_log_page(struct mtip_port *port, u8 page, u16 *buffer,
1470 dma_addr_t buffer_dma, unsigned int sectors)
1471 {
1472 struct host_to_dev_fis fis;
1473
1474 memset(&fis, 0, sizeof(struct host_to_dev_fis));
1475 fis.type = 0x27;
1476 fis.opts = 1 << 7;
1477 fis.command = ATA_CMD_READ_LOG_EXT;
1478 fis.sect_count = sectors & 0xFF;
1479 fis.sect_cnt_ex = (sectors >> 8) & 0xFF;
1480 fis.lba_low = page;
1481 fis.lba_mid = 0;
1482 fis.device = ATA_DEVICE_OBS;
1483
1484 memset(buffer, 0, sectors * ATA_SECT_SIZE);
1485
1486 return mtip_exec_internal_command(port,
1487 &fis,
1488 5,
1489 buffer_dma,
1490 sectors * ATA_SECT_SIZE,
1491 0,
1492 GFP_ATOMIC,
1493 MTIP_INT_CMD_TIMEOUT_MS);
1494 }
1495
1496 /*
1497 * Issue a SMART READ DATA command to the device.
1498 *
1499 * @port pointer to the port structure.
1500 * @buffer pointer to buffer
1501 * @buffer_dma dma address corresponding to @buffer
1502 *
1503 * return value
1504 * @rv return value from mtip_exec_internal_command()
1505 */
1506 static int mtip_get_smart_data(struct mtip_port *port, u8 *buffer,
1507 dma_addr_t buffer_dma)
1508 {
1509 struct host_to_dev_fis fis;
1510
1511 memset(&fis, 0, sizeof(struct host_to_dev_fis));
1512 fis.type = 0x27;
1513 fis.opts = 1 << 7;
1514 fis.command = ATA_CMD_SMART;
1515 fis.features = 0xD0;
1516 fis.sect_count = 1;
1517 fis.lba_mid = 0x4F;
1518 fis.lba_hi = 0xC2;
1519 fis.device = ATA_DEVICE_OBS;
1520
1521 return mtip_exec_internal_command(port,
1522 &fis,
1523 5,
1524 buffer_dma,
1525 ATA_SECT_SIZE,
1526 0,
1527 GFP_ATOMIC,
1528 15000);
1529 }
1530
1531 /*
1532 * Get the value of a smart attribute
1533 *
1534 * @port pointer to the port structure
1535 * @id attribute number
1536 * @attrib pointer to return attrib information corresponding to @id
1537 *
1538 * return value
1539 * -EINVAL NULL buffer passed or unsupported attribute @id.
1540 * -EPERM Identify data not valid, SMART not supported or not enabled
1541 */
1542 static int mtip_get_smart_attr(struct mtip_port *port, unsigned int id,
1543 struct smart_attr *attrib)
1544 {
1545 int rv, i;
1546 struct smart_attr *pattr;
1547
1548 if (!attrib)
1549 return -EINVAL;
1550
1551 if (!port->identify_valid) {
1552 dev_warn(&port->dd->pdev->dev, "IDENTIFY DATA not valid\n");
1553 return -EPERM;
1554 }
1555 if (!(port->identify[82] & 0x1)) {
1556 dev_warn(&port->dd->pdev->dev, "SMART not supported\n");
1557 return -EPERM;
1558 }
1559 if (!(port->identify[85] & 0x1)) {
1560 dev_warn(&port->dd->pdev->dev, "SMART not enabled\n");
1561 return -EPERM;
1562 }
1563
1564 memset(port->smart_buf, 0, ATA_SECT_SIZE);
1565 rv = mtip_get_smart_data(port, port->smart_buf, port->smart_buf_dma);
1566 if (rv) {
1567 dev_warn(&port->dd->pdev->dev, "Failed to get SMART data\n");
1568 return rv;
1569 }
1570
1571 pattr = (struct smart_attr *)(port->smart_buf + 2);
1572 for (i = 0; i < 29; i++, pattr++)
1573 if (pattr->attr_id == id) {
1574 memcpy(attrib, pattr, sizeof(struct smart_attr));
1575 break;
1576 }
1577
1578 if (i == 29) {
1579 dev_warn(&port->dd->pdev->dev,
1580 "Query for invalid SMART attribute ID\n");
1581 rv = -EINVAL;
1582 }
1583
1584 return rv;
1585 }
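
The scan above walks fixed-size attribute entries starting two bytes into the 512-byte SMART data sector (the driver skips what appears to be the two-byte structure-revision field). A sketch of the assumed entry layout, mirroring struct smart_attr from mtip32xx.h; the field names and the packed 12-byte size are stated here as assumptions:

#include <stdint.h>
#include <stdio.h>

/* Assumed on-disk attribute entry; see struct smart_attr in mtip32xx.h. */
struct smart_attr_ex {
	uint8_t  attr_id;	/* attribute number, 0 = unused slot */
	uint16_t flags;
	uint8_t  cur;		/* current normalized value */
	uint8_t  worst;		/* worst recorded value */
	uint32_t data;		/* raw value, little-endian */
	uint8_t  res[3];
} __attribute__((packed));

int main(void)
{
	/* 2-byte header + 29 entries of 12 bytes = 350 of 512 bytes */
	printf("entry size = %zu, table end = %zu\n",
	       sizeof(struct smart_attr_ex),
	       2 + 29 * sizeof(struct smart_attr_ex));
	return 0;
}
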
1586
1587 /*
1588 * Trim unused sectors
1589 *
1590 * @dd pointer to driver_data structure
1591 * @lba starting lba
1592 * @len # of 512b sectors to trim
1593 *
1594 * return value
1595 * -ENOMEM Out of dma memory
1596 * -EINVAL Invalid parameters passed in, trim not supported
1597 * -EIO Error submitting trim request to hw
1598 */
1599 static int mtip_send_trim(struct driver_data *dd, unsigned int lba,
1600 unsigned int len)
1601 {
1602 int i, rv = 0;
1603 u64 tlba, tlen, sect_left;
1604 struct mtip_trim_entry *buf;
1605 dma_addr_t dma_addr;
1606 struct host_to_dev_fis fis;
1607
1608 if (!len || dd->trim_supp == false)
1609 return -EINVAL;
1610
1611 /* Trim request too big */
1612 WARN_ON(len > (MTIP_MAX_TRIM_ENTRY_LEN * MTIP_MAX_TRIM_ENTRIES));
1613
1614 /* Trim request not aligned on 4k boundary */
1615 WARN_ON(len % 8 != 0);
1616
1617 /* Warn if vu_trim structure is too big */
1618 WARN_ON(sizeof(struct mtip_trim) > ATA_SECT_SIZE);
1619
1620 /* Allocate a DMA buffer for the trim structure */
1621 buf = dmam_alloc_coherent(&dd->pdev->dev, ATA_SECT_SIZE, &dma_addr,
1622 GFP_KERNEL);
1623 if (!buf)
1624 return -ENOMEM;
1625 memset(buf, 0, ATA_SECT_SIZE);
1626
1627 for (i = 0, sect_left = len, tlba = lba;
1628 i < MTIP_MAX_TRIM_ENTRIES && sect_left;
1629 i++) {
1630 tlen = (sect_left >= MTIP_MAX_TRIM_ENTRY_LEN ?
1631 MTIP_MAX_TRIM_ENTRY_LEN :
1632 sect_left);
1633 buf[i].lba = __force_bit2int cpu_to_le32(tlba);
1634 buf[i].range = __force_bit2int cpu_to_le16(tlen);
1635 tlba += tlen;
1636 sect_left -= tlen;
1637 }
1638 WARN_ON(sect_left != 0);
1639
1640 /* Build the fis */
1641 memset(&fis, 0, sizeof(struct host_to_dev_fis));
1642 fis.type = 0x27;
1643 fis.opts = 1 << 7;
1644 fis.command = 0xfb;
1645 fis.features = 0x60;
1646 fis.sect_count = 1;
1647 fis.device = ATA_DEVICE_OBS;
1648
1649 if (mtip_exec_internal_command(dd->port,
1650 &fis,
1651 5,
1652 dma_addr,
1653 ATA_SECT_SIZE,
1654 0,
1655 GFP_KERNEL,
1656 MTIP_TRIM_TIMEOUT_MS) < 0)
1657 rv = -EIO;
1658
1659 dmam_free_coherent(&dd->pdev->dev, ATA_SECT_SIZE, buf, dma_addr);
1660 return rv;
1661 }
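
The loop above chops the request into at most MTIP_MAX_TRIM_ENTRIES (lba, range) pairs, each capped at MTIP_MAX_TRIM_ENTRY_LEN sectors. A userspace sketch of the same splitting; the two limits below are illustrative stand-ins, not the driver's real values:

#include <stdint.h>
#include <stdio.h>

#define MAX_ENTRY_LEN	0xfff8u	/* assumed per-entry sector cap */
#define MAX_ENTRIES	8	/* assumed entry count */

int main(void)
{
	uint64_t lba = 0, left = 200000;	/* hypothetical request */
	int i;

	for (i = 0; i < MAX_ENTRIES && left; i++) {
		uint64_t tlen = left >= MAX_ENTRY_LEN ? MAX_ENTRY_LEN : left;

		printf("entry %d: lba=%llu range=%llu\n", i,
		       (unsigned long long)lba, (unsigned long long)tlen);
		lba += tlen;
		left -= tlen;
	}
	return 0;
}
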
1662
1663 /*
1664 * Get the drive capacity.
1665 *
1666 * @dd Pointer to the device data structure.
1667 * @sectors Pointer to the variable that will receive the sector count.
1668 *
1669 * return value
1670 * 1 Capacity was returned successfully.
1671 * 0 The identify information is invalid.
1672 */
1673 static bool mtip_hw_get_capacity(struct driver_data *dd, sector_t *sectors)
1674 {
1675 struct mtip_port *port = dd->port;
1676 u64 total, raw0, raw1, raw2, raw3;
1677 raw0 = port->identify[100];
1678 raw1 = port->identify[101];
1679 raw2 = port->identify[102];
1680 raw3 = port->identify[103];
1681 total = raw0 | raw1<<16 | raw2<<32 | raw3<<48;
1682 *sectors = total;
1683 return (bool) !!port->identify_valid;
1684 }
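
The capacity math above simply glues identify words 100-103 back together, least-significant word first. Because the raw* variables are u64, the 32- and 48-bit shifts are well defined. A worked userspace equivalent (the sample words are hypothetical):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t id[4] = { 0x6af0, 0x0ee7, 0x0000, 0x0000 }; /* words 100-103 */
	uint64_t total = (uint64_t)id[0] | (uint64_t)id[1] << 16 |
			 (uint64_t)id[2] << 32 | (uint64_t)id[3] << 48;

	/* prints: 250047216 sectors (122093 MB) */
	printf("%llu sectors (%llu MB)\n",
	       (unsigned long long)total,
	       (unsigned long long)(total * 512 >> 20));
	return 0;
}
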
1685
1686 /*
1687 * Display the identify command data.
1688 *
1689 * @port Pointer to the port data structure.
1690 *
1691 * return value
1692 * None
1693 */
1694 static void mtip_dump_identify(struct mtip_port *port)
1695 {
1696 sector_t sectors;
1697 unsigned short revid;
1698 char cbuf[42];
1699
1700 if (!port->identify_valid)
1701 return;
1702
1703 strlcpy(cbuf, (char *)(port->identify+10), 21);
1704 dev_info(&port->dd->pdev->dev,
1705 "Serial No.: %s\n", cbuf);
1706
1707 strlcpy(cbuf, (char *)(port->identify+23), 9);
1708 dev_info(&port->dd->pdev->dev,
1709 "Firmware Ver.: %s\n", cbuf);
1710
1711 strlcpy(cbuf, (char *)(port->identify+27), 41);
1712 dev_info(&port->dd->pdev->dev, "Model: %s\n", cbuf);
1713
1714 dev_info(&port->dd->pdev->dev, "Security: %04x %s\n",
1715 port->identify[128],
1716 port->identify[128] & 0x4 ? "(LOCKED)" : "");
1717
1718 if (mtip_hw_get_capacity(port->dd, &sectors))
1719 dev_info(&port->dd->pdev->dev,
1720 "Capacity: %llu sectors (%llu MB)\n",
1721 (u64)sectors,
1722 ((u64)sectors) * ATA_SECT_SIZE >> 20);
1723
1724 pci_read_config_word(port->dd->pdev, PCI_REVISION_ID, &revid);
1725 switch (revid & 0xFF) {
1726 case 0x1:
1727 strlcpy(cbuf, "A0", 3);
1728 break;
1729 case 0x3:
1730 strlcpy(cbuf, "A2", 3);
1731 break;
1732 default:
1733 strlcpy(cbuf, "?", 2);
1734 break;
1735 }
1736 dev_info(&port->dd->pdev->dev,
1737 "Card Type: %s\n", cbuf);
1738 }
1739
1740 /*
1741 * Map the commands scatter list into the command table.
1742 *
1743 * @command Pointer to the command.
1744 * @nents Number of scatter list entries.
1745 *
1746 * return value
1747 * None
1748 */
1749 static inline void fill_command_sg(struct driver_data *dd,
1750 struct mtip_cmd *command,
1751 int nents)
1752 {
1753 int n;
1754 unsigned int dma_len;
1755 struct mtip_cmd_sg *command_sg;
1756 struct scatterlist *sg = command->sg;
1757
1758 command_sg = command->command + AHCI_CMD_TBL_HDR_SZ;
1759
1760 for (n = 0; n < nents; n++) {
1761 dma_len = sg_dma_len(sg);
1762 if (dma_len > 0x400000)
1763 dev_err(&dd->pdev->dev,
1764 "DMA segment length truncated\n");
1765 command_sg->info = __force_bit2int
1766 cpu_to_le32((dma_len-1) & 0x3FFFFF);
1767 command_sg->dba = __force_bit2int
1768 cpu_to_le32(sg_dma_address(sg));
1769 command_sg->dba_upper = __force_bit2int
1770 cpu_to_le32((sg_dma_address(sg) >> 16) >> 16);
1771 command_sg++;
1772 sg++;
1773 }
1774 }
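
Each loop iteration above emits one AHCI physical-region descriptor: the info field carries the byte count minus one in its low 22 bits (hence the 4 MB truncation warning), and the 64-bit bus address is split across dba/dba_upper with a two-step shift so the expression stays defined even when dma_addr_t is only 32 bits wide. A standalone sketch with hypothetical values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t dma_addr = 0x1f0000000ULL;	/* hypothetical bus address */
	uint32_t dma_len  = 0x10000;		/* one 64 KB segment */

	uint32_t info      = (dma_len - 1) & 0x3FFFFF;
	uint32_t dba       = (uint32_t)dma_addr;
	uint32_t dba_upper = (uint32_t)((dma_addr >> 16) >> 16);

	/* prints: info=00FFFF dba=F0000000 dba_upper=00000001 */
	printf("info=%06X dba=%08X dba_upper=%08X\n", info, dba, dba_upper);
	return 0;
}
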
1775
1776 /*
1777 * @brief Execute a drive command.
1778 *
1779 * return value 0 The command completed successfully.
1780 * return value -1 An error occurred while executing the command.
1781 */
1782 static int exec_drive_task(struct mtip_port *port, u8 *command)
1783 {
1784 struct host_to_dev_fis fis;
1785 struct host_to_dev_fis *reply = (port->rxfis + RX_FIS_D2H_REG);
1786 unsigned int to;
1787
1788 /* Build the FIS. */
1789 memset(&fis, 0, sizeof(struct host_to_dev_fis));
1790 fis.type = 0x27;
1791 fis.opts = 1 << 7;
1792 fis.command = command[0];
1793 fis.features = command[1];
1794 fis.sect_count = command[2];
1795 fis.sector = command[3];
1796 fis.cyl_low = command[4];
1797 fis.cyl_hi = command[5];
1798 fis.device = command[6] & ~0x10; /* Clear the dev bit*/
1799
1800 mtip_set_timeout(port->dd, &fis, &to, 0);
1801
1802 dbg_printk(MTIP_DRV_NAME " %s: User Command: cmd %x, feat %x, nsect %x, sect %x, lcyl %x, hcyl %x, sel %x\n",
1803 __func__,
1804 command[0],
1805 command[1],
1806 command[2],
1807 command[3],
1808 command[4],
1809 command[5],
1810 command[6]);
1811
1812 /* Execute the command. */
1813 if (mtip_exec_internal_command(port,
1814 &fis,
1815 5,
1816 0,
1817 0,
1818 0,
1819 GFP_KERNEL,
1820 to) < 0) {
1821 return -1;
1822 }
1823
1824 command[0] = reply->command; /* Status*/
1825 command[1] = reply->features; /* Error*/
1826 command[4] = reply->cyl_low;
1827 command[5] = reply->cyl_hi;
1828
1829 dbg_printk(MTIP_DRV_NAME " %s: Completion Status: stat %x, err %x , cyl_lo %x cyl_hi %x\n",
1830 __func__,
1831 command[0],
1832 command[1],
1833 command[4],
1834 command[5]);
1835
1836 return 0;
1837 }
1838
1839 /*
1840 * @brief Execute a drive command.
1841 *
1842 * @param port Pointer to the port data structure.
1843 * @param command Pointer to the user specified command parameters.
1844 * @param user_buffer Pointer to the user space buffer where read sector
1845 * data should be copied.
1846 *
1847 * return value 0 The command completed successfully.
1848 * return value -EFAULT An error occurred while copying the completion
1849 * data to the user space buffer.
1850 * return value -1 An error occurred while executing the command.
1851 */
1852 static int exec_drive_command(struct mtip_port *port, u8 *command,
1853 void __user *user_buffer)
1854 {
1855 struct host_to_dev_fis fis;
1856 struct host_to_dev_fis *reply;
1857 u8 *buf = NULL;
1858 dma_addr_t dma_addr = 0;
1859 int rv = 0, xfer_sz = command[3];
1860 unsigned int to;
1861
1862 if (xfer_sz) {
1863 if (!user_buffer)
1864 return -EFAULT;
1865
1866 buf = dmam_alloc_coherent(&port->dd->pdev->dev,
1867 ATA_SECT_SIZE * xfer_sz,
1868 &dma_addr,
1869 GFP_KERNEL);
1870 if (!buf) {
1871 dev_err(&port->dd->pdev->dev,
1872 "Memory allocation failed (%d bytes)\n",
1873 ATA_SECT_SIZE * xfer_sz);
1874 return -ENOMEM;
1875 }
1876 memset(buf, 0, ATA_SECT_SIZE * xfer_sz);
1877 }
1878
1879 /* Build the FIS. */
1880 memset(&fis, 0, sizeof(struct host_to_dev_fis));
1881 fis.type = 0x27;
1882 fis.opts = 1 << 7;
1883 fis.command = command[0];
1884 fis.features = command[2];
1885 fis.sect_count = command[3];
1886 if (fis.command == ATA_CMD_SMART) {
1887 fis.sector = command[1];
1888 fis.cyl_low = 0x4F;
1889 fis.cyl_hi = 0xC2;
1890 }
1891
1892 mtip_set_timeout(port->dd, &fis, &to, 0);
1893
1894 if (xfer_sz)
1895 reply = (port->rxfis + RX_FIS_PIO_SETUP);
1896 else
1897 reply = (port->rxfis + RX_FIS_D2H_REG);
1898
1899 dbg_printk(MTIP_DRV_NAME
1900 " %s: User Command: cmd %x, sect %x, "
1901 "feat %x, sectcnt %x\n",
1902 __func__,
1903 command[0],
1904 command[1],
1905 command[2],
1906 command[3]);
1907
1908 /* Execute the command. */
1909 if (mtip_exec_internal_command(port,
1910 &fis,
1911 5,
1912 (xfer_sz ? dma_addr : 0),
1913 (xfer_sz ? ATA_SECT_SIZE * xfer_sz : 0),
1914 0,
1915 GFP_KERNEL,
1916 to)
1917 < 0) {
1918 rv = -EFAULT;
1919 goto exit_drive_command;
1920 }
1921
1922 /* Collect the completion status. */
1923 command[0] = reply->command; /* Status*/
1924 command[1] = reply->features; /* Error*/
1925 command[2] = reply->sect_count;
1926
1927 dbg_printk(MTIP_DRV_NAME
1928 " %s: Completion Status: stat %x, "
1929 "err %x, nsect %x\n",
1930 __func__,
1931 command[0],
1932 command[1],
1933 command[2]);
1934
1935 if (xfer_sz) {
1936 if (copy_to_user(user_buffer,
1937 buf,
1938 ATA_SECT_SIZE * command[3])) {
1939 rv = -EFAULT;
1940 goto exit_drive_command;
1941 }
1942 }
1943 exit_drive_command:
1944 if (buf)
1945 dmam_free_coherent(&port->dd->pdev->dev,
1946 ATA_SECT_SIZE * xfer_sz, buf, dma_addr);
1947 return rv;
1948 }
1949
1950 /*
1951 * Indicates whether a command has a single sector payload.
1952 *
1953 * @command passed to the device to perform the certain event.
1954 * @features passed to the device to perform the certain event.
1955 *
1956 * return value
1957 * 1 command is one that always has a single sector payload,
1958 * regardless of the value in the Sector Count field.
1959 * 0 otherwise
1960 *
1961 */
1962 static unsigned int implicit_sector(unsigned char command,
1963 unsigned char features)
1964 {
1965 unsigned int rv = 0;
1966
1967 /* list of commands that have an implicit sector count of 1 */
1968 switch (command) {
1969 case ATA_CMD_SEC_SET_PASS:
1970 case ATA_CMD_SEC_UNLOCK:
1971 case ATA_CMD_SEC_ERASE_PREP:
1972 case ATA_CMD_SEC_ERASE_UNIT:
1973 case ATA_CMD_SEC_FREEZE_LOCK:
1974 case ATA_CMD_SEC_DISABLE_PASS:
1975 case ATA_CMD_PMP_READ:
1976 case ATA_CMD_PMP_WRITE:
1977 rv = 1;
1978 break;
1979 case ATA_CMD_SET_MAX:
1980 if (features == ATA_SET_MAX_UNLOCK)
1981 rv = 1;
1982 break;
1983 case ATA_CMD_SMART:
1984 if ((features == ATA_SMART_READ_VALUES) ||
1985 (features == ATA_SMART_READ_THRESHOLDS))
1986 rv = 1;
1987 break;
1988 case ATA_CMD_CONF_OVERLAY:
1989 if ((features == ATA_DCO_IDENTIFY) ||
1990 (features == ATA_DCO_SET))
1991 rv = 1;
1992 break;
1993 }
1994 return rv;
1995 }
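
Usage sketch: exec_drive_taskfile() below consults this helper to force transfer_size to a single sector even when the caller left sect_count at 0. The SMART opcodes used here (command 0xB0, features 0xD0/0xD1) are the linux/ata.h values, quoted for illustration; the helper is a trimmed-down stand-in covering only that case:

#include <stdio.h>

static unsigned int implicit_one_sector(unsigned char cmd, unsigned char feat)
{
	return cmd == 0xB0 && (feat == 0xD0 || feat == 0xD1);	/* SMART */
}

int main(void)
{
	unsigned int sect = 512;	/* ATA_SECT_SIZE */

	if (implicit_one_sector(0xB0, 0xD0))
		printf("transfer_size forced to %u bytes\n", sect);
	return 0;
}
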
1996
1997 /*
1998 * Executes a taskfile
1999 * See ide_taskfile_ioctl() for derivation
2000 */
2001 static int exec_drive_taskfile(struct driver_data *dd,
2002 void __user *buf,
2003 ide_task_request_t *req_task,
2004 int outtotal)
2005 {
2006 struct host_to_dev_fis fis;
2007 struct host_to_dev_fis *reply;
2008 u8 *outbuf = NULL;
2009 u8 *inbuf = NULL;
2010 dma_addr_t outbuf_dma = 0;
2011 dma_addr_t inbuf_dma = 0;
2012 dma_addr_t dma_buffer = 0;
2013 int err = 0;
2014 unsigned int taskin = 0;
2015 unsigned int taskout = 0;
2016 u8 nsect = 0;
2017 unsigned int timeout;
2018 unsigned int force_single_sector;
2019 unsigned int transfer_size;
2020 unsigned long task_file_data;
2021 int intotal = outtotal + req_task->out_size;
2022 int erasemode = 0;
2023
2024 taskout = req_task->out_size;
2025 taskin = req_task->in_size;
2026 /* 130560 = 512 * 0xFF*/
2027 if (taskin > 130560 || taskout > 130560) {
2028 err = -EINVAL;
2029 goto abort;
2030 }
2031
2032 if (taskout) {
2033 outbuf = memdup_user(buf + outtotal, taskout);
2034 if (IS_ERR(outbuf)) {
2035 err = PTR_ERR(outbuf);
2036 outbuf = NULL;
2037 goto abort;
2038 }
2039 outbuf_dma = pci_map_single(dd->pdev,
2040 outbuf,
2041 taskout,
2042 DMA_TO_DEVICE);
2043 if (outbuf_dma == 0) {
2044 err = -ENOMEM;
2045 goto abort;
2046 }
2047 dma_buffer = outbuf_dma;
2048 }
2049
2050 if (taskin) {
2051 inbuf = memdup_user(buf + intotal, taskin);
2052 if (IS_ERR(inbuf)) {
2053 err = PTR_ERR(inbuf);
2054 inbuf = NULL;
2055 goto abort;
2056 }
2057 inbuf_dma = pci_map_single(dd->pdev,
2058 inbuf,
2059 taskin, DMA_FROM_DEVICE);
2060 if (inbuf_dma == 0) {
2061 err = -ENOMEM;
2062 goto abort;
2063 }
2064 dma_buffer = inbuf_dma;
2065 }
2066
2067 /* Only PIO and non-data commands are supported via this ioctl. */
2068 switch (req_task->data_phase) {
2069 case TASKFILE_OUT:
2070 nsect = taskout / ATA_SECT_SIZE;
2071 reply = (dd->port->rxfis + RX_FIS_PIO_SETUP);
2072 break;
2073 case TASKFILE_IN:
2074 reply = (dd->port->rxfis + RX_FIS_PIO_SETUP);
2075 break;
2076 case TASKFILE_NO_DATA:
2077 reply = (dd->port->rxfis + RX_FIS_D2H_REG);
2078 break;
2079 default:
2080 err = -EINVAL;
2081 goto abort;
2082 }
2083
2084 /* Build the FIS. */
2085 memset(&fis, 0, sizeof(struct host_to_dev_fis));
2086
2087 fis.type = 0x27;
2088 fis.opts = 1 << 7;
2089 fis.command = req_task->io_ports[7];
2090 fis.features = req_task->io_ports[1];
2091 fis.sect_count = req_task->io_ports[2];
2092 fis.lba_low = req_task->io_ports[3];
2093 fis.lba_mid = req_task->io_ports[4];
2094 fis.lba_hi = req_task->io_ports[5];
2095 /* Clear the dev bit*/
2096 fis.device = req_task->io_ports[6] & ~0x10;
2097
2098 if ((req_task->in_flags.all == 0) && (req_task->out_flags.all & 1)) {
2099 req_task->in_flags.all =
2100 IDE_TASKFILE_STD_IN_FLAGS |
2101 (IDE_HOB_STD_IN_FLAGS << 8);
2102 fis.lba_low_ex = req_task->hob_ports[3];
2103 fis.lba_mid_ex = req_task->hob_ports[4];
2104 fis.lba_hi_ex = req_task->hob_ports[5];
2105 fis.features_ex = req_task->hob_ports[1];
2106 fis.sect_cnt_ex = req_task->hob_ports[2];
2107
2108 } else {
2109 req_task->in_flags.all = IDE_TASKFILE_STD_IN_FLAGS;
2110 }
2111
2112 force_single_sector = implicit_sector(fis.command, fis.features);
2113
2114 if ((taskin || taskout) && (!fis.sect_count)) {
2115 if (nsect)
2116 fis.sect_count = nsect;
2117 else {
2118 if (!force_single_sector) {
2119 dev_warn(&dd->pdev->dev,
2120 "data movement but "
2121 "sect_count is 0\n");
2122 err = -EINVAL;
2123 goto abort;
2124 }
2125 }
2126 }
2127
2128 dbg_printk(MTIP_DRV_NAME
2129 " %s: cmd %x, feat %x, nsect %x,"
2130 " sect/lbal %x, lcyl/lbam %x, hcyl/lbah %x,"
2131 " head/dev %x\n",
2132 __func__,
2133 fis.command,
2134 fis.features,
2135 fis.sect_count,
2136 fis.lba_low,
2137 fis.lba_mid,
2138 fis.lba_hi,
2139 fis.device);
2140
2141 /* check for erase mode support during secure erase.*/
2142 if ((fis.command == ATA_CMD_SEC_ERASE_UNIT) && outbuf &&
2143 (outbuf[0] & MTIP_SEC_ERASE_MODE)) {
2144 erasemode = 1;
2145 }
2146
2147 mtip_set_timeout(dd, &fis, &timeout, erasemode);
2148
2149 /* Determine the correct transfer size.*/
2150 if (force_single_sector)
2151 transfer_size = ATA_SECT_SIZE;
2152 else
2153 transfer_size = ATA_SECT_SIZE * fis.sect_count;
2154
2155 /* Execute the command.*/
2156 if (mtip_exec_internal_command(dd->port,
2157 &fis,
2158 5,
2159 dma_buffer,
2160 transfer_size,
2161 0,
2162 GFP_KERNEL,
2163 timeout) < 0) {
2164 err = -EIO;
2165 goto abort;
2166 }
2167
2168 task_file_data = readl(dd->port->mmio+PORT_TFDATA);
2169
2170 if ((req_task->data_phase == TASKFILE_IN) && !(task_file_data & 1)) {
2171 reply = dd->port->rxfis + RX_FIS_PIO_SETUP;
2172 req_task->io_ports[7] = reply->control;
2173 } else {
2174 reply = dd->port->rxfis + RX_FIS_D2H_REG;
2175 req_task->io_ports[7] = reply->command;
2176 }
2177
2178 /* reclaim the DMA buffers.*/
2179 if (inbuf_dma)
2180 pci_unmap_single(dd->pdev, inbuf_dma,
2181 taskin, DMA_FROM_DEVICE);
2182 if (outbuf_dma)
2183 pci_unmap_single(dd->pdev, outbuf_dma,
2184 taskout, DMA_TO_DEVICE);
2185 inbuf_dma = 0;
2186 outbuf_dma = 0;
2187
2188 /* return the ATA registers to the caller.*/
2189 req_task->io_ports[1] = reply->features;
2190 req_task->io_ports[2] = reply->sect_count;
2191 req_task->io_ports[3] = reply->lba_low;
2192 req_task->io_ports[4] = reply->lba_mid;
2193 req_task->io_ports[5] = reply->lba_hi;
2194 req_task->io_ports[6] = reply->device;
2195
2196 if (req_task->out_flags.all & 1) {
2197
2198 req_task->hob_ports[3] = reply->lba_low_ex;
2199 req_task->hob_ports[4] = reply->lba_mid_ex;
2200 req_task->hob_ports[5] = reply->lba_hi_ex;
2201 req_task->hob_ports[1] = reply->features_ex;
2202 req_task->hob_ports[2] = reply->sect_cnt_ex;
2203 }
2204 dbg_printk(MTIP_DRV_NAME
2205 " %s: Completion: stat %x,"
2206 "err %x, sect_cnt %x, lbalo %x,"
2207 "lbamid %x, lbahi %x, dev %x\n",
2208 __func__,
2209 req_task->io_ports[7],
2210 req_task->io_ports[1],
2211 req_task->io_ports[2],
2212 req_task->io_ports[3],
2213 req_task->io_ports[4],
2214 req_task->io_ports[5],
2215 req_task->io_ports[6]);
2216
2217 if (taskout) {
2218 if (copy_to_user(buf + outtotal, outbuf, taskout)) {
2219 err = -EFAULT;
2220 goto abort;
2221 }
2222 }
2223 if (taskin) {
2224 if (copy_to_user(buf + intotal, inbuf, taskin)) {
2225 err = -EFAULT;
2226 goto abort;
2227 }
2228 }
2229 abort:
2230 if (inbuf_dma)
2231 pci_unmap_single(dd->pdev, inbuf_dma,
2232 taskin, DMA_FROM_DEVICE);
2233 if (outbuf_dma)
2234 pci_unmap_single(dd->pdev, outbuf_dma,
2235 taskout, DMA_TO_DEVICE);
2236 kfree(outbuf);
2237 kfree(inbuf);
2238
2239 return err;
2240 }
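
The outtotal/intotal arithmetic above implies a single flat user buffer for HDIO_DRIVE_TASKFILE: the request header first, then the outbound payload, then space for the inbound payload. A small userspace sketch that prints those offsets (the 512-byte sizes are hypothetical):

#include <stdio.h>
#include <linux/hdreg.h>

int main(void)
{
	ide_task_request_t req = { .out_size = 512, .in_size = 512 };
	unsigned long outtotal = sizeof(req);		  /* outbuf offset */
	unsigned long intotal  = outtotal + req.out_size; /* inbuf offset */

	printf("req_task at +0, outbuf at +%lu, inbuf at +%lu\n",
	       outtotal, intotal);
	return 0;
}
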
2241
2242 /*
2243 * Handle IOCTL calls from the Block Layer.
2244 *
2245 * This function is called by the Block Layer when it receives an IOCTL
2246 * command that it does not understand. If the IOCTL command is not supported
2247 * this function returns -EINVAL.
2248 *
2249 * @dd Pointer to the driver data structure.
2250 * @cmd IOCTL command passed from the Block Layer.
2251 * @arg IOCTL argument passed from the Block Layer.
2252 *
2253 * return value
2254 * 0 The IOCTL completed successfully.
2255 * -EINVAL The specified command is not supported.
2256 * -EFAULT An error occurred copying data to a user space buffer.
2257 * -EIO An error occurred while executing the command.
2258 */
2259 static int mtip_hw_ioctl(struct driver_data *dd, unsigned int cmd,
2260 unsigned long arg)
2261 {
2262 switch (cmd) {
2263 case HDIO_GET_IDENTITY:
2264 {
2265 if (copy_to_user((void __user *)arg, dd->port->identify,
2266 sizeof(u16) * ATA_ID_WORDS))
2267 return -EFAULT;
2268 break;
2269 }
2270 case HDIO_DRIVE_CMD:
2271 {
2272 u8 drive_command[4];
2273
2274 /* Copy the user command info to our buffer. */
2275 if (copy_from_user(drive_command,
2276 (void __user *) arg,
2277 sizeof(drive_command)))
2278 return -EFAULT;
2279
2280 /* Execute the drive command. */
2281 if (exec_drive_command(dd->port,
2282 drive_command,
2283 (void __user *) (arg+4)))
2284 return -EIO;
2285
2286 /* Copy the status back to the users buffer. */
2287 if (copy_to_user((void __user *) arg,
2288 drive_command,
2289 sizeof(drive_command)))
2290 return -EFAULT;
2291
2292 break;
2293 }
2294 case HDIO_DRIVE_TASK:
2295 {
2296 u8 drive_command[7];
2297
2298 /* Copy the user command info to our buffer. */
2299 if (copy_from_user(drive_command,
2300 (void __user *) arg,
2301 sizeof(drive_command)))
2302 return -EFAULT;
2303
2304 /* Execute the drive command. */
2305 if (exec_drive_task(dd->port, drive_command))
2306 return -EIO;
2307
2308 /* Copy the status back to the users buffer. */
2309 if (copy_to_user((void __user *) arg,
2310 drive_command,
2311 sizeof(drive_command)))
2312 return -EFAULT;
2313
2314 break;
2315 }
2316 case HDIO_DRIVE_TASKFILE: {
2317 ide_task_request_t req_task;
2318 int ret, outtotal;
2319
2320 if (copy_from_user(&req_task, (void __user *) arg,
2321 sizeof(req_task)))
2322 return -EFAULT;
2323
2324 outtotal = sizeof(req_task);
2325
2326 ret = exec_drive_taskfile(dd, (void __user *) arg,
2327 &req_task, outtotal);
2328
2329 if (copy_to_user((void __user *) arg, &req_task,
2330 sizeof(req_task)))
2331 return -EFAULT;
2332
2333 return ret;
2334 }
2335
2336 default:
2337 return -EINVAL;
2338 }
2339 return 0;
2340 }
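
From userspace, the HDIO_DRIVE_CMD arm above follows the classic hdreg ABI: four command bytes in, then status/error/count back in args[0..2] followed by any sector data. A hedged sketch issuing SMART READ DATA (the device node name is hypothetical):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/hdreg.h>

int main(void)
{
	/* args[0]=command, args[1]=sector, args[2]=feature, args[3]=nsect;
	 * one 512-byte data sector follows on completion. */
	unsigned char args[4 + 512] = { 0xB0, 0, 0xD0, 1 };
	int fd = open("/dev/rssda", O_RDONLY);	/* hypothetical node */

	if (fd < 0)
		return 1;
	if (ioctl(fd, HDIO_DRIVE_CMD, args) == 0)
		printf("status=%02X error=%02X nsect=%02X\n",
		       args[0], args[1], args[2]);
	close(fd);
	return 0;
}
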
2341
2342 /*
2343 * Submit an IO to the hw
2344 *
2345 * This function is called by the block layer to issue an io
2346 * to the device. Upon completion, the callback function will
2347 * be called with the data parameter passed as the callback data.
2348 *
2349 * @dd Pointer to the driver data structure.
2350 * @rq Pointer to the request being issued.
2351 * @command Pointer to the command allocated for this request.
2352 * @nents Number of entries in the scatter list for this request.
2353 * @hctx Pointer to the blk-mq hardware context.
2354 *
2355 * If an internal command is in progress or error handling is
2356 * active, the command is queued on cmds_to_issue for the
2357 * service thread instead of being issued to the hardware
2358 * immediately.
2359 *
2360 * return value
2361 * None
2362 */
2363 static void mtip_hw_submit_io(struct driver_data *dd, struct request *rq,
2364 struct mtip_cmd *command, int nents,
2365 struct blk_mq_hw_ctx *hctx)
2366 {
2367 struct host_to_dev_fis *fis;
2368 struct mtip_port *port = dd->port;
2369 int dma_dir = rq_data_dir(rq) == READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
2370 u64 start = blk_rq_pos(rq);
2371 unsigned int nsect = blk_rq_sectors(rq);
2372
2373 /* Map the scatter list for DMA access */
2374 nents = dma_map_sg(&dd->pdev->dev, command->sg, nents, dma_dir);
2375
2376 prefetch(&port->flags);
2377
2378 command->scatter_ents = nents;
2379
2380 /*
2381 * The number of retries for this command before it is
2382 * reported as a failure to the upper layers.
2383 */
2384 command->retries = MTIP_MAX_RETRIES;
2385
2386 /* Fill out fis */
2387 fis = command->command;
2388 fis->type = 0x27;
2389 fis->opts = 1 << 7;
2390 if (dma_dir == DMA_FROM_DEVICE)
2391 fis->command = ATA_CMD_FPDMA_READ;
2392 else
2393 fis->command = ATA_CMD_FPDMA_WRITE;
2394 fis->lba_low = start & 0xFF;
2395 fis->lba_mid = (start >> 8) & 0xFF;
2396 fis->lba_hi = (start >> 16) & 0xFF;
2397 fis->lba_low_ex = (start >> 24) & 0xFF;
2398 fis->lba_mid_ex = (start >> 32) & 0xFF;
2399 fis->lba_hi_ex = (start >> 40) & 0xFF;
2400 fis->device = 1 << 6;
2401 fis->features = nsect & 0xFF;
2402 fis->features_ex = (nsect >> 8) & 0xFF;
2403 fis->sect_count = ((rq->tag << 3) | (rq->tag >> 5));
2404 fis->sect_cnt_ex = 0;
2405 fis->control = 0;
2406 fis->res2 = 0;
2407 fis->res3 = 0;
2408 fill_command_sg(dd, command, nents);
2409
2410 if (unlikely(command->unaligned))
2411 fis->device |= 1 << 7;
2412
2413 /* Populate the command header */
2414 command->command_header->opts =
2415 __force_bit2int cpu_to_le32(
2416 (nents << 16) | 5 | AHCI_CMD_PREFETCH);
2417 command->command_header->byte_count = 0;
2418
2419 /*
2420 * Set the completion function and data for the command
2421 * within this layer.
2422 */
2423 command->comp_data = dd;
2424 command->comp_func = mtip_async_complete;
2425 command->direction = dma_dir;
2426
2427 /*
2428 * To prevent this command from being issued
2429 * if an internal command is in progress or error handling is active.
2430 */
2431 if (unlikely(port->flags & MTIP_PF_PAUSE_IO)) {
2432 set_bit(rq->tag, port->cmds_to_issue);
2433 set_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags);
2434 return;
2435 }
2436
2437 /* Issue the command to the hardware */
2438 mtip_issue_ncq_command(port, rq->tag);
2439 }
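
Two packing details above are easy to miss: the NCQ FIS encoding puts the sector count in the features/features_ex bytes and the tag (shifted left by 3) in sect_count, while the 48-bit start LBA is spread one byte at a time across six fields. A standalone sketch of the byte split (the values are hypothetical):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t start = 0x0000123456789AULL;	/* hypothetical start sector */
	unsigned int nsect = 1024, tag = 7;

	/* prints: lba bytes: 9A 78 56 34 12 00 */
	printf("lba bytes: %02X %02X %02X %02X %02X %02X\n",
	       (unsigned int)(start & 0xFF),
	       (unsigned int)((start >> 8) & 0xFF),
	       (unsigned int)((start >> 16) & 0xFF),
	       (unsigned int)((start >> 24) & 0xFF),
	       (unsigned int)((start >> 32) & 0xFF),
	       (unsigned int)((start >> 40) & 0xFF));
	/* prints: features=00 features_ex=04 sect_count(tag)=38 */
	printf("features=%02X features_ex=%02X sect_count(tag)=%02X\n",
	       nsect & 0xFF, (nsect >> 8) & 0xFF,
	       ((tag << 3) | (tag >> 5)) & 0xFF);
	return 0;
}
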
2440
2441 /*
2442 * Sysfs status dump.
2443 *
2444 * @dev Pointer to the device structure, passed by the kernel.
2445 * @attr Pointer to the device_attribute structure passed by the kernel.
2446 * @buf Pointer to the char buffer that will receive the stats info.
2447 *
2448 * return value
2449 * The size, in bytes, of the data copied into buf.
2450 */
2451 static ssize_t mtip_hw_show_status(struct device *dev,
2452 struct device_attribute *attr,
2453 char *buf)
2454 {
2455 struct driver_data *dd = dev_to_disk(dev)->private_data;
2456 int size = 0;
2457
2458 if (test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag))
2459 size += sprintf(buf, "%s", "thermal_shutdown\n");
2460 else if (test_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag))
2461 size += sprintf(buf, "%s", "write_protect\n");
2462 else
2463 size += sprintf(buf, "%s", "online\n");
2464
2465 return size;
2466 }
2467
2468 static DEVICE_ATTR(status, S_IRUGO, mtip_hw_show_status, NULL);
2469
2470 /* debugfs entries */
2471
2472 static ssize_t show_device_status(struct device_driver *drv, char *buf)
2473 {
2474 int size = 0;
2475 struct driver_data *dd, *tmp;
2476 unsigned long flags;
2477 char id_buf[42];
2478 u16 status = 0;
2479
2480 spin_lock_irqsave(&dev_lock, flags);
2481 size += sprintf(&buf[size], "Devices Present:\n");
2482 list_for_each_entry_safe(dd, tmp, &online_list, online_list) {
2483 if (dd->pdev) {
2484 if (dd->port &&
2485 dd->port->identify &&
2486 dd->port->identify_valid) {
2487 strlcpy(id_buf,
2488 (char *) (dd->port->identify + 10), 21);
2489 status = *(dd->port->identify + 141);
2490 } else {
2491 memset(id_buf, 0, 42);
2492 status = 0;
2493 }
2494
2495 if (dd->port &&
2496 test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags)) {
2497 size += sprintf(&buf[size],
2498 " device %s %s (ftl rebuild %d %%)\n",
2499 dev_name(&dd->pdev->dev),
2500 id_buf,
2501 status);
2502 } else {
2503 size += sprintf(&buf[size],
2504 " device %s %s\n",
2505 dev_name(&dd->pdev->dev),
2506 id_buf);
2507 }
2508 }
2509 }
2510
2511 size += sprintf(&buf[size], "Devices Being Removed:\n");
2512 list_for_each_entry_safe(dd, tmp, &removing_list, remove_list) {
2513 if (dd->pdev) {
2514 if (dd->port &&
2515 dd->port->identify &&
2516 dd->port->identify_valid) {
2517 strlcpy(id_buf,
2518 (char *) (dd->port->identify+10), 21);
2519 status = *(dd->port->identify + 141);
2520 } else {
2521 memset(id_buf, 0, 42);
2522 status = 0;
2523 }
2524
2525 if (dd->port &&
2526 test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags)) {
2527 size += sprintf(&buf[size],
2528 " device %s %s (ftl rebuild %d %%)\n",
2529 dev_name(&dd->pdev->dev),
2530 id_buf,
2531 status);
2532 } else {
2533 size += sprintf(&buf[size],
2534 " device %s %s\n",
2535 dev_name(&dd->pdev->dev),
2536 id_buf);
2537 }
2538 }
2539 }
2540 spin_unlock_irqrestore(&dev_lock, flags);
2541
2542 return size;
2543 }
2544
2545 static ssize_t mtip_hw_read_device_status(struct file *f, char __user *ubuf,
2546 size_t len, loff_t *offset)
2547 {
2548 struct driver_data *dd = (struct driver_data *)f->private_data;
2549 int size = *offset;
2550 char *buf;
2551 int rv = 0;
2552
2553 if (!len || *offset)
2554 return 0;
2555
2556 buf = kzalloc(MTIP_DFS_MAX_BUF_SIZE, GFP_KERNEL);
2557 if (!buf) {
2558 dev_err(&dd->pdev->dev,
2559 "Memory allocation: status buffer\n");
2560 return -ENOMEM;
2561 }
2562
2563 size += show_device_status(NULL, buf);
2564
2565 *offset = size <= len ? size : len;
2566 size = copy_to_user(ubuf, buf, *offset);
2567 if (size)
2568 rv = -EFAULT;
2569
2570 kfree(buf);
2571 return rv ? rv : *offset;
2572 }
2573
2574 static ssize_t mtip_hw_read_registers(struct file *f, char __user *ubuf,
2575 size_t len, loff_t *offset)
2576 {
2577 struct driver_data *dd = (struct driver_data *)f->private_data;
2578 char *buf;
2579 u32 group_allocated;
2580 int size = *offset;
2581 int n, rv = 0;
2582
2583 if (!len || size)
2584 return 0;
2585
2586 buf = kzalloc(MTIP_DFS_MAX_BUF_SIZE, GFP_KERNEL);
2587 if (!buf) {
2588 dev_err(&dd->pdev->dev,
2589 "Memory allocation: register buffer\n");
2590 return -ENOMEM;
2591 }
2592
2593 size += sprintf(&buf[size], "H/ S ACTive : [ 0x");
2594
2595 for (n = dd->slot_groups-1; n >= 0; n--)
2596 size += sprintf(&buf[size], "%08X ",
2597 readl(dd->port->s_active[n]));
2598
2599 size += sprintf(&buf[size], "]\n");
2600 size += sprintf(&buf[size], "H/ Command Issue : [ 0x");
2601
2602 for (n = dd->slot_groups-1; n >= 0; n--)
2603 size += sprintf(&buf[size], "%08X ",
2604 readl(dd->port->cmd_issue[n]));
2605
2606 size += sprintf(&buf[size], "]\n");
2607 size += sprintf(&buf[size], "H/ Completed : [ 0x");
2608
2609 for (n = dd->slot_groups-1; n >= 0; n--)
2610 size += sprintf(&buf[size], "%08X ",
2611 readl(dd->port->completed[n]));
2612
2613 size += sprintf(&buf[size], "]\n");
2614 size += sprintf(&buf[size], "H/ PORT IRQ STAT : [ 0x%08X ]\n",
2615 readl(dd->port->mmio + PORT_IRQ_STAT));
2616 size += sprintf(&buf[size], "H/ HOST IRQ STAT : [ 0x%08X ]\n",
2617 readl(dd->mmio + HOST_IRQ_STAT));
2618 size += sprintf(&buf[size], "\n");
2619
2620 size += sprintf(&buf[size], "L/ Commands in Q : [ 0x");
2621
2622 for (n = dd->slot_groups-1; n >= 0; n--) {
2623 if (sizeof(long) > sizeof(u32))
2624 group_allocated =
2625 dd->port->cmds_to_issue[n/2] >> (32*(n&1));
2626 else
2627 group_allocated = dd->port->cmds_to_issue[n];
2628 size += sprintf(&buf[size], "%08X ", group_allocated);
2629 }
2630 size += sprintf(&buf[size], "]\n");
2631
2632 *offset = size <= len ? size : len;
2633 size = copy_to_user(ubuf, buf, *offset);
2634 if (size)
2635 rv = -EFAULT;
2636
2637 kfree(buf);
2638 return rv ? rv : *offset;
2639 }
2640
2641 static ssize_t mtip_hw_read_flags(struct file *f, char __user *ubuf,
2642 size_t len, loff_t *offset)
2643 {
2644 struct driver_data *dd = (struct driver_data *)f->private_data;
2645 char *buf;
2646 int size = *offset;
2647 int rv = 0;
2648
2649 if (!len || size)
2650 return 0;
2651
2652 buf = kzalloc(MTIP_DFS_MAX_BUF_SIZE, GFP_KERNEL);
2653 if (!buf) {
2654 dev_err(&dd->pdev->dev,
2655 "Memory allocation: flag buffer\n");
2656 return -ENOMEM;
2657 }
2658
2659 size += sprintf(&buf[size], "Flag-port : [ %08lX ]\n",
2660 dd->port->flags);
2661 size += sprintf(&buf[size], "Flag-dd : [ %08lX ]\n",
2662 dd->dd_flag);
2663
2664 *offset = size <= len ? size : len;
2665 size = copy_to_user(ubuf, buf, *offset);
2666 if (size)
2667 rv = -EFAULT;
2668
2669 kfree(buf);
2670 return rv ? rv : *offset;
2671 }
2672
2673 static const struct file_operations mtip_device_status_fops = {
2674 .owner = THIS_MODULE,
2675 .open = simple_open,
2676 .read = mtip_hw_read_device_status,
2677 .llseek = no_llseek,
2678 };
2679
2680 static const struct file_operations mtip_regs_fops = {
2681 .owner = THIS_MODULE,
2682 .open = simple_open,
2683 .read = mtip_hw_read_registers,
2684 .llseek = no_llseek,
2685 };
2686
2687 static const struct file_operations mtip_flags_fops = {
2688 .owner = THIS_MODULE,
2689 .open = simple_open,
2690 .read = mtip_hw_read_flags,
2691 .llseek = no_llseek,
2692 };
2693
2694 /*
2695 * Create the sysfs related attributes.
2696 *
2697 * @dd Pointer to the driver data structure.
2698 * @kobj Pointer to the kobj for the block device.
2699 *
2700 * return value
2701 * 0 Operation completed successfully.
2702 * -EINVAL Invalid parameter.
2703 */
2704 static int mtip_hw_sysfs_init(struct driver_data *dd, struct kobject *kobj)
2705 {
2706 if (!kobj || !dd)
2707 return -EINVAL;
2708
2709 if (sysfs_create_file(kobj, &dev_attr_status.attr))
2710 dev_warn(&dd->pdev->dev,
2711 "Error creating 'status' sysfs entry\n");
2712 return 0;
2713 }
2714
2715 /*
2716 * Remove the sysfs related attributes.
2717 *
2718 * @dd Pointer to the driver data structure.
2719 * @kobj Pointer to the kobj for the block device.
2720 *
2721 * return value
2722 * 0 Operation completed successfully.
2723 * -EINVAL Invalid parameter.
2724 */
2725 static int mtip_hw_sysfs_exit(struct driver_data *dd, struct kobject *kobj)
2726 {
2727 if (!kobj || !dd)
2728 return -EINVAL;
2729
2730 sysfs_remove_file(kobj, &dev_attr_status.attr);
2731
2732 return 0;
2733 }
2734
2735 static int mtip_hw_debugfs_init(struct driver_data *dd)
2736 {
2737 if (!dfs_parent)
2738 return -1;
2739
2740 dd->dfs_node = debugfs_create_dir(dd->disk->disk_name, dfs_parent);
2741 if (IS_ERR_OR_NULL(dd->dfs_node)) {
2742 dev_warn(&dd->pdev->dev,
2743 "Error creating node %s under debugfs\n",
2744 dd->disk->disk_name);
2745 dd->dfs_node = NULL;
2746 return -1;
2747 }
2748
2749 debugfs_create_file("flags", S_IRUGO, dd->dfs_node, dd,
2750 &mtip_flags_fops);
2751 debugfs_create_file("registers", S_IRUGO, dd->dfs_node, dd,
2752 &mtip_regs_fops);
2753
2754 return 0;
2755 }
2756
2757 static void mtip_hw_debugfs_exit(struct driver_data *dd)
2758 {
2759 if (dd->dfs_node)
2760 debugfs_remove_recursive(dd->dfs_node);
2761 }
2762
2763 /*
2764 * Perform any init/resume time hardware setup
2765 *
2766 * @dd Pointer to the driver data structure.
2767 *
2768 * return value
2769 * None
2770 */
2771 static inline void hba_setup(struct driver_data *dd)
2772 {
2773 u32 hwdata;
2774 hwdata = readl(dd->mmio + HOST_HSORG);
2775
2776 /* interrupt bug workaround: use only 1 IS bit.*/
2777 writel(hwdata |
2778 HSORG_DISABLE_SLOTGRP_INTR |
2779 HSORG_DISABLE_SLOTGRP_PXIS,
2780 dd->mmio + HOST_HSORG);
2781 }
2782
2783 static int mtip_device_unaligned_constrained(struct driver_data *dd)
2784 {
2785 return (dd->pdev->device == P420M_DEVICE_ID ? 1 : 0);
2786 }
2787
2788 /*
2789 * Detect the details of the product, and store anything needed
2790 * into the driver data structure. This includes product type and
2791 * version and number of slot groups.
2792 *
2793 * @dd Pointer to the driver data structure.
2794 *
2795 * return value
2796 * None
2797 */
2798 static void mtip_detect_product(struct driver_data *dd)
2799 {
2800 u32 hwdata;
2801 unsigned int rev, slotgroups;
2802
2803 /*
2804 * HBA base + 0xFC [15:0] - vendor-specific hardware interface
2805 * info register:
2806 * [15:8] hardware/software interface rev#
2807 * [ 3] asic-style interface
2808 * [ 2:0] number of slot groups, minus 1 (only valid for asic-style).
2809 */
2810 hwdata = readl(dd->mmio + HOST_HSORG);
2811
2812 dd->product_type = MTIP_PRODUCT_UNKNOWN;
2813 dd->slot_groups = 1;
2814
2815 if (hwdata & 0x8) {
2816 dd->product_type = MTIP_PRODUCT_ASICFPGA;
2817 rev = (hwdata & HSORG_HWREV) >> 8;
2818 slotgroups = (hwdata & HSORG_SLOTGROUPS) + 1;
2819 dev_info(&dd->pdev->dev,
2820 "ASIC-FPGA design, HS rev 0x%x, "
2821 "%i slot groups [%i slots]\n",
2822 rev,
2823 slotgroups,
2824 slotgroups * 32);
2825
2826 if (slotgroups > MTIP_MAX_SLOT_GROUPS) {
2827 dev_warn(&dd->pdev->dev,
2828 "Warning: driver only supports "
2829 "%i slot groups.\n", MTIP_MAX_SLOT_GROUPS);
2830 slotgroups = MTIP_MAX_SLOT_GROUPS;
2831 }
2832 dd->slot_groups = slotgroups;
2833 return;
2834 }
2835
2836 dev_warn(&dd->pdev->dev, "Unrecognized product id\n");
2837 }
2838
2839 /*
2840 * Blocking wait for FTL rebuild to complete
2841 *
2842 * @dd Pointer to the DRIVER_DATA structure.
2843 *
2844 * return value
2845 * 0 FTL rebuild completed successfully
2846 * -EFAULT FTL rebuild error/timeout/interruption
2847 */
2848 static int mtip_ftl_rebuild_poll(struct driver_data *dd)
2849 {
2850 unsigned long timeout, cnt = 0, start;
2851
2852 dev_warn(&dd->pdev->dev,
2853 "FTL rebuild in progress. Polling for completion.\n");
2854
2855 start = jiffies;
2856 timeout = jiffies + msecs_to_jiffies(MTIP_FTL_REBUILD_TIMEOUT_MS);
2857
2858 do {
2859 if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
2860 &dd->dd_flag)))
2861 return -EFAULT;
2862 if (mtip_check_surprise_removal(dd->pdev))
2863 return -EFAULT;
2864
2865 if (mtip_get_identify(dd->port, NULL) < 0)
2866 return -EFAULT;
2867
2868 if (*(dd->port->identify + MTIP_FTL_REBUILD_OFFSET) ==
2869 MTIP_FTL_REBUILD_MAGIC) {
2870 ssleep(1);
2871 /* Print message every 3 minutes */
2872 if (cnt++ >= 180) {
2873 dev_warn(&dd->pdev->dev,
2874 "FTL rebuild in progress (%d secs).\n",
2875 jiffies_to_msecs(jiffies - start) / 1000);
2876 cnt = 0;
2877 }
2878 } else {
2879 dev_warn(&dd->pdev->dev,
2880 "FTL rebuild complete (%d secs).\n",
2881 jiffies_to_msecs(jiffies - start) / 1000);
2882 mtip_block_initialize(dd);
2883 return 0;
2884 }
2885 } while (time_before(jiffies, timeout));
2886
2887 /* Check for timeout */
2888 dev_err(&dd->pdev->dev,
2889 "Timed out waiting for FTL rebuild to complete (%d secs).\n",
2890 jiffies_to_msecs(jiffies - start) / 1000);
2891 return -EFAULT;
2892 }
2893
2894 /*
2895 * service thread to issue queued commands
2896 *
2897 * @data Pointer to the driver data structure.
2898 *
2899 * return value
2900 * 0
2901 */
2902
2903 static int mtip_service_thread(void *data)
2904 {
2905 struct driver_data *dd = (struct driver_data *)data;
2906 unsigned long slot, slot_start, slot_wrap;
2907 unsigned int num_cmd_slots = dd->slot_groups * 32;
2908 struct mtip_port *port = dd->port;
2909
2910 while (1) {
2911 if (kthread_should_stop() ||
2912 test_bit(MTIP_PF_SVC_THD_STOP_BIT, &port->flags))
2913 goto st_out;
2914 clear_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags);
2915
2916 /*
2917 * the condition checks that neither an internal command
2918 * is in progress nor error handling is active
2919 */
2920 wait_event_interruptible(port->svc_wait, (port->flags) &&
2921 !(port->flags & MTIP_PF_PAUSE_IO));
2922
2923 set_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags);
2924
2925 if (kthread_should_stop() ||
2926 test_bit(MTIP_PF_SVC_THD_STOP_BIT, &port->flags))
2927 goto st_out;
2928
2929 if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
2930 &dd->dd_flag)))
2931 goto st_out;
2932
2933 restart_eh:
2934 /* Demux bits: start with error handling */
2935 if (test_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags)) {
2936 mtip_handle_tfe(dd);
2937 clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
2938 }
2939
2940 if (test_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags))
2941 goto restart_eh;
2942
2943 if (test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags)) {
2944 slot = 1;
2945 /* used to restrict the scan to a single pass over the slots */
2946 slot_start = num_cmd_slots;
2947 slot_wrap = 0;
2948 while (1) {
2949 slot = find_next_bit(port->cmds_to_issue,
2950 num_cmd_slots, slot);
2951 if (slot_wrap == 1) {
2952 if ((slot_start >= slot) ||
2953 (slot >= num_cmd_slots))
2954 break;
2955 }
2956 if (unlikely(slot_start == num_cmd_slots))
2957 slot_start = slot;
2958
2959 if (unlikely(slot == num_cmd_slots)) {
2960 slot = 1;
2961 slot_wrap = 1;
2962 continue;
2963 }
2964
2965 /* Issue the command to the hardware */
2966 mtip_issue_ncq_command(port, slot);
2967
2968 clear_bit(slot, port->cmds_to_issue);
2969 }
2970
2971 clear_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags);
2972 }
2973
2974 if (test_bit(MTIP_PF_REBUILD_BIT, &port->flags)) {
2975 if (mtip_ftl_rebuild_poll(dd) < 0)
2976 set_bit(MTIP_DDF_REBUILD_FAILED_BIT,
2977 &dd->dd_flag);
2978 clear_bit(MTIP_PF_REBUILD_BIT, &port->flags);
2979 }
2980 }
2981
2982 st_out:
2983 return 0;
2984 }
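
The slot loop above is a wrap-around scan: it starts at slot 1 (tag 0 appears to be reserved for the internal command, MTIP_TAG_INTERNAL), remembers the first hit in slot_start, and after wrapping once stops when it reaches the starting slot again. A userspace sketch of the same control flow with a toy find_next_bit():

#include <stdio.h>

/* Toy find_next_bit(): next set bit >= start, or nbits if none. */
static unsigned int next_bit(unsigned long mask, unsigned int nbits,
			     unsigned int start)
{
	for (; start < nbits; start++)
		if (mask & (1UL << start))
			return start;
	return nbits;
}

int main(void)
{
	unsigned long cmds = (1UL << 3) | (1UL << 30);	/* hypothetical tags */
	unsigned int nbits = 32, slot = 1, start = nbits, wrap = 0;

	while (1) {
		slot = next_bit(cmds, nbits, slot);
		if (wrap && (start >= slot || slot >= nbits))
			break;
		if (start == nbits)
			start = slot;
		if (slot == nbits) {
			slot = 1;
			wrap = 1;
			continue;
		}
		printf("issue slot %u\n", slot);	/* prints 3, then 30 */
		cmds &= ~(1UL << slot);
	}
	return 0;
}
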
2985
2986 /*
2987 * DMA region teardown
2988 *
2989 * @dd Pointer to driver_data structure
2990 *
2991 * return value
2992 * None
2993 */
2994 static void mtip_dma_free(struct driver_data *dd)
2995 {
2996 struct mtip_port *port = dd->port;
2997
2998 if (port->block1)
2999 dmam_free_coherent(&dd->pdev->dev, BLOCK_DMA_ALLOC_SZ,
3000 port->block1, port->block1_dma);
3001
3002 if (port->command_list) {
3003 dmam_free_coherent(&dd->pdev->dev, AHCI_CMD_TBL_SZ,
3004 port->command_list, port->command_list_dma);
3005 }
3006 }
3007
3008 /*
3009 * DMA region setup
3010 *
3011 * @dd Pointer to driver_data structure
3012 *
3013 * return value
3014 * -ENOMEM Not enough free DMA region space to initialize driver
3015 */
3016 static int mtip_dma_alloc(struct driver_data *dd)
3017 {
3018 struct mtip_port *port = dd->port;
3019
3020 /* Allocate DMA memory for RX FIS, Identify, and Sector Buffer */
3021 port->block1 =
3022 dmam_alloc_coherent(&dd->pdev->dev, BLOCK_DMA_ALLOC_SZ,
3023 &port->block1_dma, GFP_KERNEL);
3024 if (!port->block1)
3025 return -ENOMEM;
3026 memset(port->block1, 0, BLOCK_DMA_ALLOC_SZ);
3027
3028 /* Allocate dma memory for command list */
3029 port->command_list =
3030 dmam_alloc_coherent(&dd->pdev->dev, AHCI_CMD_TBL_SZ,
3031 &port->command_list_dma, GFP_KERNEL);
3032 if (!port->command_list) {
3033 dmam_free_coherent(&dd->pdev->dev, BLOCK_DMA_ALLOC_SZ,
3034 port->block1, port->block1_dma);
3035 port->block1 = NULL;
3036 port->block1_dma = 0;
3037 return -ENOMEM;
3038 }
3039 memset(port->command_list, 0, AHCI_CMD_TBL_SZ);
3040
3041 /* Setup all pointers into first DMA region */
3042 port->rxfis = port->block1 + AHCI_RX_FIS_OFFSET;
3043 port->rxfis_dma = port->block1_dma + AHCI_RX_FIS_OFFSET;
3044 port->identify = port->block1 + AHCI_IDFY_OFFSET;
3045 port->identify_dma = port->block1_dma + AHCI_IDFY_OFFSET;
3046 port->log_buf = port->block1 + AHCI_SECTBUF_OFFSET;
3047 port->log_buf_dma = port->block1_dma + AHCI_SECTBUF_OFFSET;
3048 port->smart_buf = port->block1 + AHCI_SMARTBUF_OFFSET;
3049 port->smart_buf_dma = port->block1_dma + AHCI_SMARTBUF_OFFSET;
3050
3051 return 0;
3052 }
3053
3054 static int mtip_hw_get_identify(struct driver_data *dd)
3055 {
3056 struct smart_attr attr242;
3057 unsigned char *buf;
3058 int rv;
3059
3060 if (mtip_get_identify(dd->port, NULL) < 0)
3061 return -EFAULT;
3062
3063 if (*(dd->port->identify + MTIP_FTL_REBUILD_OFFSET) ==
3064 MTIP_FTL_REBUILD_MAGIC) {
3065 set_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags);
3066 return MTIP_FTL_REBUILD_MAGIC;
3067 }
3068 mtip_dump_identify(dd->port);
3069
3070 /* check write protect, over temp and rebuild statuses */
3071 rv = mtip_read_log_page(dd->port, ATA_LOG_SATA_NCQ,
3072 dd->port->log_buf,
3073 dd->port->log_buf_dma, 1);
3074 if (rv) {
3075 dev_warn(&dd->pdev->dev,
3076 "Error in READ LOG EXT (10h) command\n");
3077 /* non-critical error, don't fail the load */
3078 } else {
3079 buf = (unsigned char *)dd->port->log_buf;
3080 if (buf[259] & 0x1) {
3081 dev_info(&dd->pdev->dev,
3082 "Write protect bit is set.\n");
3083 set_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag);
3084 }
3085 if (buf[288] == 0xF7) {
3086 dev_info(&dd->pdev->dev,
3087 "Exceeded Tmax, drive in thermal shutdown.\n");
3088 set_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag);
3089 }
3090 if (buf[288] == 0xBF) {
3091 dev_info(&dd->pdev->dev,
3092 "Drive indicates rebuild has failed.\n");
3093 /* TODO */
3094 }
3095 }
3096
3097 /* get write protect progress */
3098 memset(&attr242, 0, sizeof(struct smart_attr));
3099 if (mtip_get_smart_attr(dd->port, 242, &attr242))
3100 dev_warn(&dd->pdev->dev,
3101 "Unable to check write protect progress\n");
3102 else
3103 dev_info(&dd->pdev->dev,
3104 "Write protect progress: %u%% (%u blocks)\n",
3105 attr242.cur, le32_to_cpu(attr242.data));
3106
3107 return rv;
3108 }
3109
3110 /*
3111 * Called once for each card.
3112 *
3113 * @dd Pointer to the driver data structure.
3114 *
3115 * return value
3116 * 0 on success, else an error code.
3117 */
3118 static int mtip_hw_init(struct driver_data *dd)
3119 {
3120 int i;
3121 int rv;
3122 unsigned int num_command_slots;
3123 unsigned long timeout, timetaken;
3124
3125 dd->mmio = pcim_iomap_table(dd->pdev)[MTIP_ABAR];
3126
3127 mtip_detect_product(dd);
3128 if (dd->product_type == MTIP_PRODUCT_UNKNOWN) {
3129 rv = -EIO;
3130 goto out1;
3131 }
3132 num_command_slots = dd->slot_groups * 32;
3133
3134 hba_setup(dd);
3135
3136 dd->port = kzalloc_node(sizeof(struct mtip_port), GFP_KERNEL,
3137 dd->numa_node);
3138 if (!dd->port) {
3139 dev_err(&dd->pdev->dev,
3140 "Memory allocation: port structure\n");
3141 return -ENOMEM;
3142 }
3143
3144 /* Continue workqueue setup */
3145 for (i = 0; i < MTIP_MAX_SLOT_GROUPS; i++)
3146 dd->work[i].port = dd->port;
3147
3148 /* Enable unaligned IO constraints for some devices */
3149 if (mtip_device_unaligned_constrained(dd))
3150 dd->unal_qdepth = MTIP_MAX_UNALIGNED_SLOTS;
3151 else
3152 dd->unal_qdepth = 0;
3153
3154 sema_init(&dd->port->cmd_slot_unal, dd->unal_qdepth);
3155
3156 /* Spinlock to prevent concurrent issue */
3157 for (i = 0; i < MTIP_MAX_SLOT_GROUPS; i++)
3158 spin_lock_init(&dd->port->cmd_issue_lock[i]);
3159
3160 /* Set the port mmio base address. */
3161 dd->port->mmio = dd->mmio + PORT_OFFSET;
3162 dd->port->dd = dd;
3163
3164 /* DMA allocations */
3165 rv = mtip_dma_alloc(dd);
3166 if (rv < 0)
3167 goto out1;
3168
3169 /* Setup the pointers to the extended s_active and CI registers. */
3170 for (i = 0; i < dd->slot_groups; i++) {
3171 dd->port->s_active[i] =
3172 dd->port->mmio + i*0x80 + PORT_SCR_ACT;
3173 dd->port->cmd_issue[i] =
3174 dd->port->mmio + i*0x80 + PORT_COMMAND_ISSUE;
3175 dd->port->completed[i] =
3176 dd->port->mmio + i*0x80 + PORT_SDBV;
3177 }
3178
3179 timetaken = jiffies;
3180 timeout = jiffies + msecs_to_jiffies(30000);
3181 while (((readl(dd->port->mmio + PORT_SCR_STAT) & 0x0F) != 0x03) &&
3182 time_before(jiffies, timeout)) {
3183 mdelay(100);
3184 }
3185 if (unlikely(mtip_check_surprise_removal(dd->pdev))) {
3186 timetaken = jiffies - timetaken;
3187 dev_warn(&dd->pdev->dev,
3188 "Surprise removal detected at %u ms\n",
3189 jiffies_to_msecs(timetaken));
3190 rv = -ENODEV;
3191 goto out2;
3192 }
3193 if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))) {
3194 timetaken = jiffies - timetaken;
3195 dev_warn(&dd->pdev->dev,
3196 "Removal detected at %u ms\n",
3197 jiffies_to_msecs(timetaken));
3198 rv = -EFAULT;
3199 goto out2;
3200 }
3201
3202 /* Conditionally reset the HBA. */
3203 if (!(readl(dd->mmio + HOST_CAP) & HOST_CAP_NZDMA)) {
3204 if (mtip_hba_reset(dd) < 0) {
3205 dev_err(&dd->pdev->dev,
3206 "Card did not reset within timeout\n");
3207 rv = -EIO;
3208 goto out2;
3209 }
3210 } else {
3211 /* Clear any pending interrupts on the HBA */
3212 writel(readl(dd->mmio + HOST_IRQ_STAT),
3213 dd->mmio + HOST_IRQ_STAT);
3214 }
3215
3216 mtip_init_port(dd->port);
3217 mtip_start_port(dd->port);
3218
3219 /* Setup the ISR and enable interrupts. */
3220 rv = devm_request_irq(&dd->pdev->dev,
3221 dd->pdev->irq,
3222 mtip_irq_handler,
3223 IRQF_SHARED,
3224 dev_driver_string(&dd->pdev->dev),
3225 dd);
3226
3227 if (rv) {
3228 dev_err(&dd->pdev->dev,
3229 "Unable to allocate IRQ %d\n", dd->pdev->irq);
3230 goto out2;
3231 }
3232 irq_set_affinity_hint(dd->pdev->irq, get_cpu_mask(dd->isr_binding));
3233
3234 /* Enable interrupts on the HBA. */
3235 writel(readl(dd->mmio + HOST_CTL) | HOST_IRQ_EN,
3236 dd->mmio + HOST_CTL);
3237
3238 init_waitqueue_head(&dd->port->svc_wait);
3239
3240 if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)) {
3241 rv = -EFAULT;
3242 goto out3;
3243 }
3244
3245 return rv;
3246
3247 out3:
3248 /* Disable interrupts on the HBA. */
3249 writel(readl(dd->mmio + HOST_CTL) & ~HOST_IRQ_EN,
3250 dd->mmio + HOST_CTL);
3251
3252 /* Release the IRQ. */
3253 irq_set_affinity_hint(dd->pdev->irq, NULL);
3254 devm_free_irq(&dd->pdev->dev, dd->pdev->irq, dd);
3255
3256 out2:
3257 mtip_deinit_port(dd->port);
3258 mtip_dma_free(dd);
3259
3260 out1:
3261 /* Free the memory allocated for the port structure. */
3262 kfree(dd->port);
3263
3264 return rv;
3265 }
3266
3267 static void mtip_standby_drive(struct driver_data *dd)
3268 {
3269 if (dd->sr)
3270 return;
3271
3272 /*
3273 * Send standby immediate (E0h) to the drive so that it
3274 * saves its state.
3275 */
3276 if (!test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags) &&
3277 !test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag))
3278 if (mtip_standby_immediate(dd->port))
3279 dev_warn(&dd->pdev->dev,
3280 "STANDBY IMMEDIATE failed\n");
3281 }
3282
3283 /*
3284 * Called to deinitialize an interface.
3285 *
3286 * @dd Pointer to the driver data structure.
3287 *
3288 * return value
3289 * 0
3290 */
3291 static int mtip_hw_exit(struct driver_data *dd)
3292 {
3293 /*
3294 * Send standby immediate (E0h) to the drive so that it
3295 * saves its state.
3296 */
3297 if (!dd->sr) {
3298 /* de-initialize the port. */
3299 mtip_deinit_port(dd->port);
3300
3301 /* Disable interrupts on the HBA. */
3302 writel(readl(dd->mmio + HOST_CTL) & ~HOST_IRQ_EN,
3303 dd->mmio + HOST_CTL);
3304 }
3305
3306 /* Release the IRQ. */
3307 irq_set_affinity_hint(dd->pdev->irq, NULL);
3308 devm_free_irq(&dd->pdev->dev, dd->pdev->irq, dd);
3309 msleep(1000);
3310
3311 /* Free dma regions */
3312 mtip_dma_free(dd);
3313
3314 /* Free the memory allocated for the port structure. */
3315 kfree(dd->port);
3316 dd->port = NULL;
3317
3318 return 0;
3319 }
3320
3321 /*
3322 * Issue a Standby Immediate command to the device.
3323 *
3324 * This function is called by the Block Layer just before the
3325 * system powers off during a shutdown.
3326 *
3327 * @dd Pointer to the driver data structure.
3328 *
3329 * return value
3330 * 0
3331 */
3332 static int mtip_hw_shutdown(struct driver_data *dd)
3333 {
3334 /*
3335 * Send standby immediate (E0h) to the drive so that it
3336 * saves its state.
3337 */
3338 if (!dd->sr && dd->port)
3339 mtip_standby_immediate(dd->port);
3340
3341 return 0;
3342 }
3343
3344 /*
3345 * Suspend function
3346 *
3347 * This function is called by the Block Layer just before the
3348 * system hibernates.
3349 *
3350 * @dd Pointer to the driver data structure.
3351 *
3352 * return value
3353 * 0 Suspend was successful
3354 * -EFAULT Suspend was not successful
3355 */
3356 static int mtip_hw_suspend(struct driver_data *dd)
3357 {
3358 /*
3359 * Send standby immediate (E0h) to the drive
3360 * so that it saves its state.
3361 */
3362 if (mtip_standby_immediate(dd->port) != 0) {
3363 dev_err(&dd->pdev->dev,
3364 "Failed standby-immediate command\n");
3365 return -EFAULT;
3366 }
3367
3368 /* Disable interrupts on the HBA.*/
3369 writel(readl(dd->mmio + HOST_CTL) & ~HOST_IRQ_EN,
3370 dd->mmio + HOST_CTL);
3371 mtip_deinit_port(dd->port);
3372
3373 return 0;
3374 }
3375
3376 /*
3377 * Resume function
3378 *
3379 * This function is called by the Block Layer as the
3380 * system resumes.
3381 *
3382 * @dd Pointer to the driver data structure.
3383 *
3384 * return value
3385 * 0 Resume was successful
3386 * -EFAULT Resume was not successful
3387 */
3388 static int mtip_hw_resume(struct driver_data *dd)
3389 {
3390 /* Perform any needed hardware setup steps */
3391 hba_setup(dd);
3392
3393 /* Reset the HBA */
3394 if (mtip_hba_reset(dd) != 0) {
3395 dev_err(&dd->pdev->dev,
3396 "Unable to reset the HBA\n");
3397 return -EFAULT;
3398 }
3399
3400 /*
3401 * Enable the port, DMA engine, and FIS reception specific
3402 * h/w in controller.
3403 */
3404 mtip_init_port(dd->port);
3405 mtip_start_port(dd->port);
3406
3407 /* Enable interrupts on the HBA.*/
3408 writel(readl(dd->mmio + HOST_CTL) | HOST_IRQ_EN,
3409 dd->mmio + HOST_CTL);
3410
3411 return 0;
3412 }
3413
3414 /*
3415 * Helper function for formatting the disk name so that a name can
3416 * be reused upon hot insertion.
3417 */
3418 static int rssd_disk_name_format(char *prefix,
3419 int index,
3420 char *buf,
3421 int buflen)
3422 {
3423 const int base = 'z' - 'a' + 1;
3424 char *begin = buf + strlen(prefix);
3425 char *end = buf + buflen;
3426 char *p;
3427 int unit;
3428
3429 p = end - 1;
3430 *p = '\0';
3431 unit = base;
3432 do {
3433 if (p == begin)
3434 return -EINVAL;
3435 *--p = 'a' + (index % unit);
3436 index = (index / unit) - 1;
3437 } while (index >= 0);
3438
3439 memmove(begin, p, end - p);
3440 memcpy(buf, prefix, strlen(prefix));
3441
3442 return 0;
3443 }
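/*
 * Editor's illustrative sketch, not part of the traced driver: the loop
 * above encodes the index in "bijective base 26", the same scheme sd.c
 * uses for sdX names: 0 -> "a", 25 -> "z", 26 -> "aa", 27 -> "ab", etc.
 * A minimal user-space demo of the same mapping; demo_name_suffix() is
 * a hypothetical helper name.
 */
#include <stdio.h>

static void demo_name_suffix(int index, char *out, int outlen)
{
	char tmp[16];
	int i = (int)sizeof(tmp) - 1;

	tmp[i] = '\0';
	do {
		tmp[--i] = 'a' + (index % 26);	/* least-significant letter */
		index = (index / 26) - 1;	/* bijective base-26 step */
	} while (index >= 0);
	snprintf(out, (size_t)outlen, "%s", &tmp[i]);
}

int main(void)
{
	char buf[16];
	int samples[] = { 0, 25, 26, 701, 702 };
	int k;

	for (k = 0; k < 5; k++) {
		demo_name_suffix(samples[k], buf, (int)sizeof(buf));
		printf("index %d -> rssd%s\n", samples[k], buf);	/* 0 -> rssda, 26 -> rssdaa */
	}
	return 0;
}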
3444
3445 /*
3446 * Block layer IOCTL handler.
3447 *
3448 * @dev Pointer to the block_device structure.
3449 * @mode ignored
3450 * @cmd IOCTL command passed from the user application.
3451 * @arg Argument passed from the user application.
3452 *
3453 * return value
3454 * 0 IOCTL completed successfully.
3455 * -ENOTTY IOCTL not supported or invalid driver data
3456 * structure pointer.
3457 */
3458 static int mtip_block_ioctl(struct block_device *dev,
3459 fmode_t mode,
3460 unsigned cmd,
3461 unsigned long arg)
3462 {
3463 struct driver_data *dd = dev->bd_disk->private_data;
3464
3465 if (!capable(CAP_SYS_ADMIN))
3466 return -EACCES;
3467
3468 if (!dd)
3469 return -ENOTTY;
3470
3471 if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)))
3472 return -ENOTTY;
3473
3474 switch (cmd) {
3475 case BLKFLSBUF:
3476 return -ENOTTY;
3477 default:
3478 return mtip_hw_ioctl(dd, cmd, arg);
3479 }
3480 }
3481
3482 #ifdef CONFIG_COMPAT
3483 /*
3484 * Block layer compat IOCTL handler.
3485 *
3486 * @dev Pointer to the block_device structure.
3487 * @mode ignored
3488 * @cmd IOCTL command passed from the user application.
3489 * @arg Argument passed from the user application.
3490 *
3491 * return value
3492 * 0 IOCTL completed successfully.
3493 * -ENOTTY IOCTL not supported or invalid driver data
3494 * structure pointer.
3495 */
3496 static int mtip_block_compat_ioctl(struct block_device *dev,
3497 fmode_t mode,
3498 unsigned cmd,
3499 unsigned long arg)
3500 {
3501 struct driver_data *dd = dev->bd_disk->private_data;
3502
3503 if (!capable(CAP_SYS_ADMIN))
3504 return -EACCES;
3505
3506 if (!dd)
3507 return -ENOTTY;
3508
3509 if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)))
3510 return -ENOTTY;
3511
3512 switch (cmd) {
3513 case BLKFLSBUF:
3514 return -ENOTTY;
3515 case HDIO_DRIVE_TASKFILE: {
3516 struct mtip_compat_ide_task_request_s __user *compat_req_task;
3517 ide_task_request_t req_task;
3518 int compat_tasksize, outtotal, ret;
3519
3520 compat_tasksize =
3521 sizeof(struct mtip_compat_ide_task_request_s);
3522
3523 compat_req_task =
3524 (struct mtip_compat_ide_task_request_s __user *) arg;
3525
3526 if (copy_from_user(&req_task, (void __user *) arg,
3527 compat_tasksize - (2 * sizeof(compat_long_t))))
3528 return -EFAULT;
3529
3530 if (get_user(req_task.out_size, &compat_req_task->out_size))
3531 return -EFAULT;
3532
3533 if (get_user(req_task.in_size, &compat_req_task->in_size))
3534 return -EFAULT;
3535
3536 outtotal = sizeof(struct mtip_compat_ide_task_request_s);
3537
3538 ret = exec_drive_taskfile(dd, (void __user *) arg,
3539 &req_task, outtotal);
3540
3541 if (copy_to_user((void __user *) arg, &req_task,
3542 compat_tasksize -
3543 (2 * sizeof(compat_long_t))))
3544 return -EFAULT;
3545
3546 if (put_user(req_task.out_size, &compat_req_task->out_size))
3547 return -EFAULT;
3548
3549 if (put_user(req_task.in_size, &compat_req_task->in_size))
3550 return -EFAULT;
3551
3552 return ret;
3553 }
3554 default:
3555 return mtip_hw_ioctl(dd, cmd, arg);
3556 }
3557 }
3558 #endif
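/*
 * Editor's illustrative sketch, not part of the traced driver: the
 * compat handler above copies the leading bytes shared by both layouts
 * in one go, then fetches the two trailing long fields separately,
 * because their width differs between 32-bit and 64-bit user space.
 * demo_req32/demo_req64 below are hypothetical stand-ins for the real
 * ide_task_request_t layouts.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct demo_req32 { char io_ports[8]; int32_t out_size; int32_t in_size; };
struct demo_req64 { char io_ports[8]; int64_t out_size; int64_t in_size; };

int main(void)
{
	struct demo_req32 src = { "ports", 16, 32 };
	struct demo_req64 dst;

	/* copy the shared prefix: everything before the two differently sized longs */
	memcpy(&dst, &src, sizeof(src) - 2 * sizeof(int32_t));
	dst.out_size = src.out_size;	/* widen each trailing field by hand */
	dst.in_size = src.in_size;

	printf("%s %lld %lld\n", dst.io_ports,
	       (long long)dst.out_size, (long long)dst.in_size);
	return 0;
}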
3559
3560 /*
3561 * Obtain the geometry of the device.
3562 *
3563 * You may think that this function is obsolete, but some applications,
3564 * fdisk for example, still use CHS values. This function describes the
3565 * device as having 224 heads and 56 sectors per track. These values are
3566 * chosen so that each cylinder is aligned on a 4KB boundary. Since a
3567 * partition is described in terms of a start and end cylinder, this means
3568 * that each partition is also 4KB aligned. Non-aligned partitions adversely
3569 * affect performance.
3570 *
3571 * @dev Pointer to the block_device structure.
3572 * @geo Pointer to a hd_geometry structure.
3573 *
3574 * return value
3575 * 0 Operation completed successfully.
3576 * -ENOTTY An error occurred while reading the drive capacity.
3577 */
3578 static int mtip_block_getgeo(struct block_device *dev,
3579 struct hd_geometry *geo)
3580 {
3581 struct driver_data *dd = dev->bd_disk->private_data;
3582 sector_t capacity;
3583
3584 if (!dd)
3585 return -ENOTTY;
3586
3587 if (!(mtip_hw_get_capacity(dd, &capacity))) {
3588 dev_warn(&dd->pdev->dev,
3589 "Could not get drive capacity.\n");
3590 return -ENOTTY;
3591 }
3592
3593 geo->heads = 224;
3594 geo->sectors = 56;
3595 sector_div(capacity, (geo->heads * geo->sectors));
3596 geo->cylinders = capacity;
3597 return 0;
3598 }
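/*
 * Editor's illustrative sketch, not part of the traced driver: why
 * 224 heads x 56 sectors gives 4KB-aligned cylinders. Each cylinder
 * holds 224 * 56 = 12544 sectors of 512 bytes = 6422528 bytes, and
 * 6422528 / 4096 = 1568 exactly, so any partition starting on a
 * cylinder boundary is 4KB aligned. The ~200 GB capacity below is an
 * arbitrary example value.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint64_t heads = 224, sectors = 56, sector_sz = 512;
	uint64_t cyl_bytes = heads * sectors * sector_sz;
	uint64_t capacity = 200ULL * 1000 * 1000 * 1000 / sector_sz;	/* in 512B sectors */

	printf("cylinder = %llu bytes, 4KB aligned: %s\n",
	       (unsigned long long)cyl_bytes,
	       (cyl_bytes % 4096 == 0) ? "yes" : "no");
	printf("cylinders: %llu\n",
	       (unsigned long long)(capacity / (heads * sectors)));
	return 0;
}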
3599
3600 /*
3601 * Block device operation function.
3602 *
3603 * This structure contains pointers to the functions required by the block
3604 * layer.
3605 */
3606 static const struct block_device_operations mtip_block_ops = {
3607 .ioctl = mtip_block_ioctl,
3608 #ifdef CONFIG_COMPAT
3609 .compat_ioctl = mtip_block_compat_ioctl,
3610 #endif
3611 .getgeo = mtip_block_getgeo,
3612 .owner = THIS_MODULE
3613 };
3614
3615 static inline bool is_se_active(struct driver_data *dd)
3616 {
3617 if (unlikely(test_bit(MTIP_PF_SE_ACTIVE_BIT, &dd->port->flags))) {
3618 if (dd->port->ic_pause_timer) {
3619 unsigned long to = dd->port->ic_pause_timer +
3620 msecs_to_jiffies(1000);
3621 if (time_after(jiffies, to)) {
3622 clear_bit(MTIP_PF_SE_ACTIVE_BIT,
3623 &dd->port->flags);
3624 clear_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag);
3625 dd->port->ic_pause_timer = 0;
3626 wake_up_interruptible(&dd->port->svc_wait);
3627 return false;
3628 }
3629 }
3630 return true;
3631 }
3632 return false;
3633 }
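/*
 * Editor's illustrative sketch, not part of the traced driver: the
 * pause check above is the standard jiffies timeout idiom -- record a
 * deadline, then test it with time_after(), which stays correct across
 * counter wraparound because it compares via signed subtraction.
 * demo_time_after() is a hypothetical user-space analog.
 */
#include <stdio.h>
#include <stdint.h>

#define demo_time_after(a, b)	((int32_t)((b) - (a)) < 0)

int main(void)
{
	uint32_t now = 0xfffffff0u;		/* counter about to wrap */
	uint32_t deadline = now + 0x20u;	/* wraps around to 0x10 */

	printf("%d\n", demo_time_after(now, deadline));		/* 0: not expired yet */
	printf("%d\n", demo_time_after(now + 0x40u, deadline));	/* 1: expired despite the wrap */
	return 0;
}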
3634
3635 /*
3636 * Block layer request submission function.
3637 *
3638 * This function is called by the blk-mq layer to process a request
3639 * for the P320 device.
3640 *
3641 * @hctx Pointer to the hardware context; used to obtain the request
3642 * queue and the driver data structure.
3643 * @rq Pointer to the request.
3644 *
3645 */
3646 static int mtip_submit_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
3647 {
3648 struct driver_data *dd = hctx->queue->queuedata;
3649 struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
3650 unsigned int nents;
3651
3652 if (is_se_active(dd))
3653 return -ENODATA;
3654
3655 if (unlikely(dd->dd_flag & MTIP_DDF_STOP_IO)) {
3656 if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
3657 &dd->dd_flag))) {
3658 return -ENXIO;
3659 }
3660 if (unlikely(test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag))) {
3661 return -ENODATA;
3662 }
3663 if (unlikely(test_bit(MTIP_DDF_WRITE_PROTECT_BIT,
3664 &dd->dd_flag) &&
3665 rq_data_dir(rq))) {
3666 return -ENODATA;
3667 }
3668 if (unlikely(test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag)))
3669 return -ENODATA;
3670 if (test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag))
3671 return -ENXIO;
3672 }
3673
3674 if (rq->cmd_flags & REQ_DISCARD) {
3675 int err;
3676
3677 err = mtip_send_trim(dd, blk_rq_pos(rq), blk_rq_sectors(rq));
3678 blk_mq_end_request(rq, err);
3679 return 0;
3680 }
3681
3682 /* Create the scatter list for this request. */
3683 nents = blk_rq_map_sg(hctx->queue, rq, cmd->sg);
3684
3685 /* Issue the read/write. */
3686 mtip_hw_submit_io(dd, rq, cmd, nents, hctx);
3687 return 0;
3688 }
3689
3690 static bool mtip_check_unal_depth(struct blk_mq_hw_ctx *hctx,
3691 struct request *rq)
3692 {
3693 struct driver_data *dd = hctx->queue->queuedata;
3694 struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
3695
3696 if (rq_data_dir(rq) == READ || !dd->unal_qdepth)
3697 return false;
3698
3699 /*
3700 * If unaligned depth must be limited on this controller, mark the IO
3701 * as unaligned if it isn't on a 4k boundary (start or length).
3702 */
3703 if (blk_rq_sectors(rq) <= 64) {
3704 if ((blk_rq_pos(rq) & 7) || (blk_rq_sectors(rq) & 7))
3705 cmd->unaligned = 1;
3706 }
3707
3708 if (cmd->unaligned && down_trylock(&dd->port->cmd_slot_unal))
3709 return true;
3710
3711 return false;
3712 }
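/*
 * Editor's illustrative sketch, not part of the traced driver: with
 * 512-byte sectors a 4k boundary is 8 sectors, so (x & 7) != 0 is a
 * cheap test for "not a multiple of 8". The driver applies it to both
 * the starting LBA and the length of small (<= 64 sector) writes.
 * is_unaligned_io() is a hypothetical stand-alone rendering.
 */
#include <stdio.h>

static int is_unaligned_io(unsigned long long pos, unsigned int nr_sectors)
{
	if (nr_sectors > 64)
		return 0;			/* only small writes are checked */
	return (pos & 7) || (nr_sectors & 7);	/* start or length off a 4k boundary */
}

int main(void)
{
	printf("%d\n", is_unaligned_io(0, 8));	/* 0: aligned start, whole 4k length */
	printf("%d\n", is_unaligned_io(3, 8));	/* 1: start not on a 4k boundary */
	printf("%d\n", is_unaligned_io(8, 5));	/* 1: length not a multiple of 4k */
	return 0;
}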
3713
3714 static int mtip_queue_rq(struct blk_mq_hw_ctx *hctx,
3715 const struct blk_mq_queue_data *bd)
3716 {
3717 struct request *rq = bd->rq;
3718 int ret;
3719
3720 if (unlikely(mtip_check_unal_depth(hctx, rq)))
3721 return BLK_MQ_RQ_QUEUE_BUSY;
3722
3723 blk_mq_start_request(rq);
3724
3725 ret = mtip_submit_request(hctx, rq);
3726 if (likely(!ret))
3727 return BLK_MQ_RQ_QUEUE_OK;
3728
3729 rq->errors = ret;
3730 return BLK_MQ_RQ_QUEUE_ERROR;
3731 }
3732
3733 static void mtip_free_cmd(void *data, struct request *rq,
3734 unsigned int hctx_idx, unsigned int request_idx)
3735 {
3736 struct driver_data *dd = data;
3737 struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
3738
3739 if (!cmd->command)
3740 return;
3741
3742 dmam_free_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ,
3743 cmd->command, cmd->command_dma);
3744 }
3745
3746 static int mtip_init_cmd(void *data, struct request *rq, unsigned int hctx_idx,
3747 unsigned int request_idx, unsigned int numa_node)
3748 {
3749 struct driver_data *dd = data;
3750 struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
3751 u32 host_cap_64 = readl(dd->mmio + HOST_CAP) & HOST_CAP_64;
3752
3753 /*
3754 * For flush requests, request_idx starts at the end of the
3755 * tag space. Since we don't support FLUSH/FUA, simply return
3756 * 0 as there's nothing to be done.
3757 */
3758 if (request_idx >= MTIP_MAX_COMMAND_SLOTS)
3759 return 0;
3760
3761 cmd->command = dmam_alloc_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ,
3762 &cmd->command_dma, GFP_KERNEL);
3763 if (!cmd->command)
3764 return -ENOMEM;
3765
3766 memset(cmd->command, 0, CMD_DMA_ALLOC_SZ);
3767
3768 /* Point the command headers at the command tables. */
3769 cmd->command_header = dd->port->command_list +
3770 (sizeof(struct mtip_cmd_hdr) * request_idx);
3771 cmd->command_header_dma = dd->port->command_list_dma +
3772 (sizeof(struct mtip_cmd_hdr) * request_idx);
3773
3774 if (host_cap_64)
3775 cmd->command_header->ctbau = __force_bit2int cpu_to_le32((cmd->command_dma >> 16) >> 16);
3776
3777 cmd->command_header->ctba = __force_bit2int cpu_to_le32(cmd->command_dma & 0xFFFFFFFF);
3778
3779 sg_init_table(cmd->sg, MTIP_MAX_SG);
3780 return 0;
3781 }
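/*
 * Editor's illustrative sketch, not part of the traced driver:
 * splitting a DMA address into the 32-bit ctba/ctbau register pair as
 * above. The high half is written as (dma >> 16) >> 16 rather than
 * dma >> 32 because dma_addr_t may be only 32 bits wide, and shifting
 * a value by its full type width is undefined behaviour in C; two
 * 16-bit shifts are always well defined.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t dma = 0x123456789abcdef0ULL;	/* example bus address */
	uint32_t ctba  = (uint32_t)(dma & 0xFFFFFFFFu);	/* low 32 bits */
	uint32_t ctbau = (uint32_t)((dma >> 16) >> 16);	/* high 32 bits, width-safe */

	printf("ctba=0x%08x ctbau=0x%08x\n", ctba, ctbau);	/* 9abcdef0 / 12345678 */
	return 0;
}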
3782
3783 static struct blk_mq_ops mtip_mq_ops = {
3784 .queue_rq = mtip_queue_rq,
3785 .map_queue = blk_mq_map_queue,
3786 .init_request = mtip_init_cmd,
3787 .exit_request = mtip_free_cmd,
3788 };
3789
3790 /*
3791 * Block layer initialization function.
3792 *
3793 * This function is called once by the PCI layer for each P320
3794 * device that is connected to the system.
3795 *
3796 * @dd Pointer to the driver data structure.
3797 *
3798 * return value
3799 * 0 on success else an error code.
3800 */
3801 static int mtip_block_initialize(struct driver_data *dd)
3802 {
3803 int rv = 0, wait_for_rebuild = 0;
3804 sector_t capacity;
3805 unsigned int index = 0;
3806 struct kobject *kobj;
3807
3808 if (dd->disk)
3809 goto skip_create_disk; /* hw init done, before rebuild */
3810
3811 if (mtip_hw_init(dd)) {
3812 rv = -EINVAL;
3813 goto protocol_init_error;
3814 }
3815
3816 dd->disk = alloc_disk_node(MTIP_MAX_MINORS, dd->numa_node);
3817 if (dd->disk == NULL) {
3818 dev_err(&dd->pdev->dev,
3819 "Unable to allocate gendisk structure\n");
3820 rv = -EINVAL;
3821 goto alloc_disk_error;
3822 }
3823
3824 /* Generate the disk name, implemented the same way as in sd.c */
3825 do {
3826 if (!ida_pre_get(&rssd_index_ida, GFP_KERNEL))
3827 goto ida_get_error;
3828
3829 spin_lock(&rssd_index_lock);
3830 rv = ida_get_new(&rssd_index_ida, &index);
3831 spin_unlock(&rssd_index_lock);
3832 } while (rv == -EAGAIN);
3833
3834 if (rv)
3835 goto ida_get_error;
3836
3837 rv = rssd_disk_name_format("rssd",
3838 index,
3839 dd->disk->disk_name,
3840 DISK_NAME_LEN);
3841 if (rv)
3842 goto disk_index_error;
3843
3844 dd->disk->driverfs_dev = &dd->pdev->dev;
3845 dd->disk->major = dd->major;
3846 dd->disk->first_minor = index * MTIP_MAX_MINORS;
3847 dd->disk->minors = MTIP_MAX_MINORS;
3848 dd->disk->fops = &mtip_block_ops;
3849 dd->disk->private_data = dd;
3850 dd->index = index;
3851
3852 mtip_hw_debugfs_init(dd);
3853
3854 skip_create_disk:
3855 memset(&dd->tags, 0, sizeof(dd->tags));
3856 dd->tags.ops = &mtip_mq_ops;
3857 dd->tags.nr_hw_queues = 1;
3858 dd->tags.queue_depth = MTIP_MAX_COMMAND_SLOTS;
3859 dd->tags.reserved_tags = 1;
3860 dd->tags.cmd_size = sizeof(struct mtip_cmd);
3861 dd->tags.numa_node = dd->numa_node;
3862 dd->tags.flags = BLK_MQ_F_SHOULD_MERGE;
3863 dd->tags.driver_data = dd;
3864
3865 rv = blk_mq_alloc_tag_set(&dd->tags);
3866 if (rv) {
3867 dev_err(&dd->pdev->dev,
3868 "Unable to allocate request queue\n");
3869 goto block_queue_alloc_init_error;
3870 }
3871
3872 /* Allocate the request queue. */
3873 dd->queue = blk_mq_init_queue(&dd->tags);
3874 if (IS_ERR(dd->queue)) {
3875 dev_err(&dd->pdev->dev,
3876 "Unable to allocate request queue\n");
3877 rv = -ENOMEM;
3878 goto block_queue_alloc_init_error;
3879 }
3880
3881 dd->disk->queue = dd->queue;
3882 dd->queue->queuedata = dd;
3883
3884 /* Initialize the protocol layer. */
3885 wait_for_rebuild = mtip_hw_get_identify(dd);
3886 if (wait_for_rebuild < 0) {
3887 dev_err(&dd->pdev->dev,
3888 "Protocol layer initialization failed\n");
3889 rv = -EINVAL;
3890 goto init_hw_cmds_error;
3891 }
3892
3893 /*
3894 * if rebuild pending, start the service thread, and delay the block
3895 * queue creation and add_disk()
3896 */
3897 if (wait_for_rebuild == MTIP_FTL_REBUILD_MAGIC)
3898 goto start_service_thread;
3899
3900 /* Set device limits. */
3901 set_bit(QUEUE_FLAG_NONROT, &dd->queue->queue_flags);
3902 clear_bit(QUEUE_FLAG_ADD_RANDOM, &dd->queue->queue_flags);
3903 blk_queue_max_segments(dd->queue, MTIP_MAX_SG);
3904 blk_queue_physical_block_size(dd->queue, 4096);
3905 blk_queue_max_hw_sectors(dd->queue, 0xffff);
3906 blk_queue_max_segment_size(dd->queue, 0x400000);
3907 blk_queue_io_min(dd->queue, 4096);
3908 blk_queue_bounce_limit(dd->queue, dd->pdev->dma_mask);
3909
3910 /*
3911 * Write-back caching is not supported by the device. FUA depends on
3912 * write-back cache support, so flush support is set to zero.
3913 */
3914 blk_queue_flush(dd->queue, 0);
3915
3916 /* Signal trim support */
3917 if (dd->trim_supp == true) {
3918 set_bit(QUEUE_FLAG_DISCARD, &dd->queue->queue_flags);
3919 dd->queue->limits.discard_granularity = 4096;
3920 blk_queue_max_discard_sectors(dd->queue,
3921 MTIP_MAX_TRIM_ENTRY_LEN * MTIP_MAX_TRIM_ENTRIES);
3922 dd->queue->limits.discard_zeroes_data = 0;
3923 }
3924
3925 /* Set the capacity of the device in 512 byte sectors. */
3926 if (!(mtip_hw_get_capacity(dd, &capacity))) {
3927 dev_warn(&dd->pdev->dev,
3928 "Could not read drive capacity\n");
3929 rv = -EIO;
3930 goto read_capacity_error;
3931 }
3932 set_capacity(dd->disk, capacity);
3933
3934 /* Enable the block device and add it to /dev */
3935 add_disk(dd->disk);
3936
3937 dd->bdev = bdget_disk(dd->disk, 0);
3938 /*
3939 * Now that the disk is active, initialize any sysfs attributes
3940 * managed by the protocol layer.
3941 */
3942 kobj = kobject_get(&disk_to_dev(dd->disk)->kobj);
3943 if (kobj) {
3944 mtip_hw_sysfs_init(dd, kobj);
3945 kobject_put(kobj);
3946 }
3947
3948 if (dd->mtip_svc_handler) {
3949 set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag);
3950 return rv; /* service thread created for handling rebuild */
3951 }
3952
3953 start_service_thread:
3954 dd->mtip_svc_handler = kthread_create_on_node(mtip_service_thread,
3955 dd, dd->numa_node,
3956 "mtip_svc_thd_%02d", index);
3957
3958 if (IS_ERR(dd->mtip_svc_handler)) {
3959 dev_err(&dd->pdev->dev, "service thread failed to start\n");
3960 dd->mtip_svc_handler = NULL;
3961 rv = -EFAULT;
3962 goto kthread_run_error;
3963 }
3964 wake_up_process(dd->mtip_svc_handler);
3965 if (wait_for_rebuild == MTIP_FTL_REBUILD_MAGIC)
3966 rv = wait_for_rebuild;
3967
3968 return rv;
3969
3970 kthread_run_error:
3971 bdput(dd->bdev);
3972 dd->bdev = NULL;
3973
3974 /* Delete our gendisk. This also removes the device from /dev */
3975 del_gendisk(dd->disk);
3976
3977 read_capacity_error:
3978 init_hw_cmds_error:
3979 blk_cleanup_queue(dd->queue);
3980 blk_mq_free_tag_set(&dd->tags);
3981 block_queue_alloc_init_error:
3982 mtip_hw_debugfs_exit(dd);
3983 disk_index_error:
3984 spin_lock(&rssd_index_lock);
3985 ida_remove(&rssd_index_ida, index);
3986 spin_unlock(&rssd_index_lock);
3987
3988 ida_get_error:
3989 put_disk(dd->disk);
3990
3991 alloc_disk_error:
3992 mtip_hw_exit(dd); /* De-initialize the protocol layer. */
3993
3994 protocol_init_error:
3995 return rv;
3996 }
3997
3998 /*
3999 * Block layer deinitialization function.
4000 *
4001 * Called by the PCI layer as each P320 device is removed.
4002 *
4003 * @dd Pointer to the driver data structure.
4004 *
4005 * return value
4006 * 0
4007 */
4008 static int mtip_block_remove(struct driver_data *dd)
4009 {
4010 struct kobject *kobj;
4011
4012 mtip_hw_debugfs_exit(dd);
4013
4014 if (dd->mtip_svc_handler) {
4015 set_bit(MTIP_PF_SVC_THD_STOP_BIT, &dd->port->flags);
4016 wake_up_interruptible(&dd->port->svc_wait);
4017 kthread_stop(dd->mtip_svc_handler);
4018 }
4019
4020 /* Clean up the sysfs attributes, if created */
4021 if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag)) {
4022 kobj = kobject_get(&disk_to_dev(dd->disk)->kobj);
4023 if (kobj) {
4024 mtip_hw_sysfs_exit(dd, kobj);
4025 kobject_put(kobj);
4026 }
4027 }
4028
4029 if (!dd->sr)
4030 mtip_standby_drive(dd);
4031 else
4032 dev_info(&dd->pdev->dev, "device %s surprise removal\n",
4033 dd->disk->disk_name);
4034
4035 /*
4036 * Delete our gendisk structure. This also removes the device
4037 * from /dev
4038 */
4039 if (dd->bdev) {
4040 bdput(dd->bdev);
4041 dd->bdev = NULL;
4042 }
4043 if (dd->disk) {
4044 del_gendisk(dd->disk);
4045 if (dd->disk->queue) {
4046 blk_cleanup_queue(dd->queue);
4047 blk_mq_free_tag_set(&dd->tags);
4048 dd->queue = NULL;
4049 }
4050 put_disk(dd->disk);
4051 }
4052 dd->disk = NULL;
4053
4054 spin_lock(&rssd_index_lock);
4055 ida_remove(&rssd_index_ida, dd->index);
4056 spin_unlock(&rssd_index_lock);
4057
4058 /* De-initialize the protocol layer. */
4059 mtip_hw_exit(dd);
4060
4061 return 0;
4062 }
4063
4064 /*
4065 * Function called by the PCI layer just before the
4066 * machine shuts down.
4067 *
4068 * If a protocol layer shutdown function is present it will be called
4069 * by this function.
4070 *
4071 * @dd Pointer to the driver data structure.
4072 *
4073 * return value
4074 * 0
4075 */
4076 static int mtip_block_shutdown(struct driver_data *dd)
4077 {
4078 mtip_hw_shutdown(dd);
4079
4080 /* Delete our gendisk structure, and cleanup the blk queue. */
4081 if (dd->disk) {
4082 dev_info(&dd->pdev->dev,
4083 "Shutting down %s ...\n", dd->disk->disk_name);
4084
4085 del_gendisk(dd->disk);
4086 if (dd->disk->queue) {
4087 blk_cleanup_queue(dd->queue);
4088 blk_mq_free_tag_set(&dd->tags);
4089 }
4090 put_disk(dd->disk);
4091 dd->disk = NULL;
4092 dd->queue = NULL;
4093 }
4094
4095 spin_lock(&rssd_index_lock);
4096 ida_remove(&rssd_index_ida, dd->index);
4097 spin_unlock(&rssd_index_lock);
4098 return 0;
4099 }
4100
4101 static int mtip_block_suspend(struct driver_data *dd)
4102 {
4103 dev_info(&dd->pdev->dev,
4104 "Suspending %s ...\n", dd->disk->disk_name);
4105 mtip_hw_suspend(dd);
4106 return 0;
4107 }
4108
4109 static int mtip_block_resume(struct driver_data *dd)
4110 {
4111 dev_info(&dd->pdev->dev, "Resuming %s ...\n",
4112 dd->disk->disk_name);
4113 mtip_hw_resume(dd);
4114 return 0;
4115 }
4116
4117 static void drop_cpu(int cpu)
4118 {
4119 cpu_use[cpu]--;
4120 }
4121
4122 static int get_least_used_cpu_on_node(int node)
4123 {
4124 int cpu, least_used_cpu, least_cnt;
4125 const struct cpumask *node_mask;
4126
4127 node_mask = cpumask_of_node(node);
4128 least_used_cpu = cpumask_first(node_mask);
4129 least_cnt = cpu_use[least_used_cpu];
4130 cpu = least_used_cpu;
4131
4132 for_each_cpu(cpu, node_mask) {
4133 if (cpu_use[cpu] < least_cnt) {
4134 least_used_cpu = cpu;
4135 least_cnt = cpu_use[cpu];
4136 }
4137 }
4138 cpu_use[least_used_cpu]++;
4139 return least_used_cpu;
4140 }
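/*
 * Editor's illustrative sketch, not part of the traced driver: the
 * selection above is a plain arg-min over a per-CPU use counter,
 * restricted to the CPUs of one NUMA node, and it charges the winner
 * so repeated calls spread work out. demo_pick_least_used() is a
 * hypothetical user-space version over a plain array.
 */
#include <stdio.h>

static int demo_pick_least_used(int *use, int ncpus)
{
	int cpu, best = 0;

	for (cpu = 1; cpu < ncpus; cpu++)
		if (use[cpu] < use[best])
			best = cpu;
	use[best]++;			/* charge the chosen CPU, as the driver does */
	return best;
}

int main(void)
{
	int use[4] = { 2, 0, 1, 0 };

	printf("%d\n", demo_pick_least_used(use, 4));	/* 1: first least-used CPU */
	printf("%d\n", demo_pick_least_used(use, 4));	/* 3: next least-used CPU */
	return 0;
}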
4141
4142 /* Helper for selecting a node in round robin mode */
4143 static inline int mtip_get_next_rr_node(void)
4144 {
4145 static int next_node = -1;
4146
4147 if (next_node == -1) {
4148 next_node = first_online_node;
4149 return next_node;
4150 }
4151
4152 next_node = next_online_node(next_node);
4153 if (next_node == MAX_NUMNODES)
4154 next_node = first_online_node;
4155 return next_node;
4156 }
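/*
 * Editor's illustrative sketch, not part of the traced driver: the
 * helper above is a round-robin cursor that wraps back to the first
 * online node once it walks off the end. demo_next_rr() is a
 * hypothetical analog over a plain array of node ids.
 */
#include <stdio.h>

static int demo_next_rr(const int *nodes, int nnodes)
{
	static int next = -1;

	next = (next + 1) % nnodes;	/* -1 bootstraps to index 0 */
	return nodes[next];
}

int main(void)
{
	int online[3] = { 0, 2, 5 };	/* example online NUMA node ids */
	int i;

	for (i = 0; i < 5; i++)
		printf("%d ", demo_next_rr(online, 3));	/* prints: 0 2 5 0 2 */
	printf("\n");
	return 0;
}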
4157
4158 static DEFINE_HANDLER(0);
4159 static DEFINE_HANDLER(1);
4160 static DEFINE_HANDLER(2);
4161 static DEFINE_HANDLER(3);
4162 static DEFINE_HANDLER(4);
4163 static DEFINE_HANDLER(5);
4164 static DEFINE_HANDLER(6);
4165 static DEFINE_HANDLER(7);
4166
4167 static void mtip_disable_link_opts(struct driver_data *dd, struct pci_dev *pdev)
4168 {
4169 int pos;
4170 unsigned short pcie_dev_ctrl;
4171
4172 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
4173 if (pos) {
4174 pci_read_config_word(pdev,
4175 pos + PCI_EXP_DEVCTL,
4176 &pcie_dev_ctrl);
4177 if (pcie_dev_ctrl & (1 << 11) ||
4178 pcie_dev_ctrl & (1 << 4)) {
4179 dev_info(&dd->pdev->dev,
4180 "Disabling ERO/No-Snoop on bridge device %04x:%04x\n",
4181 pdev->vendor, pdev->device);
4182 pcie_dev_ctrl &= ~(PCI_EXP_DEVCTL_NOSNOOP_EN |
4183 PCI_EXP_DEVCTL_RELAX_EN);
4184 pci_write_config_word(pdev,
4185 pos + PCI_EXP_DEVCTL,
4186 pcie_dev_ctrl);
4187 }
4188 }
4189 }
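/*
 * Editor's illustrative sketch, not part of the traced driver: the
 * magic bits tested above are the PCIe Device Control register's
 * Relaxed Ordering enable (bit 4, 0x0010) and No Snoop enable
 * (bit 11, 0x0800), which the kernel names PCI_EXP_DEVCTL_RELAX_EN
 * and PCI_EXP_DEVCTL_NOSNOOP_EN. A minimal demo of the same
 * read-test-clear-write sequence on a plain variable:
 */
#include <stdio.h>
#include <stdint.h>

#define DEMO_DEVCTL_RELAX_EN	0x0010u	/* bit 4 */
#define DEMO_DEVCTL_NOSNOOP_EN	0x0800u	/* bit 11 */

int main(void)
{
	uint16_t devctl = 0x2810;	/* example value with both bits set */

	if (devctl & (DEMO_DEVCTL_RELAX_EN | DEMO_DEVCTL_NOSNOOP_EN)) {
		devctl &= (uint16_t)~(DEMO_DEVCTL_RELAX_EN | DEMO_DEVCTL_NOSNOOP_EN);
		printf("cleared: devctl now 0x%04x\n", devctl);	/* 0x2000 */
	}
	return 0;
}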
4190
4191 static void mtip_fix_ero_nosnoop(struct driver_data *dd, struct pci_dev *pdev)
4192 {
4193 /*
4194 * This workaround is specific to AMD/ATI chipset with a PCI upstream
4195 * device with device id 0x5aXX
4196 */
4197 if (pdev->bus && pdev->bus->self) {
4198 if (pdev->bus->self->vendor == PCI_VENDOR_ID_ATI &&
4199 ((pdev->bus->self->device & 0xff00) == 0x5a00)) {
4200 mtip_disable_link_opts(dd, pdev->bus->self);
4201 } else {
4202 /* Check further up the topology */
4203 struct pci_dev *parent_dev = pdev->bus->self;
4204 if (parent_dev->bus &&
4205 parent_dev->bus->parent &&
4206 parent_dev->bus->parent->self &&
4207 parent_dev->bus->parent->self->vendor ==
4208 PCI_VENDOR_ID_ATI &&
4209 (parent_dev->bus->parent->self->device &
4210 0xff00) == 0x5a00) {
4211 mtip_disable_link_opts(dd,
4212 parent_dev->bus->parent->self);
4213 }
4214 }
4215 }
4216 }
4217
4218 /*
4219 * Called for each supported PCI device detected.
4220 *
4221 * This function allocates the private data structure, enables the
4222 * PCI device and then calls the block layer initialization function.
4223 *
4224 * return value
4225 * 0 on success else an error code.
4226 */
4227 static int mtip_pci_probe(struct pci_dev *pdev,
4228 const struct pci_device_id *ent)
4229 {
4230 int rv = 0;
4231 struct driver_data *dd = NULL;
4232 char cpu_list[256];
4233 const struct cpumask *node_mask;
4234 int cpu, i = 0, j = 0;
4235 int my_node = NUMA_NO_NODE;
4236 unsigned long flags;
4237
4238 /* Allocate memory for this device's private data. */
4239 my_node = pcibus_to_node(pdev->bus);
4240 if (my_node != NUMA_NO_NODE) {
4241 if (!node_online(my_node))
4242 my_node = mtip_get_next_rr_node();
4243 } else {
4244 dev_info(&pdev->dev, "Kernel not reporting proximity, choosing a node\n");
4245 my_node = mtip_get_next_rr_node();
4246 }
4247 dev_info(&pdev->dev, "NUMA node %d (closest: %d,%d, probe on %d:%d)\n",
4248 my_node, pcibus_to_node(pdev->bus), dev_to_node(&pdev->dev),
4249 cpu_to_node(raw_smp_processor_id()), raw_smp_processor_id());
4250
4251 dd = kzalloc_node(sizeof(struct driver_data), GFP_KERNEL, my_node);
4252 if (dd == NULL) {
4253 dev_err(&pdev->dev,
4254 "Unable to allocate memory for driver data\n");
4255 return -ENOMEM;
4256 }
4257
4258 /* Attach the private data to this PCI device. */
4259 pci_set_drvdata(pdev, dd);
4260
4261 rv = pcim_enable_device(pdev);
4262 if (rv < 0) {
4263 dev_err(&pdev->dev, "Unable to enable device\n");
4264 goto iomap_err;
4265 }
4266
4267 /* Map BAR5 to memory. */
4268 rv = pcim_iomap_regions(pdev, 1 << MTIP_ABAR, MTIP_DRV_NAME);
4269 if (rv < 0) {
4270 dev_err(&pdev->dev, "Unable to map regions\n");
4271 goto iomap_err;
4272 }
4273
4274 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
4275 rv = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
4276
4277 if (rv) {
4278 rv = pci_set_consistent_dma_mask(pdev,
4279 DMA_BIT_MASK(32));
4280 if (rv) {
4281 dev_warn(&pdev->dev,
4282 "64-bit DMA enable failed\n");
4283 goto setmask_err;
4284 }
4285 }
4286 }
4287
4288 /* Copy the info we may need later into the private data structure. */
4289 dd->major = mtip_major;
4290 dd->instance = instance;
4291 dd->pdev = pdev;
4292 dd->numa_node = my_node;
4293
4294 INIT_LIST_HEAD(&dd->online_list);
4295 INIT_LIST_HEAD(&dd->remove_list);
4296
4297 memset(dd->workq_name, 0, 32);
4298 snprintf(dd->workq_name, 31, "mtipq%d", dd->instance);
4299
4300 dd->isr_workq = create_workqueue(dd->workq_name);
4301 if (!dd->isr_workq) {
4302 dev_warn(&pdev->dev, "Can't create wq %d\n", dd->instance);
4303 rv = -ENOMEM;
4304 goto block_initialize_err;
4305 }
4306
4307 memset(cpu_list, 0, sizeof(cpu_list));
4308
4309 node_mask = cpumask_of_node(dd->numa_node);
4310 if (!cpumask_empty(node_mask)) {
4311 for_each_cpu(cpu, node_mask)
4312 {
4313 snprintf(&cpu_list[j], 256 - j, "%d ", cpu);
4314 j = strlen(cpu_list);
4315 }
4316
4317 dev_info(&pdev->dev, "Node %d on package %d has %d cpu(s): %s\n",
4318 dd->numa_node,
4319 topology_physical_package_id(cpumask_first(node_mask)),
4320 nr_cpus_node(dd->numa_node),
4321 cpu_list);
4322 } else
4323 dev_dbg(&pdev->dev, "mtip32xx: node_mask empty\n");
4324
4325 dd->isr_binding = get_least_used_cpu_on_node(dd->numa_node);
4326 dev_info(&pdev->dev, "Initial IRQ binding node:cpu %d:%d\n",
4327 cpu_to_node(dd->isr_binding), dd->isr_binding);
4328
4329 /* first worker context always runs in ISR */
4330 dd->work[0].cpu_binding = dd->isr_binding;
4331 dd->work[1].cpu_binding = get_least_used_cpu_on_node(dd->numa_node);
4332 dd->work[2].cpu_binding = get_least_used_cpu_on_node(dd->numa_node);
4333 dd->work[3].cpu_binding = dd->work[0].cpu_binding;
4334 dd->work[4].cpu_binding = dd->work[1].cpu_binding;
4335 dd->work[5].cpu_binding = dd->work[2].cpu_binding;
4336 dd->work[6].cpu_binding = dd->work[2].cpu_binding;
4337 dd->work[7].cpu_binding = dd->work[1].cpu_binding;
4338
4339 /* Log the bindings */
4340 for_each_present_cpu(cpu) {
4341 memset(cpu_list, 0, sizeof(cpu_list));
4342 for (i = 0, j = 0; i < MTIP_MAX_SLOT_GROUPS; i++) {
4343 if (dd->work[i].cpu_binding == cpu) {
4344 snprintf(&cpu_list[j], 256 - j, "%d ", i);
4345 j = strlen(cpu_list);
4346 }
4347 }
4348 if (j)
4349 dev_info(&pdev->dev, "CPU %d: WQs %s\n", cpu, cpu_list);
4350 }
4351
4352 INIT_WORK(&dd->work[0].work, mtip_workq_sdbf0);
4353 INIT_WORK(&dd->work[1].work, mtip_workq_sdbf1);
4354 INIT_WORK(&dd->work[2].work, mtip_workq_sdbf2);
4355 INIT_WORK(&dd->work[3].work, mtip_workq_sdbf3);
4356 INIT_WORK(&dd->work[4].work, mtip_workq_sdbf4);
4357 INIT_WORK(&dd->work[5].work, mtip_workq_sdbf5);
4358 INIT_WORK(&dd->work[6].work, mtip_workq_sdbf6);
4359 INIT_WORK(&dd->work[7].work, mtip_workq_sdbf7);
4360
4361 pci_set_master(pdev);
4362 rv = pci_enable_msi(pdev);
4363 if (rv) {
4364 dev_warn(&pdev->dev,
4365 "Unable to enable MSI interrupt.\n");
4366 goto msi_initialize_err;
4367 }
4368
4369 mtip_fix_ero_nosnoop(dd, pdev);
4370
4371 /* Initialize the block layer. */
4372 rv = mtip_block_initialize(dd);
4373 if (rv < 0) {
4374 dev_err(&pdev->dev,
4375 "Unable to initialize block layer\n");
4376 goto block_initialize_err;
4377 }
4378
4379 /*
4380 * Increment the instance count so that each device has a unique
4381 * instance number.
4382 */
4383 instance++;
4384 if (rv != MTIP_FTL_REBUILD_MAGIC)
4385 set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag);
4386 else
4387 rv = 0; /* device in rebuild state, return 0 from probe */
4388
4389 /* Add to online list even if in ftl rebuild */
4390 spin_lock_irqsave(&dev_lock, flags);
4391 list_add(&dd->online_list, &online_list);
4392 spin_unlock_irqrestore(&dev_lock, flags);
4393
4394 goto done;
4395
4396 block_initialize_err:
4397 pci_disable_msi(pdev);
4398
4399 msi_initialize_err:
4400 if (dd->isr_workq) {
4401 flush_workqueue(dd->isr_workq);
4402 destroy_workqueue(dd->isr_workq);
4403 drop_cpu(dd->work[0].cpu_binding);
4404 drop_cpu(dd->work[1].cpu_binding);
4405 drop_cpu(dd->work[2].cpu_binding);
4406 }
4407 setmask_err:
4408 pcim_iounmap_regions(pdev, 1 << MTIP_ABAR);
4409
4410 iomap_err:
4411 kfree(dd);
4412 pci_set_drvdata(pdev, NULL);
4413 return rv;
4414 done:
4415 return rv;
4416 }
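/*
 * Editor's illustrative sketch, not part of the traced driver: the
 * DMA_BIT_MASK(n) used in the probe's DMA setup expands to a mask of
 * the n low bits; the kernel special-cases n == 64 because shifting
 * 1ULL by 64 is undefined. DEMO_DMA_BIT_MASK is a user-space
 * rendering of that definition.
 */
#include <stdio.h>

#define DEMO_DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
	printf("32-bit mask: 0x%016llx\n", DEMO_DMA_BIT_MASK(32));	/* 0x00000000ffffffff */
	printf("64-bit mask: 0x%016llx\n", DEMO_DMA_BIT_MASK(64));	/* 0xffffffffffffffff */
	return 0;
}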
4417
4418 /*
4419 * Called for each probed device when the device is removed or the
4420 * driver is unloaded.
4421 *
4422 * return value
4423 * None
4424 */
4425 static void mtip_pci_remove(struct pci_dev *pdev)
4426 {
4427 struct driver_data *dd = pci_get_drvdata(pdev);
4428 unsigned long flags, to;
4429
4430 set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag);
4431
4432 spin_lock_irqsave(&dev_lock, flags);
4433 list_del_init(&dd->online_list);
4434 list_add(&dd->remove_list, &removing_list);
4435 spin_unlock_irqrestore(&dev_lock, flags);
4436
4437 mtip_check_surprise_removal(pdev);
4438 synchronize_irq(dd->pdev->irq);
4439
4440 /* Spin until workers are done */
4441 to = jiffies + msecs_to_jiffies(4000);
4442 do {
4443 msleep(20);
4444 } while (atomic_read(&dd->irq_workers_active) != 0 &&
4445 time_before(jiffies, to));
4446
4447 if (atomic_read(&dd->irq_workers_active) != 0) {
4448 dev_warn(&dd->pdev->dev,
4449 "Completion workers still active!\n");
4450 }
4451
4452 blk_mq_stop_hw_queues(dd->queue);
4453 /* Clean up the block layer. */
4454 mtip_block_remove(dd);
4455
4456 if (dd->isr_workq) {
4457 flush_workqueue(dd->isr_workq);
4458 destroy_workqueue(dd->isr_workq);
4459 drop_cpu(dd->work[0].cpu_binding);
4460 drop_cpu(dd->work[1].cpu_binding);
4461 drop_cpu(dd->work[2].cpu_binding);
4462 }
4463
4464 pci_disable_msi(pdev);
4465
4466 spin_lock_irqsave(&dev_lock, flags);
4467 list_del_init(&dd->remove_list);
4468 spin_unlock_irqrestore(&dev_lock, flags);
4469
4470 kfree(dd);
4471
4472 pcim_iounmap_regions(pdev, 1 << MTIP_ABAR);
4473 pci_set_drvdata(pdev, NULL);
4474 }
4475
4476 /*
4477 * Called for each probed device when the device is suspended.
4478 *
4479 * return value
4480 * 0 Success
4481 * <0 Error
4482 */
4483 static int mtip_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
4484 {
4485 int rv = 0;
4486 struct driver_data *dd = pci_get_drvdata(pdev);
4487
4488 if (!dd) {
4489 dev_err(&pdev->dev,
4490 "Driver private datastructure is NULL\n");
4491 return -EFAULT;
4492 }
4493
4494 set_bit(MTIP_DDF_RESUME_BIT, &dd->dd_flag);
4495
4496 /* Disable ports & interrupts then send standby immediate */
4497 rv = mtip_block_suspend(dd);
4498 if (rv < 0) {
4499 dev_err(&pdev->dev,
4500 "Failed to suspend controller\n");
4501 return rv;
4502 }
4503
4504 /*
4505 * Save the pci config space to pdev structure &
4506 * disable the device
4507 */
4508 pci_save_state(pdev);
4509 pci_disable_device(pdev);
4510
4511 /* Move to Low power state*/
4512 pci_set_power_state(pdev, PCI_D3hot);
4513
4514 return rv;
4515 }
4516
4517 /*
4518 * Called for each probed device when the device is resumed.
4519 *
4520 * return value
4521 * 0 Success
4522 * <0 Error
4523 */
4524 static int mtip_pci_resume(struct pci_dev *pdev)
4525 {
4526 int rv = 0;
4527 struct driver_data *dd;
4528
4529 dd = pci_get_drvdata(pdev);
4530 if (!dd) {
4531 dev_err(&pdev->dev,
4532 "Driver private datastructure is NULL\n");
4533 return -EFAULT;
4534 }
4535
4536 /* Move the device to active State */
4537 pci_set_power_state(pdev, PCI_D0);
4538
4539 /* Restore PCI configuration space */
4540 pci_restore_state(pdev);
4541
4542 /* Enable the PCI device*/
4543 rv = pcim_enable_device(pdev);
4544 if (rv < 0) {
4545 dev_err(&pdev->dev,
4546 "Failed to enable card during resume\n");
4547 goto err;
4548 }
4549 pci_set_master(pdev);
4550
4551 /*
4552 * Calls hbaReset, initPort, & startPort function
4553 * then enables interrupts
4554 */
4555 rv = mtip_block_resume(dd);
4556 if (rv < 0)
4557 dev_err(&pdev->dev, "Unable to resume\n");
4558
4559 err:
4560 clear_bit(MTIP_DDF_RESUME_BIT, &dd->dd_flag);
4561
4562 return rv;
4563 }
4564
4565 /*
4566 * Shutdown routine
4567 *
4568 * return value
4569 * None
4570 */
4571 static void mtip_pci_shutdown(struct pci_dev *pdev)
4572 {
4573 struct driver_data *dd = pci_get_drvdata(pdev);
4574 if (dd)
4575 mtip_block_shutdown(dd);
4576 }
4577
4578 /* Table of device ids supported by this driver. */
4579 static const struct pci_device_id mtip_pci_tbl[] = {
4580 { PCI_DEVICE(PCI_VENDOR_ID_MICRON, P320H_DEVICE_ID) },
4581 { PCI_DEVICE(PCI_VENDOR_ID_MICRON, P320M_DEVICE_ID) },
4582 { PCI_DEVICE(PCI_VENDOR_ID_MICRON, P320S_DEVICE_ID) },
4583 { PCI_DEVICE(PCI_VENDOR_ID_MICRON, P325M_DEVICE_ID) },
4584 { PCI_DEVICE(PCI_VENDOR_ID_MICRON, P420H_DEVICE_ID) },
4585 { PCI_DEVICE(PCI_VENDOR_ID_MICRON, P420M_DEVICE_ID) },
4586 { PCI_DEVICE(PCI_VENDOR_ID_MICRON, P425M_DEVICE_ID) },
4587 { 0 }
4588 };
4589
4590 /* Structure that describes the PCI driver functions. */
4591 static struct pci_driver mtip_pci_driver = {
4592 .name = MTIP_DRV_NAME,
4593 .id_table = mtip_pci_tbl,
4594 .probe = mtip_pci_probe,
4595 .remove = mtip_pci_remove,
4596 .suspend = mtip_pci_suspend,
4597 .resume = mtip_pci_resume,
4598 .shutdown = mtip_pci_shutdown,
4599 };
4600
4601 MODULE_DEVICE_TABLE(pci, mtip_pci_tbl);
4602
4603 /*
4604 * Module initialization function.
4605 *
4606 * Called once when the module is loaded. This function allocates a major
4607 * block device number to the Cyclone devices and registers the PCI layer
4608 * of the driver.
4609 *
4610 * Return value
4611 * 0 on success else error code.
4612 */
4613 static int __init mtip_init(void)
4614 {
4615 int error;
4616
4617 pr_info(MTIP_DRV_NAME " Version " MTIP_DRV_VERSION "\n");
4618
4619 spin_lock_init(&dev_lock);
4620
4621 INIT_LIST_HEAD(&online_list);
4622 INIT_LIST_HEAD(&removing_list);
4623
4624 /* Allocate a major block device number to use with this driver. */
4625 error = register_blkdev(0, MTIP_DRV_NAME);
4626 if (error <= 0) {
4627 pr_err("Unable to register block device (%d)\n",
4628 error);
4629 return -EBUSY;
4630 }
4631 mtip_major = error;
4632
4633 dfs_parent = debugfs_create_dir("rssd", NULL);
4634 if (IS_ERR_OR_NULL(dfs_parent)) {
4635 pr_warn("Error creating debugfs parent\n");
4636 dfs_parent = NULL;
4637 }
4638 if (dfs_parent) {
4639 dfs_device_status = debugfs_create_file("device_status",
4640 S_IRUGO, dfs_parent, NULL,
4641 &mtip_device_status_fops);
4642 if (IS_ERR_OR_NULL(dfs_device_status)) {
4643 pr_err("Error creating device_status node\n");
4644 dfs_device_status = NULL;
4645 }
4646 }
4647
4648 /* Register our PCI operations. */
4649 error = pci_register_driver(&mtip_pci_driver);
4650 if (error) {
4651 debugfs_remove(dfs_parent);
4652 unregister_blkdev(mtip_major, MTIP_DRV_NAME);
4653 }
4654
4655 return error;
4656 }
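/*
 * Editor's illustrative sketch, not part of the traced driver:
 * mtip_init() above follows the usual "acquire in order, release in
 * reverse on failure" module-init pattern (register_blkdev, debugfs,
 * pci_register_driver). A skeletal rendering with hypothetical
 * step_a/step_b/step_c helpers, where the last step fails:
 */
#include <stdio.h>

static int step_a(void) { return 0; }	/* e.g. register a device number */
static int step_b(void) { return 0; }	/* e.g. create debugfs entries */
static int step_c(void) { return -1; }	/* e.g. register the PCI driver -- fails */
static void undo_b(void) { puts("undo b"); }
static void undo_a(void) { puts("undo a"); }

int main(void)
{
	if (step_a())
		return 1;
	if (step_b())
		goto out_a;
	if (step_c())
		goto out_b;
	return 0;

out_b:
	undo_b();	/* release in reverse order of acquisition */
out_a:
	undo_a();
	return 1;
}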
4657
4658 /*
4659 * Module de-initialization function.
4660 *
4661 * Called once when the module is unloaded. This function deallocates
4662 * the major block device number allocated by mtip_init() and
4663 * unregisters the PCI layer of the driver.
4664 *
4665 * Return value
4666 * none
4667 */
4668 static void __exit mtip_exit(void)
4669 {
4670 /* Release the allocated major block device number. */
4671 unregister_blkdev(mtip_major, MTIP_DRV_NAME);
4672
4673 /* Unregister the PCI driver. */
4674 pci_unregister_driver(&mtip_pci_driver);
4675
4676 debugfs_remove_recursive(dfs_parent);
4677 }
4678
4679 MODULE_AUTHOR("Micron Technology, Inc");
4680 MODULE_DESCRIPTION("Micron RealSSD PCIe Block Driver");
4681 MODULE_LICENSE("GPL");
4682 MODULE_VERSION(MTIP_DRV_VERSION);
4683
4684 module_init(mtip_init);
4685 module_exit(mtip_exit);
4686
4687
4688
4689
4690
4691 /* LDV_COMMENT_BEGIN_MAIN */
4692 #ifdef LDV_MAIN0_sequence_infinite_withcheck_stateful
4693
4694 /*###########################################################################*/
4695
4696 /*############## Driver Environment Generator 0.2 output ####################*/
4697
4698 /*###########################################################################*/
4699
4700
4701
/* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Tests whether all kernel resources are correctly released by the driver before the driver is unloaded. */
4703 void ldv_check_final_state(void);
4704
4705 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result. */
4706 void ldv_check_return_value(int res);
4707
4708 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result of probe() function. */
4709 void ldv_check_return_value_probe(int res);
4710
4711 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Initializes the model. */
4712 void ldv_initialize(void);
4713
4714 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Reinitializes the model between distinct model function calls. */
4715 void ldv_handler_precall(void);
4716
/* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Returns an arbitrary integer value. */
4718 int nondet_int(void);
4719
4720 /* LDV_COMMENT_VAR_DECLARE_LDV Special variable for LDV verifier. */
4721 int LDV_IN_INTERRUPT;
4722
4723 /* LDV_COMMENT_FUNCTION_MAIN Main function for LDV verifier. */
4724 void ldv_main0_sequence_infinite_withcheck_stateful(void) {
4725
4726
4727
4728 /* LDV_COMMENT_BEGIN_VARIABLE_DECLARATION_PART */
4729 /*============================= VARIABLE DECLARATION PART =============================*/
4730 /** STRUCT: struct type: file_operations, struct name: mtip_device_status_fops **/
4731 /* content: static ssize_t mtip_hw_read_device_status(struct file *f, char __user *ubuf, size_t len, loff_t *offset)*/
4732 /* LDV_COMMENT_BEGIN_PREP */
4733 #define HW_CMD_SLOT_SZ (MTIP_MAX_COMMAND_SLOTS * 32)
4734 #define AHCI_RX_FIS_SZ 0x100
4735 #define AHCI_RX_FIS_OFFSET 0x0
4736 #define AHCI_IDFY_SZ ATA_SECT_SIZE
4737 #define AHCI_IDFY_OFFSET 0x400
4738 #define AHCI_SECTBUF_SZ ATA_SECT_SIZE
4739 #define AHCI_SECTBUF_OFFSET 0x800
4740 #define AHCI_SMARTBUF_SZ ATA_SECT_SIZE
4741 #define AHCI_SMARTBUF_OFFSET 0xC00
4742 #define BLOCK_DMA_ALLOC_SZ 4096
4743 #define AHCI_CMD_SLOT_SZ sizeof(struct mtip_cmd_hdr)
4744 #define AHCI_CMD_TBL_SZ (MTIP_MAX_COMMAND_SLOTS * AHCI_CMD_SLOT_SZ)
4745 #define AHCI_CMD_TBL_OFFSET 0x0
4746 #define AHCI_CMD_TBL_HDR_SZ 0x80
4747 #define AHCI_CMD_TBL_HDR_OFFSET 0x0
4748 #define AHCI_CMD_TBL_SGL_SZ (MTIP_MAX_SG * sizeof(struct mtip_cmd_sg))
4749 #define AHCI_CMD_TBL_SGL_OFFSET AHCI_CMD_TBL_HDR_SZ
4750 #define CMD_DMA_ALLOC_SZ (AHCI_CMD_TBL_SGL_SZ + AHCI_CMD_TBL_HDR_SZ)
4751 #define HOST_CAP_NZDMA (1 << 19)
4752 #define HOST_HSORG 0xFC
4753 #define HSORG_DISABLE_SLOTGRP_INTR (1<<24)
4754 #define HSORG_DISABLE_SLOTGRP_PXIS (1<<16)
4755 #define HSORG_HWREV 0xFF00
4756 #define HSORG_STYLE 0x8
4757 #define HSORG_SLOTGROUPS 0x7
4758 #define PORT_COMMAND_ISSUE 0x38
4759 #define PORT_SDBV 0x7C
4760 #define PORT_OFFSET 0x100
4761 #define PORT_MEM_SIZE 0x80
4762 #define PORT_IRQ_ERR \
4763 (PORT_IRQ_HBUS_ERR | PORT_IRQ_IF_ERR | PORT_IRQ_CONNECT | \
4764 PORT_IRQ_PHYRDY | PORT_IRQ_UNK_FIS | PORT_IRQ_BAD_PMP | \
4765 PORT_IRQ_TF_ERR | PORT_IRQ_HBUS_DATA_ERR | PORT_IRQ_IF_NONFATAL | \
4766 PORT_IRQ_OVERFLOW)
4767 #define PORT_IRQ_LEGACY \
4768 (PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS)
4769 #define PORT_IRQ_HANDLED \
4770 (PORT_IRQ_SDB_FIS | PORT_IRQ_LEGACY | \
4771 PORT_IRQ_TF_ERR | PORT_IRQ_IF_ERR | \
4772 PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)
4773 #define DEF_PORT_IRQ \
4774 (PORT_IRQ_ERR | PORT_IRQ_LEGACY | PORT_IRQ_SDB_FIS)
4775 #define MTIP_PRODUCT_UNKNOWN 0x00
4776 #define MTIP_PRODUCT_ASICFPGA 0x11
4777 #ifdef CONFIG_COMPAT
4778 #endif
4779 #ifdef __LITTLE_ENDIAN
4780 #else
4781 #endif
4782 #ifdef MTIP_TRIM
4783 #endif
4784 /* LDV_COMMENT_END_PREP */
4785 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mtip_hw_read_device_status" */
4786 struct file * var_group1;
4787 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mtip_hw_read_device_status" */
4788 char __user * var_mtip_hw_read_device_status_47_p1;
4789 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mtip_hw_read_device_status" */
4790 size_t var_mtip_hw_read_device_status_47_p2;
4791 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mtip_hw_read_device_status" */
4792 loff_t * var_mtip_hw_read_device_status_47_p3;
4793 /* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "mtip_hw_read_device_status" */
4794 static ssize_t res_mtip_hw_read_device_status_47;
4795 /* LDV_COMMENT_BEGIN_PREP */
4796 #ifdef CONFIG_COMPAT
4797 #endif
4798 #ifdef CONFIG_COMPAT
4799 #endif
4800 /* LDV_COMMENT_END_PREP */
4801
4802 /** STRUCT: struct type: file_operations, struct name: mtip_regs_fops **/
4803 /* content: static ssize_t mtip_hw_read_registers(struct file *f, char __user *ubuf, size_t len, loff_t *offset)*/
4804 /* LDV_COMMENT_BEGIN_PREP */
4805 #define HW_CMD_SLOT_SZ (MTIP_MAX_COMMAND_SLOTS * 32)
4806 #define AHCI_RX_FIS_SZ 0x100
4807 #define AHCI_RX_FIS_OFFSET 0x0
4808 #define AHCI_IDFY_SZ ATA_SECT_SIZE
4809 #define AHCI_IDFY_OFFSET 0x400
4810 #define AHCI_SECTBUF_SZ ATA_SECT_SIZE
4811 #define AHCI_SECTBUF_OFFSET 0x800
4812 #define AHCI_SMARTBUF_SZ ATA_SECT_SIZE
4813 #define AHCI_SMARTBUF_OFFSET 0xC00
4814 #define BLOCK_DMA_ALLOC_SZ 4096
4815 #define AHCI_CMD_SLOT_SZ sizeof(struct mtip_cmd_hdr)
4816 #define AHCI_CMD_TBL_SZ (MTIP_MAX_COMMAND_SLOTS * AHCI_CMD_SLOT_SZ)
4817 #define AHCI_CMD_TBL_OFFSET 0x0
4818 #define AHCI_CMD_TBL_HDR_SZ 0x80
4819 #define AHCI_CMD_TBL_HDR_OFFSET 0x0
4820 #define AHCI_CMD_TBL_SGL_SZ (MTIP_MAX_SG * sizeof(struct mtip_cmd_sg))
4821 #define AHCI_CMD_TBL_SGL_OFFSET AHCI_CMD_TBL_HDR_SZ
4822 #define CMD_DMA_ALLOC_SZ (AHCI_CMD_TBL_SGL_SZ + AHCI_CMD_TBL_HDR_SZ)
4823 #define HOST_CAP_NZDMA (1 << 19)
4824 #define HOST_HSORG 0xFC
4825 #define HSORG_DISABLE_SLOTGRP_INTR (1<<24)
4826 #define HSORG_DISABLE_SLOTGRP_PXIS (1<<16)
4827 #define HSORG_HWREV 0xFF00
4828 #define HSORG_STYLE 0x8
4829 #define HSORG_SLOTGROUPS 0x7
4830 #define PORT_COMMAND_ISSUE 0x38
4831 #define PORT_SDBV 0x7C
4832 #define PORT_OFFSET 0x100
4833 #define PORT_MEM_SIZE 0x80
4834 #define PORT_IRQ_ERR \
4835 (PORT_IRQ_HBUS_ERR | PORT_IRQ_IF_ERR | PORT_IRQ_CONNECT | \
4836 PORT_IRQ_PHYRDY | PORT_IRQ_UNK_FIS | PORT_IRQ_BAD_PMP | \
4837 PORT_IRQ_TF_ERR | PORT_IRQ_HBUS_DATA_ERR | PORT_IRQ_IF_NONFATAL | \
4838 PORT_IRQ_OVERFLOW)
4839 #define PORT_IRQ_LEGACY \
4840 (PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS)
4841 #define PORT_IRQ_HANDLED \
4842 (PORT_IRQ_SDB_FIS | PORT_IRQ_LEGACY | \
4843 PORT_IRQ_TF_ERR | PORT_IRQ_IF_ERR | \
4844 PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)
4845 #define DEF_PORT_IRQ \
4846 (PORT_IRQ_ERR | PORT_IRQ_LEGACY | PORT_IRQ_SDB_FIS)
4847 #define MTIP_PRODUCT_UNKNOWN 0x00
4848 #define MTIP_PRODUCT_ASICFPGA 0x11
4849 #ifdef CONFIG_COMPAT
4850 #endif
4851 #ifdef __LITTLE_ENDIAN
4852 #else
4853 #endif
4854 #ifdef MTIP_TRIM
4855 #endif
4856 /* LDV_COMMENT_END_PREP */
4857 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mtip_hw_read_registers" */
4858 char __user * var_mtip_hw_read_registers_48_p1;
4859 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mtip_hw_read_registers" */
4860 size_t var_mtip_hw_read_registers_48_p2;
4861 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mtip_hw_read_registers" */
4862 loff_t * var_mtip_hw_read_registers_48_p3;
4863 /* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "mtip_hw_read_registers" */
4864 static ssize_t res_mtip_hw_read_registers_48;
4865 /* LDV_COMMENT_BEGIN_PREP */
4866 #ifdef CONFIG_COMPAT
4867 #endif
4868 #ifdef CONFIG_COMPAT
4869 #endif
4870 /* LDV_COMMENT_END_PREP */
4871
4872 /** STRUCT: struct type: file_operations, struct name: mtip_flags_fops **/
4873 /* content: static ssize_t mtip_hw_read_flags(struct file *f, char __user *ubuf, size_t len, loff_t *offset)*/
4874 /* LDV_COMMENT_BEGIN_PREP */
4875 #define HW_CMD_SLOT_SZ (MTIP_MAX_COMMAND_SLOTS * 32)
4876 #define AHCI_RX_FIS_SZ 0x100
4877 #define AHCI_RX_FIS_OFFSET 0x0
4878 #define AHCI_IDFY_SZ ATA_SECT_SIZE
4879 #define AHCI_IDFY_OFFSET 0x400
4880 #define AHCI_SECTBUF_SZ ATA_SECT_SIZE
4881 #define AHCI_SECTBUF_OFFSET 0x800
4882 #define AHCI_SMARTBUF_SZ ATA_SECT_SIZE
4883 #define AHCI_SMARTBUF_OFFSET 0xC00
4884 #define BLOCK_DMA_ALLOC_SZ 4096
4885 #define AHCI_CMD_SLOT_SZ sizeof(struct mtip_cmd_hdr)
4886 #define AHCI_CMD_TBL_SZ (MTIP_MAX_COMMAND_SLOTS * AHCI_CMD_SLOT_SZ)
4887 #define AHCI_CMD_TBL_OFFSET 0x0
4888 #define AHCI_CMD_TBL_HDR_SZ 0x80
4889 #define AHCI_CMD_TBL_HDR_OFFSET 0x0
4890 #define AHCI_CMD_TBL_SGL_SZ (MTIP_MAX_SG * sizeof(struct mtip_cmd_sg))
4891 #define AHCI_CMD_TBL_SGL_OFFSET AHCI_CMD_TBL_HDR_SZ
4892 #define CMD_DMA_ALLOC_SZ (AHCI_CMD_TBL_SGL_SZ + AHCI_CMD_TBL_HDR_SZ)
4893 #define HOST_CAP_NZDMA (1 << 19)
4894 #define HOST_HSORG 0xFC
4895 #define HSORG_DISABLE_SLOTGRP_INTR (1<<24)
4896 #define HSORG_DISABLE_SLOTGRP_PXIS (1<<16)
4897 #define HSORG_HWREV 0xFF00
4898 #define HSORG_STYLE 0x8
4899 #define HSORG_SLOTGROUPS 0x7
4900 #define PORT_COMMAND_ISSUE 0x38
4901 #define PORT_SDBV 0x7C
4902 #define PORT_OFFSET 0x100
4903 #define PORT_MEM_SIZE 0x80
4904 #define PORT_IRQ_ERR \
4905 (PORT_IRQ_HBUS_ERR | PORT_IRQ_IF_ERR | PORT_IRQ_CONNECT | \
4906 PORT_IRQ_PHYRDY | PORT_IRQ_UNK_FIS | PORT_IRQ_BAD_PMP | \
4907 PORT_IRQ_TF_ERR | PORT_IRQ_HBUS_DATA_ERR | PORT_IRQ_IF_NONFATAL | \
4908 PORT_IRQ_OVERFLOW)
4909 #define PORT_IRQ_LEGACY \
4910 (PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS)
4911 #define PORT_IRQ_HANDLED \
4912 (PORT_IRQ_SDB_FIS | PORT_IRQ_LEGACY | \
4913 PORT_IRQ_TF_ERR | PORT_IRQ_IF_ERR | \
4914 PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)
4915 #define DEF_PORT_IRQ \
4916 (PORT_IRQ_ERR | PORT_IRQ_LEGACY | PORT_IRQ_SDB_FIS)
4917 #define MTIP_PRODUCT_UNKNOWN 0x00
4918 #define MTIP_PRODUCT_ASICFPGA 0x11
4919 #ifdef CONFIG_COMPAT
4920 #endif
4921 #ifdef __LITTLE_ENDIAN
4922 #else
4923 #endif
4924 #ifdef MTIP_TRIM
4925 #endif
4926 /* LDV_COMMENT_END_PREP */
4927 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mtip_hw_read_flags" */
4928 char __user * var_mtip_hw_read_flags_49_p1;
4929 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mtip_hw_read_flags" */
4930 size_t var_mtip_hw_read_flags_49_p2;
4931 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mtip_hw_read_flags" */
4932 loff_t * var_mtip_hw_read_flags_49_p3;
4933 /* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "mtip_hw_read_flags" */
4934 static ssize_t res_mtip_hw_read_flags_49;
4935 /* LDV_COMMENT_BEGIN_PREP */
4936 #ifdef CONFIG_COMPAT
4937 #endif
4938 #ifdef CONFIG_COMPAT
4939 #endif
4940 /* LDV_COMMENT_END_PREP */
4941
4942 /** STRUCT: struct type: block_device_operations, struct name: mtip_block_ops **/
4943 /* content: static int mtip_block_ioctl(struct block_device *dev, fmode_t mode, unsigned cmd, unsigned long arg)*/
4944 /* LDV_COMMENT_BEGIN_PREP */
4945 #define HW_CMD_SLOT_SZ (MTIP_MAX_COMMAND_SLOTS * 32)
4946 #define AHCI_RX_FIS_SZ 0x100
4947 #define AHCI_RX_FIS_OFFSET 0x0
4948 #define AHCI_IDFY_SZ ATA_SECT_SIZE
4949 #define AHCI_IDFY_OFFSET 0x400
4950 #define AHCI_SECTBUF_SZ ATA_SECT_SIZE
4951 #define AHCI_SECTBUF_OFFSET 0x800
4952 #define AHCI_SMARTBUF_SZ ATA_SECT_SIZE
4953 #define AHCI_SMARTBUF_OFFSET 0xC00
4954 #define BLOCK_DMA_ALLOC_SZ 4096
4955 #define AHCI_CMD_SLOT_SZ sizeof(struct mtip_cmd_hdr)
4956 #define AHCI_CMD_TBL_SZ (MTIP_MAX_COMMAND_SLOTS * AHCI_CMD_SLOT_SZ)
4957 #define AHCI_CMD_TBL_OFFSET 0x0
4958 #define AHCI_CMD_TBL_HDR_SZ 0x80
4959 #define AHCI_CMD_TBL_HDR_OFFSET 0x0
4960 #define AHCI_CMD_TBL_SGL_SZ (MTIP_MAX_SG * sizeof(struct mtip_cmd_sg))
4961 #define AHCI_CMD_TBL_SGL_OFFSET AHCI_CMD_TBL_HDR_SZ
4962 #define CMD_DMA_ALLOC_SZ (AHCI_CMD_TBL_SGL_SZ + AHCI_CMD_TBL_HDR_SZ)
4963 #define HOST_CAP_NZDMA (1 << 19)
4964 #define HOST_HSORG 0xFC
4965 #define HSORG_DISABLE_SLOTGRP_INTR (1<<24)
4966 #define HSORG_DISABLE_SLOTGRP_PXIS (1<<16)
4967 #define HSORG_HWREV 0xFF00
4968 #define HSORG_STYLE 0x8
4969 #define HSORG_SLOTGROUPS 0x7
4970 #define PORT_COMMAND_ISSUE 0x38
4971 #define PORT_SDBV 0x7C
4972 #define PORT_OFFSET 0x100
4973 #define PORT_MEM_SIZE 0x80
4974 #define PORT_IRQ_ERR \
4975 (PORT_IRQ_HBUS_ERR | PORT_IRQ_IF_ERR | PORT_IRQ_CONNECT | \
4976 PORT_IRQ_PHYRDY | PORT_IRQ_UNK_FIS | PORT_IRQ_BAD_PMP | \
4977 PORT_IRQ_TF_ERR | PORT_IRQ_HBUS_DATA_ERR | PORT_IRQ_IF_NONFATAL | \
4978 PORT_IRQ_OVERFLOW)
4979 #define PORT_IRQ_LEGACY \
4980 (PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS)
4981 #define PORT_IRQ_HANDLED \
4982 (PORT_IRQ_SDB_FIS | PORT_IRQ_LEGACY | \
4983 PORT_IRQ_TF_ERR | PORT_IRQ_IF_ERR | \
4984 PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)
4985 #define DEF_PORT_IRQ \
4986 (PORT_IRQ_ERR | PORT_IRQ_LEGACY | PORT_IRQ_SDB_FIS)
4987 #define MTIP_PRODUCT_UNKNOWN 0x00
4988 #define MTIP_PRODUCT_ASICFPGA 0x11
4989 #ifdef CONFIG_COMPAT
4990 #endif
4991 #ifdef __LITTLE_ENDIAN
4992 #else
4993 #endif
4994 #ifdef MTIP_TRIM
4995 #endif
4996 /* LDV_COMMENT_END_PREP */
4997 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mtip_block_ioctl" */
4998 struct block_device * var_group2;
4999 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mtip_block_ioctl" */
5000 fmode_t var_mtip_block_ioctl_69_p1;
5001 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mtip_block_ioctl" */
5002 unsigned var_mtip_block_ioctl_69_p2;
5003 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mtip_block_ioctl" */
5004 unsigned long var_mtip_block_ioctl_69_p3;
5005 /* LDV_COMMENT_BEGIN_PREP */
5006 #ifdef CONFIG_COMPAT
5007 #endif
5008 #ifdef CONFIG_COMPAT
5009 #endif
5010 /* LDV_COMMENT_END_PREP */
5011 /* content: static int mtip_block_compat_ioctl(struct block_device *dev, fmode_t mode, unsigned cmd, unsigned long arg)*/
5012 /* LDV_COMMENT_BEGIN_PREP */
5013 #define HW_CMD_SLOT_SZ (MTIP_MAX_COMMAND_SLOTS * 32)
5014 #define AHCI_RX_FIS_SZ 0x100
5015 #define AHCI_RX_FIS_OFFSET 0x0
5016 #define AHCI_IDFY_SZ ATA_SECT_SIZE
5017 #define AHCI_IDFY_OFFSET 0x400
5018 #define AHCI_SECTBUF_SZ ATA_SECT_SIZE
5019 #define AHCI_SECTBUF_OFFSET 0x800
5020 #define AHCI_SMARTBUF_SZ ATA_SECT_SIZE
5021 #define AHCI_SMARTBUF_OFFSET 0xC00
5022 #define BLOCK_DMA_ALLOC_SZ 4096
5023 #define AHCI_CMD_SLOT_SZ sizeof(struct mtip_cmd_hdr)
5024 #define AHCI_CMD_TBL_SZ (MTIP_MAX_COMMAND_SLOTS * AHCI_CMD_SLOT_SZ)
5025 #define AHCI_CMD_TBL_OFFSET 0x0
5026 #define AHCI_CMD_TBL_HDR_SZ 0x80
5027 #define AHCI_CMD_TBL_HDR_OFFSET 0x0
5028 #define AHCI_CMD_TBL_SGL_SZ (MTIP_MAX_SG * sizeof(struct mtip_cmd_sg))
5029 #define AHCI_CMD_TBL_SGL_OFFSET AHCI_CMD_TBL_HDR_SZ
5030 #define CMD_DMA_ALLOC_SZ (AHCI_CMD_TBL_SGL_SZ + AHCI_CMD_TBL_HDR_SZ)
5031 #define HOST_CAP_NZDMA (1 << 19)
5032 #define HOST_HSORG 0xFC
5033 #define HSORG_DISABLE_SLOTGRP_INTR (1<<24)
5034 #define HSORG_DISABLE_SLOTGRP_PXIS (1<<16)
5035 #define HSORG_HWREV 0xFF00
5036 #define HSORG_STYLE 0x8
5037 #define HSORG_SLOTGROUPS 0x7
5038 #define PORT_COMMAND_ISSUE 0x38
5039 #define PORT_SDBV 0x7C
5040 #define PORT_OFFSET 0x100
5041 #define PORT_MEM_SIZE 0x80
5042 #define PORT_IRQ_ERR \
5043 (PORT_IRQ_HBUS_ERR | PORT_IRQ_IF_ERR | PORT_IRQ_CONNECT | \
5044 PORT_IRQ_PHYRDY | PORT_IRQ_UNK_FIS | PORT_IRQ_BAD_PMP | \
5045 PORT_IRQ_TF_ERR | PORT_IRQ_HBUS_DATA_ERR | PORT_IRQ_IF_NONFATAL | \
5046 PORT_IRQ_OVERFLOW)
5047 #define PORT_IRQ_LEGACY \
5048 (PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS)
5049 #define PORT_IRQ_HANDLED \
5050 (PORT_IRQ_SDB_FIS | PORT_IRQ_LEGACY | \
5051 PORT_IRQ_TF_ERR | PORT_IRQ_IF_ERR | \
5052 PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)
5053 #define DEF_PORT_IRQ \
5054 (PORT_IRQ_ERR | PORT_IRQ_LEGACY | PORT_IRQ_SDB_FIS)
5055 #define MTIP_PRODUCT_UNKNOWN 0x00
5056 #define MTIP_PRODUCT_ASICFPGA 0x11
5057 #ifdef CONFIG_COMPAT
5058 #endif
5059 #ifdef __LITTLE_ENDIAN
5060 #else
5061 #endif
5062 #ifdef MTIP_TRIM
5063 #endif
5064 #ifdef CONFIG_COMPAT
5065 /* LDV_COMMENT_END_PREP */
5066 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mtip_block_compat_ioctl" */
5067 fmode_t var_mtip_block_compat_ioctl_70_p1;
5068 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mtip_block_compat_ioctl" */
5069 unsigned var_mtip_block_compat_ioctl_70_p2;
5070 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mtip_block_compat_ioctl" */
5071 unsigned long var_mtip_block_compat_ioctl_70_p3;
5072 /* LDV_COMMENT_BEGIN_PREP */
5073 #endif
5074 #ifdef CONFIG_COMPAT
5075 #endif
5076 /* LDV_COMMENT_END_PREP */
5077 /* content: static int mtip_block_getgeo(struct block_device *dev, struct hd_geometry *geo)*/
5078 /* LDV_COMMENT_BEGIN_PREP */
5079 #define HW_CMD_SLOT_SZ (MTIP_MAX_COMMAND_SLOTS * 32)
5080 #define AHCI_RX_FIS_SZ 0x100
5081 #define AHCI_RX_FIS_OFFSET 0x0
5082 #define AHCI_IDFY_SZ ATA_SECT_SIZE
5083 #define AHCI_IDFY_OFFSET 0x400
5084 #define AHCI_SECTBUF_SZ ATA_SECT_SIZE
5085 #define AHCI_SECTBUF_OFFSET 0x800
5086 #define AHCI_SMARTBUF_SZ ATA_SECT_SIZE
5087 #define AHCI_SMARTBUF_OFFSET 0xC00
5088 #define BLOCK_DMA_ALLOC_SZ 4096
5089 #define AHCI_CMD_SLOT_SZ sizeof(struct mtip_cmd_hdr)
5090 #define AHCI_CMD_TBL_SZ (MTIP_MAX_COMMAND_SLOTS * AHCI_CMD_SLOT_SZ)
5091 #define AHCI_CMD_TBL_OFFSET 0x0
5092 #define AHCI_CMD_TBL_HDR_SZ 0x80
5093 #define AHCI_CMD_TBL_HDR_OFFSET 0x0
5094 #define AHCI_CMD_TBL_SGL_SZ (MTIP_MAX_SG * sizeof(struct mtip_cmd_sg))
5095 #define AHCI_CMD_TBL_SGL_OFFSET AHCI_CMD_TBL_HDR_SZ
5096 #define CMD_DMA_ALLOC_SZ (AHCI_CMD_TBL_SGL_SZ + AHCI_CMD_TBL_HDR_SZ)
5097 #define HOST_CAP_NZDMA (1 << 19)
5098 #define HOST_HSORG 0xFC
5099 #define HSORG_DISABLE_SLOTGRP_INTR (1<<24)
5100 #define HSORG_DISABLE_SLOTGRP_PXIS (1<<16)
5101 #define HSORG_HWREV 0xFF00
5102 #define HSORG_STYLE 0x8
5103 #define HSORG_SLOTGROUPS 0x7
5104 #define PORT_COMMAND_ISSUE 0x38
5105 #define PORT_SDBV 0x7C
5106 #define PORT_OFFSET 0x100
5107 #define PORT_MEM_SIZE 0x80
5108 #define PORT_IRQ_ERR \
5109 (PORT_IRQ_HBUS_ERR | PORT_IRQ_IF_ERR | PORT_IRQ_CONNECT | \
5110 PORT_IRQ_PHYRDY | PORT_IRQ_UNK_FIS | PORT_IRQ_BAD_PMP | \
5111 PORT_IRQ_TF_ERR | PORT_IRQ_HBUS_DATA_ERR | PORT_IRQ_IF_NONFATAL | \
5112 PORT_IRQ_OVERFLOW)
5113 #define PORT_IRQ_LEGACY \
5114 (PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS)
5115 #define PORT_IRQ_HANDLED \
5116 (PORT_IRQ_SDB_FIS | PORT_IRQ_LEGACY | \
5117 PORT_IRQ_TF_ERR | PORT_IRQ_IF_ERR | \
5118 PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)
5119 #define DEF_PORT_IRQ \
5120 (PORT_IRQ_ERR | PORT_IRQ_LEGACY | PORT_IRQ_SDB_FIS)
5121 #define MTIP_PRODUCT_UNKNOWN 0x00
5122 #define MTIP_PRODUCT_ASICFPGA 0x11
5123 #ifdef CONFIG_COMPAT
5124 #endif
5125 #ifdef __LITTLE_ENDIAN
5126 #else
5127 #endif
5128 #ifdef MTIP_TRIM
5129 #endif
5130 #ifdef CONFIG_COMPAT
5131 #endif
5132 /* LDV_COMMENT_END_PREP */
5133 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mtip_block_getgeo" */
5134 struct hd_geometry * var_group3;
5135 /* LDV_COMMENT_BEGIN_PREP */
5136 #ifdef CONFIG_COMPAT
5137 #endif
5138 /* LDV_COMMENT_END_PREP */
5139
5140 /** STRUCT: struct type: blk_mq_ops, struct name: mtip_mq_ops **/
5141 /* content: static int mtip_queue_rq(struct blk_mq_hw_ctx *hctx, const struct blk_mq_queue_data *bd)*/
5142 /* LDV_COMMENT_BEGIN_PREP */
/* preprocessor context (trace lines 5143-5197): same AHCI/HSORG/PORT_IRQ #define block as at lines 5079-5131 above */
5198 /* LDV_COMMENT_END_PREP */
5199 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mtip_queue_rq" */
5200 struct blk_mq_hw_ctx * var_group4;
5201 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mtip_queue_rq" */
5202 const struct blk_mq_queue_data * var_mtip_queue_rq_75_p1;
5203 /* content: static int mtip_init_cmd(void *data, struct request *rq, unsigned int hctx_idx, unsigned int request_idx, unsigned int numa_node)*/
5204 /* LDV_COMMENT_BEGIN_PREP */
/* preprocessor context (trace lines 5205-5259): same AHCI/HSORG/PORT_IRQ #define block as at lines 5079-5131 above */
5260 /* LDV_COMMENT_END_PREP */
5261 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mtip_init_cmd" */
5262 void * var_mtip_init_cmd_77_p0;
5263 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mtip_init_cmd" */
5264 struct request * var_group5;
5265 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mtip_init_cmd" */
5266 unsigned int var_mtip_init_cmd_77_p2;
5267 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mtip_init_cmd" */
5268 unsigned int var_mtip_init_cmd_77_p3;
5269 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mtip_init_cmd" */
5270 unsigned int var_mtip_init_cmd_77_p4;
5271 /* content: static void mtip_free_cmd(void *data, struct request *rq, unsigned int hctx_idx, unsigned int request_idx)*/
5272 /* LDV_COMMENT_BEGIN_PREP */
/* preprocessor context (trace lines 5273-5327): same AHCI/HSORG/PORT_IRQ #define block as at lines 5079-5131 above */
5328 /* LDV_COMMENT_END_PREP */
5329 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mtip_free_cmd" */
5330 void * var_mtip_free_cmd_76_p0;
5331 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mtip_free_cmd" */
5332 unsigned int var_mtip_free_cmd_76_p2;
5333 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mtip_free_cmd" */
5334 unsigned int var_mtip_free_cmd_76_p3;
5335
5336 /** STRUCT: struct type: pci_driver, struct name: mtip_pci_driver **/
5337 /* content: static int mtip_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)*/
5338 /* LDV_COMMENT_BEGIN_PREP */
/* preprocessor context (trace lines 5339-5393): same AHCI/HSORG/PORT_IRQ #define block as at lines 5079-5131 above */
5394 /* LDV_COMMENT_END_PREP */
5395 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mtip_pci_probe" */
5396 struct pci_dev * var_group6;
5397 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mtip_pci_probe" */
5398 const struct pci_device_id * var_mtip_pci_probe_88_p1;
5399 /* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "mtip_pci_probe" */
5400 static int res_mtip_pci_probe_88;
5401 /* content: static void mtip_pci_remove(struct pci_dev *pdev)*/
5402 /* LDV_COMMENT_BEGIN_PREP */
/* preprocessor context (trace lines 5403-5457): same AHCI/HSORG/PORT_IRQ #define block as at lines 5079-5131 above */
5458 /* LDV_COMMENT_END_PREP */
5459 /* content: static int mtip_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)*/
5460 /* LDV_COMMENT_BEGIN_PREP */
/* preprocessor context (trace lines 5461-5515): same AHCI/HSORG/PORT_IRQ #define block as at lines 5079-5131 above */
5516 /* LDV_COMMENT_END_PREP */
5517 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mtip_pci_suspend" */
5518 pm_message_t var_mtip_pci_suspend_90_p1;
5519 /* content: static int mtip_pci_resume(struct pci_dev *pdev)*/
5520 /* LDV_COMMENT_BEGIN_PREP */
/* preprocessor context (trace lines 5521-5575): same AHCI/HSORG/PORT_IRQ #define block as at lines 5079-5131 above */
5576 /* LDV_COMMENT_END_PREP */
5577 /* content: static void mtip_pci_shutdown(struct pci_dev *pdev)*/
5578 /* LDV_COMMENT_BEGIN_PREP */
/* preprocessor context (trace lines 5579-5633): same AHCI/HSORG/PORT_IRQ #define block as at lines 5079-5131 above */
5634 /* LDV_COMMENT_END_PREP */
5635
5636 /** CALLBACK SECTION request_irq **/
5637 /* content: static irqreturn_t mtip_irq_handler(int irq, void *instance)*/
5638 /* LDV_COMMENT_BEGIN_PREP */
/* preprocessor context (trace lines 5639-5684): same AHCI/HSORG/PORT_IRQ #define block as at lines 5079-5131 above, up to its first #ifdef CONFIG_COMPAT guard */
5685 /* LDV_COMMENT_END_PREP */
5686 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mtip_irq_handler" */
5687 int var_mtip_irq_handler_23_p0;
5688 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mtip_irq_handler" */
5689 void * var_mtip_irq_handler_23_p1;
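/* These two nondet parameters model what request_irq() passes to the handler
 * at interrupt time: the irq number and the dev_id cookie the driver
 * registered. Their values are left unconstrained so the checker explores
 * every possible invocation. */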
5690 /* LDV_COMMENT_BEGIN_PREP */
5691 #ifdef __LITTLE_ENDIAN
5692 #else
5693 #endif
5694 #ifdef MTIP_TRIM
5695 #endif
5696 #ifdef CONFIG_COMPAT
5697 #endif
5698 #ifdef CONFIG_COMPAT
5699 #endif
5700 /* LDV_COMMENT_END_PREP */
5701
5702
5703
5704
5705 /* LDV_COMMENT_END_VARIABLE_DECLARATION_PART */
5706 /* LDV_COMMENT_BEGIN_VARIABLE_INITIALIZING_PART */
5707 /*============================= VARIABLE INITIALIZING PART =============================*/
5708 LDV_IN_INTERRUPT=1;
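/* LDV_IN_INTERRUPT is part of the LDV environment model, not driver state:
 * the value 1 presumably marks that callbacks may run in interrupt context,
 * so interrupt-context rules are enforced until the model resets the flag. */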
5709
5710
5711
5712
5713 /* LDV_COMMENT_END_VARIABLE_INITIALIZING_PART */
5714 /* LDV_COMMENT_BEGIN_FUNCTION_CALL_SECTION */
5715 /*============================= FUNCTION CALL SECTION =============================*/
5716 /* LDV_COMMENT_FUNCTION_CALL Initialize LDV model. */
5717 ldv_initialize();
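/* ldv_initialize() sets up the nondeterministic LDV environment model; the
 * variables declared above are then fed to the driver callbacks with
 * effectively unconstrained values. */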
5718
5719 /** INIT: init_type: ST_MODULE_INIT **/
5720 /* content: static int __init mtip_init(void)*/
5721 /* LDV_COMMENT_BEGIN_PREP */
/* preprocessor context (trace lines 5722-5776): same AHCI/HSORG/PORT_IRQ #define block as at lines 5079-5131 above */
5777 /* LDV_COMMENT_END_PREP */
5778 /* LDV_COMMENT_FUNCTION_CALL The kernel calls the driver's init function after the module is loaded; this function is declared as "MODULE_INIT(function name)". */
5779 ldv_handler_precall();
5780 if(mtip_init())
5781 goto ldv_final;
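/* A nonzero return from mtip_init() models a failed module load: the harness
 * skips every callback scenario below and jumps straight to finalization. */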
5782 int ldv_s_mtip_device_status_fops_file_operations = 0;
5783
5784 int ldv_s_mtip_regs_fops_file_operations = 0;
5785
5786 int ldv_s_mtip_flags_fops_file_operations = 0;
5787
5788
5789
5790
5791
5792 int ldv_s_mtip_pci_driver_pci_driver = 0;
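/* The ldv_s_* counters are per-structure state machines: a scenario may only
 * fire when its counter has the expected value, and the loop below cannot end
 * while any counter is nonzero, i.e. while an obligation (such as a matching
 * PCI remove after a successful probe) is still pending. */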
5793
5794
5795
5796
5797
5798 while( nondet_int()
5799 || !(ldv_s_mtip_device_status_fops_file_operations == 0)
5800 || !(ldv_s_mtip_regs_fops_file_operations == 0)
5801 || !(ldv_s_mtip_flags_fops_file_operations == 0)
5802 || !(ldv_s_mtip_pci_driver_pci_driver == 0)
5803 ) {
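/* The loop models an arbitrary environment: nondet_int() lets the checker
 * explore any number of iterations and, via the switch below, any order of
 * driver callbacks. A minimal sketch of the generated pattern (illustrative
 * names only, not part of the driver):
 *
 *     while (nondet_int() || obligations_pending()) {
 *         switch (nondet_int()) {
 *         case 0: one_driver_callback(...); break;
 *         // ... one case per registered callback ...
 *         }
 *     }
 */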
5804
5805 switch(nondet_int()) {
5806
5807 case 0: {
5808
5809 /** STRUCT: struct type: file_operations, struct name: mtip_device_status_fops **/
5810 if(ldv_s_mtip_device_status_fops_file_operations==0) {
5811
5812 /* content: static ssize_t mtip_hw_read_device_status(struct file *f, char __user *ubuf, size_t len, loff_t *offset)*/
5813 /* LDV_COMMENT_BEGIN_PREP */
/* preprocessor context (trace lines 5814-5864): same AHCI/HSORG/PORT_IRQ #define block as at lines 5079-5131 above */
5865 /* LDV_COMMENT_END_PREP */
5866 /* LDV_COMMENT_FUNCTION_CALL Call the "read" callback of the "mtip_device_status_fops" file_operations structure. Standard test for a correct return result. */
5867 ldv_handler_precall();
5868 res_mtip_hw_read_device_status_47 = mtip_hw_read_device_status( var_group1, var_mtip_hw_read_device_status_47_p1, var_mtip_hw_read_device_status_47_p2, var_mtip_hw_read_device_status_47_p3);
5869 ldv_check_return_value(res_mtip_hw_read_device_status_47);
5870 if(res_mtip_hw_read_device_status_47 < 0)
5871 goto ldv_module_exit;
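/* Pattern for return-checked callbacks: ldv_handler_precall() precedes the
 * call, ldv_check_return_value() records the result for the checker, and a
 * negative ssize_t aborts the scenario through the module-exit path. */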
5872 /* LDV_COMMENT_BEGIN_PREP */
5873 #ifdef CONFIG_COMPAT
5874 #endif
5875 #ifdef CONFIG_COMPAT
5876 #endif
5877 /* LDV_COMMENT_END_PREP */
5878 ldv_s_mtip_device_status_fops_file_operations=0;
5879
5880 }
5881
5882 }
5883
5884 break;
5885 case 1: {
5886
5887 /** STRUCT: struct type: file_operations, struct name: mtip_regs_fops **/
5888 if(ldv_s_mtip_regs_fops_file_operations==0) {
5889
5890 /* content: static ssize_t mtip_hw_read_registers(struct file *f, char __user *ubuf, size_t len, loff_t *offset)*/
5891 /* LDV_COMMENT_BEGIN_PREP */
/* preprocessor context (trace lines 5892-5942): same AHCI/HSORG/PORT_IRQ #define block as at lines 5079-5131 above */
5943 /* LDV_COMMENT_END_PREP */
5944 /* LDV_COMMENT_FUNCTION_CALL Call the "read" callback of the "mtip_regs_fops" file_operations structure. Standard test for a correct return result. */
5945 ldv_handler_precall();
5946 res_mtip_hw_read_registers_48 = mtip_hw_read_registers( var_group1, var_mtip_hw_read_registers_48_p1, var_mtip_hw_read_registers_48_p2, var_mtip_hw_read_registers_48_p3);
5947 ldv_check_return_value(res_mtip_hw_read_registers_48);
5948 if(res_mtip_hw_read_registers_48 < 0)
5949 goto ldv_module_exit;
5950 /* LDV_COMMENT_BEGIN_PREP */
5951 #ifdef CONFIG_COMPAT
5952 #endif
5953 #ifdef CONFIG_COMPAT
5954 #endif
5955 /* LDV_COMMENT_END_PREP */
5956 ldv_s_mtip_regs_fops_file_operations=0;
5957
5958 }
5959
5960 }
5961
5962 break;
5963 case 2: {
5964
5965 /** STRUCT: struct type: file_operations, struct name: mtip_flags_fops **/
5966 if(ldv_s_mtip_flags_fops_file_operations==0) {
5967
5968 /* content: static ssize_t mtip_hw_read_flags(struct file *f, char __user *ubuf, size_t len, loff_t *offset)*/
5969 /* LDV_COMMENT_BEGIN_PREP */
/* preprocessor context (trace lines 5970-6020): same AHCI/HSORG/PORT_IRQ #define block as at lines 5079-5131 above */
6021 /* LDV_COMMENT_END_PREP */
6022 /* LDV_COMMENT_FUNCTION_CALL Call the "read" callback of the "mtip_flags_fops" file_operations structure. Standard test for a correct return result. */
6023 ldv_handler_precall();
6024 res_mtip_hw_read_flags_49 = mtip_hw_read_flags( var_group1, var_mtip_hw_read_flags_49_p1, var_mtip_hw_read_flags_49_p2, var_mtip_hw_read_flags_49_p3);
6025 ldv_check_return_value(res_mtip_hw_read_flags_49);
6026 if(res_mtip_hw_read_flags_49 < 0)
6027 goto ldv_module_exit;
6028 /* LDV_COMMENT_BEGIN_PREP */
6029 #ifdef CONFIG_COMPAT
6030 #endif
6031 #ifdef CONFIG_COMPAT
6032 #endif
6033 /* LDV_COMMENT_END_PREP */
6034 ldv_s_mtip_flags_fops_file_operations=0;
6035
6036 }
6037
6038 }
6039
6040 break;
6041 case 3: {
6042
6043 /** STRUCT: struct type: block_device_operations, struct name: mtip_block_ops **/
6044
6045
6046 /* content: static int mtip_block_ioctl(struct block_device *dev, fmode_t mode, unsigned cmd, unsigned long arg)*/
6047 /* LDV_COMMENT_BEGIN_PREP */
/* preprocessor context (trace lines 6048-6098): same AHCI/HSORG/PORT_IRQ #define block as at lines 5079-5131 above */
6099 /* LDV_COMMENT_END_PREP */
6100 /* LDV_COMMENT_FUNCTION_CALL Call the "ioctl" callback of the "mtip_block_ops" block_device_operations structure. */
6101 ldv_handler_precall();
6102 mtip_block_ioctl( var_group2, var_mtip_block_ioctl_69_p1, var_mtip_block_ioctl_69_p2, var_mtip_block_ioctl_69_p3);
6103 /* LDV_COMMENT_BEGIN_PREP */
6104 #ifdef CONFIG_COMPAT
6105 #endif
6106 #ifdef CONFIG_COMPAT
6107 #endif
6108 /* LDV_COMMENT_END_PREP */
6109
6110
6111
6112
6113 }
6114
6115 break;
6116 case 4: {
6117
6118 /** STRUCT: struct type: block_device_operations, struct name: mtip_block_ops **/
6119
6120
6121 /* content: static int mtip_block_compat_ioctl(struct block_device *dev, fmode_t mode, unsigned cmd, unsigned long arg)*/
6122 /* LDV_COMMENT_BEGIN_PREP */
/* preprocessor context (trace lines 6123-6173): same AHCI/HSORG/PORT_IRQ #define block as at lines 5079-5131 above; the trailing #ifdef CONFIG_COMPAT guard wrapping the call is kept below */
6174 #ifdef CONFIG_COMPAT
6175 /* LDV_COMMENT_END_PREP */
6176 /* LDV_COMMENT_FUNCTION_CALL Call the "compat_ioctl" callback of the "mtip_block_ops" block_device_operations structure. */
6177 ldv_handler_precall();
6178 mtip_block_compat_ioctl( var_group2, var_mtip_block_compat_ioctl_70_p1, var_mtip_block_compat_ioctl_70_p2, var_mtip_block_compat_ioctl_70_p3);
6179 /* LDV_COMMENT_BEGIN_PREP */
6180 #endif
6181 #ifdef CONFIG_COMPAT
6182 #endif
6183 /* LDV_COMMENT_END_PREP */
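/* Note the guard: mtip_block_compat_ioctl() is only compiled and called when
 * CONFIG_COMPAT is set; the #ifdef opened before the call is closed just
 * above. */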
6184
6185
6186
6187
6188 }
6189
6190 break;
6191 case 5: {
6192
6193 /** STRUCT: struct type: block_device_operations, struct name: mtip_block_ops **/
6194
6195
6196 /* content: static int mtip_block_getgeo(struct block_device *dev, struct hd_geometry *geo)*/
6197 /* LDV_COMMENT_BEGIN_PREP */
/* preprocessor context (trace lines 6198-6250): same AHCI/HSORG/PORT_IRQ #define block as at lines 5079-5131 above */
6251 /* LDV_COMMENT_END_PREP */
6252 /* LDV_COMMENT_FUNCTION_CALL Call the "getgeo" callback of the "mtip_block_ops" block_device_operations structure. */
6253 ldv_handler_precall();
6254 mtip_block_getgeo( var_group2, var_group3);
6255 /* LDV_COMMENT_BEGIN_PREP */
6256 #ifdef CONFIG_COMPAT
6257 #endif
6258 /* LDV_COMMENT_END_PREP */
6259
6260
6261
6262
6263 }
6264
6265 break;
6266 case 6: {
6267
6268 /** STRUCT: struct type: blk_mq_ops, struct name: mtip_mq_ops **/
6269
6270
6271 /* content: static int mtip_queue_rq(struct blk_mq_hw_ctx *hctx, const struct blk_mq_queue_data *bd)*/
6272 /* LDV_COMMENT_BEGIN_PREP */
/* preprocessor context (trace lines 6273-6327): same AHCI/HSORG/PORT_IRQ #define block as at lines 5079-5131 above */
6328 /* LDV_COMMENT_END_PREP */
6329 /* LDV_COMMENT_FUNCTION_CALL Call the "queue_rq" callback of the "mtip_mq_ops" blk_mq_ops structure. */
6330 ldv_handler_precall();
6331 mtip_queue_rq( var_group4, var_mtip_queue_rq_75_p1);
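/* Unlike the file_operations read scenarios, the return value of queue_rq is
 * not checked here; the harness only exercises the call path. */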
6332
6333
6334
6335
6336 }
6337
6338 break;
6339 case 7: {
6340
6341 /** STRUCT: struct type: blk_mq_ops, struct name: mtip_mq_ops **/
6342
6343
6344 /* content: static int mtip_init_cmd(void *data, struct request *rq, unsigned int hctx_idx, unsigned int request_idx, unsigned int numa_node)*/
6345 /* LDV_COMMENT_BEGIN_PREP */
/* preprocessor context (trace lines 6346-6400): same AHCI/HSORG/PORT_IRQ #define block as at lines 5079-5131 above */
6401 /* LDV_COMMENT_END_PREP */
6402 /* LDV_COMMENT_FUNCTION_CALL Call the "init_request" callback of the "mtip_mq_ops" blk_mq_ops structure. */
6403 ldv_handler_precall();
6404 mtip_init_cmd( var_mtip_init_cmd_77_p0, var_group5, var_mtip_init_cmd_77_p2, var_mtip_init_cmd_77_p3, var_mtip_init_cmd_77_p4);
6405
6406
6407
6408
6409 }
6410
6411 break;
6412 case 8: {
6413
6414 /** STRUCT: struct type: blk_mq_ops, struct name: mtip_mq_ops **/
6415
6416
6417 /* content: static void mtip_free_cmd(void *data, struct request *rq, unsigned int hctx_idx, unsigned int request_idx)*/
6418 /* LDV_COMMENT_BEGIN_PREP */
/* preprocessor context (trace lines 6419-6473): same AHCI/HSORG/PORT_IRQ #define block as at lines 5079-5131 above */
6474 /* LDV_COMMENT_END_PREP */
6475 /* LDV_COMMENT_FUNCTION_CALL Call the "exit_request" callback of the "mtip_mq_ops" blk_mq_ops structure. */
6476 ldv_handler_precall();
6477 mtip_free_cmd( var_mtip_free_cmd_76_p0, var_group5, var_mtip_free_cmd_76_p2, var_mtip_free_cmd_76_p3);
6478
6479
6480
6481
6482 }
6483
6484 break;
6485 case 9: {
6486
6487 /** STRUCT: struct type: pci_driver, struct name: mtip_pci_driver **/
6488 if(ldv_s_mtip_pci_driver_pci_driver==0) {
6489
6490 /* content: static int mtip_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)*/
6491 /* LDV_COMMENT_BEGIN_PREP */
/* preprocessor context (trace lines 6492-6546): same AHCI/HSORG/PORT_IRQ #define block as at lines 5079-5131 above */
6547 /* LDV_COMMENT_END_PREP */
6548 /* LDV_COMMENT_FUNCTION_CALL Call the "probe" callback of the "mtip_pci_driver" pci_driver structure. Standard test for a correct return result. */
6549 res_mtip_pci_probe_88 = mtip_pci_probe( var_group6, var_mtip_pci_probe_88_p1);
6550 ldv_check_return_value(res_mtip_pci_probe_88);
6551 ldv_check_return_value_probe(res_mtip_pci_probe_88);
6552 if(res_mtip_pci_probe_88)
6553 goto ldv_module_exit;
6554 ldv_s_mtip_pci_driver_pci_driver++;
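/* A successful probe advances the pci_driver state machine to 1, which is the
 * precondition for the remove scenario (case 10) that resets it to 0; this
 * enforces probe-before-remove ordering. */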
6555
6556 }
6557
6558 }
6559
6560 break;
6561 case 10: {
6562
6563 /** STRUCT: struct type: pci_driver, struct name: mtip_pci_driver **/
6564 if(ldv_s_mtip_pci_driver_pci_driver==1) {
6565
6566 /* content: static void mtip_pci_remove(struct pci_dev *pdev)*/
6567 /* LDV_COMMENT_BEGIN_PREP */
/* preprocessor context (trace lines 6568-6622): same AHCI/HSORG/PORT_IRQ #define block as at lines 5079-5131 above */
6623 /* LDV_COMMENT_END_PREP */
6624 /* LDV_COMMENT_FUNCTION_CALL Function from field "remove" from driver structure with callbacks "mtip_pci_driver" */
6625 ldv_handler_precall();
6626 mtip_pci_remove( var_group6);
6627 ldv_s_mtip_pci_driver_pci_driver=0;
6628
6629 }
6630
6631 }
6632
6633 break;
6634 case 11: {
6635
6636 /** STRUCT: struct type: pci_driver, struct name: mtip_pci_driver **/
6637
6638
6639 /* content: static int mtip_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)*/
6640 /* LDV_COMMENT_BEGIN_PREP */
6641 #define HW_CMD_SLOT_SZ (MTIP_MAX_COMMAND_SLOTS * 32)
6642 #define AHCI_RX_FIS_SZ 0x100
6643 #define AHCI_RX_FIS_OFFSET 0x0
6644 #define AHCI_IDFY_SZ ATA_SECT_SIZE
6645 #define AHCI_IDFY_OFFSET 0x400
6646 #define AHCI_SECTBUF_SZ ATA_SECT_SIZE
6647 #define AHCI_SECTBUF_OFFSET 0x800
6648 #define AHCI_SMARTBUF_SZ ATA_SECT_SIZE
6649 #define AHCI_SMARTBUF_OFFSET 0xC00
6650 #define BLOCK_DMA_ALLOC_SZ 4096
6651 #define AHCI_CMD_SLOT_SZ sizeof(struct mtip_cmd_hdr)
6652 #define AHCI_CMD_TBL_SZ (MTIP_MAX_COMMAND_SLOTS * AHCI_CMD_SLOT_SZ)
6653 #define AHCI_CMD_TBL_OFFSET 0x0
6654 #define AHCI_CMD_TBL_HDR_SZ 0x80
6655 #define AHCI_CMD_TBL_HDR_OFFSET 0x0
6656 #define AHCI_CMD_TBL_SGL_SZ (MTIP_MAX_SG * sizeof(struct mtip_cmd_sg))
6657 #define AHCI_CMD_TBL_SGL_OFFSET AHCI_CMD_TBL_HDR_SZ
6658 #define CMD_DMA_ALLOC_SZ (AHCI_CMD_TBL_SGL_SZ + AHCI_CMD_TBL_HDR_SZ)
6659 #define HOST_CAP_NZDMA (1 << 19)
6660 #define HOST_HSORG 0xFC
6661 #define HSORG_DISABLE_SLOTGRP_INTR (1<<24)
6662 #define HSORG_DISABLE_SLOTGRP_PXIS (1<<16)
6663 #define HSORG_HWREV 0xFF00
6664 #define HSORG_STYLE 0x8
6665 #define HSORG_SLOTGROUPS 0x7
6666 #define PORT_COMMAND_ISSUE 0x38
6667 #define PORT_SDBV 0x7C
6668 #define PORT_OFFSET 0x100
6669 #define PORT_MEM_SIZE 0x80
6670 #define PORT_IRQ_ERR \
6671 (PORT_IRQ_HBUS_ERR | PORT_IRQ_IF_ERR | PORT_IRQ_CONNECT | \
6672 PORT_IRQ_PHYRDY | PORT_IRQ_UNK_FIS | PORT_IRQ_BAD_PMP | \
6673 PORT_IRQ_TF_ERR | PORT_IRQ_HBUS_DATA_ERR | PORT_IRQ_IF_NONFATAL | \
6674 PORT_IRQ_OVERFLOW)
6675 #define PORT_IRQ_LEGACY \
6676 (PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS)
6677 #define PORT_IRQ_HANDLED \
6678 (PORT_IRQ_SDB_FIS | PORT_IRQ_LEGACY | \
6679 PORT_IRQ_TF_ERR | PORT_IRQ_IF_ERR | \
6680 PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)
6681 #define DEF_PORT_IRQ \
6682 (PORT_IRQ_ERR | PORT_IRQ_LEGACY | PORT_IRQ_SDB_FIS)
6683 #define MTIP_PRODUCT_UNKNOWN 0x00
6684 #define MTIP_PRODUCT_ASICFPGA 0x11
6685 #ifdef CONFIG_COMPAT
6686 #endif
6687 #ifdef __LITTLE_ENDIAN
6688 #else
6689 #endif
6690 #ifdef MTIP_TRIM
6691 #endif
6692 #ifdef CONFIG_COMPAT
6693 #endif
6694 #ifdef CONFIG_COMPAT
6695 #endif
6696 /* LDV_COMMENT_END_PREP */
6697 /* LDV_COMMENT_FUNCTION_CALL Function from field "suspend" from driver structure with callbacks "mtip_pci_driver" */
6698 ldv_handler_precall();
6699 mtip_pci_suspend( var_group6, var_mtip_pci_suspend_90_p1);
6700
6701
6702
6703
6704 }
6705
6706 break;
6707 case 12: {
6708
6709 /** STRUCT: struct type: pci_driver, struct name: mtip_pci_driver **/
6710
6711
6712 /* content: static int mtip_pci_resume(struct pci_dev *pdev)*/
6713 /* LDV_COMMENT_BEGIN_PREP */
6714 #define HW_CMD_SLOT_SZ (MTIP_MAX_COMMAND_SLOTS * 32)
6715 #define AHCI_RX_FIS_SZ 0x100
6716 #define AHCI_RX_FIS_OFFSET 0x0
6717 #define AHCI_IDFY_SZ ATA_SECT_SIZE
6718 #define AHCI_IDFY_OFFSET 0x400
6719 #define AHCI_SECTBUF_SZ ATA_SECT_SIZE
6720 #define AHCI_SECTBUF_OFFSET 0x800
6721 #define AHCI_SMARTBUF_SZ ATA_SECT_SIZE
6722 #define AHCI_SMARTBUF_OFFSET 0xC00
6723 #define BLOCK_DMA_ALLOC_SZ 4096
6724 #define AHCI_CMD_SLOT_SZ sizeof(struct mtip_cmd_hdr)
6725 #define AHCI_CMD_TBL_SZ (MTIP_MAX_COMMAND_SLOTS * AHCI_CMD_SLOT_SZ)
6726 #define AHCI_CMD_TBL_OFFSET 0x0
6727 #define AHCI_CMD_TBL_HDR_SZ 0x80
6728 #define AHCI_CMD_TBL_HDR_OFFSET 0x0
6729 #define AHCI_CMD_TBL_SGL_SZ (MTIP_MAX_SG * sizeof(struct mtip_cmd_sg))
6730 #define AHCI_CMD_TBL_SGL_OFFSET AHCI_CMD_TBL_HDR_SZ
6731 #define CMD_DMA_ALLOC_SZ (AHCI_CMD_TBL_SGL_SZ + AHCI_CMD_TBL_HDR_SZ)
6732 #define HOST_CAP_NZDMA (1 << 19)
6733 #define HOST_HSORG 0xFC
6734 #define HSORG_DISABLE_SLOTGRP_INTR (1<<24)
6735 #define HSORG_DISABLE_SLOTGRP_PXIS (1<<16)
6736 #define HSORG_HWREV 0xFF00
6737 #define HSORG_STYLE 0x8
6738 #define HSORG_SLOTGROUPS 0x7
6739 #define PORT_COMMAND_ISSUE 0x38
6740 #define PORT_SDBV 0x7C
6741 #define PORT_OFFSET 0x100
6742 #define PORT_MEM_SIZE 0x80
6743 #define PORT_IRQ_ERR \
6744 (PORT_IRQ_HBUS_ERR | PORT_IRQ_IF_ERR | PORT_IRQ_CONNECT | \
6745 PORT_IRQ_PHYRDY | PORT_IRQ_UNK_FIS | PORT_IRQ_BAD_PMP | \
6746 PORT_IRQ_TF_ERR | PORT_IRQ_HBUS_DATA_ERR | PORT_IRQ_IF_NONFATAL | \
6747 PORT_IRQ_OVERFLOW)
6748 #define PORT_IRQ_LEGACY \
6749 (PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS)
6750 #define PORT_IRQ_HANDLED \
6751 (PORT_IRQ_SDB_FIS | PORT_IRQ_LEGACY | \
6752 PORT_IRQ_TF_ERR | PORT_IRQ_IF_ERR | \
6753 PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)
6754 #define DEF_PORT_IRQ \
6755 (PORT_IRQ_ERR | PORT_IRQ_LEGACY | PORT_IRQ_SDB_FIS)
6756 #define MTIP_PRODUCT_UNKNOWN 0x00
6757 #define MTIP_PRODUCT_ASICFPGA 0x11
6758 #ifdef CONFIG_COMPAT
6759 #endif
6760 #ifdef __LITTLE_ENDIAN
6761 #else
6762 #endif
6763 #ifdef MTIP_TRIM
6764 #endif
6765 #ifdef CONFIG_COMPAT
6766 #endif
6767 #ifdef CONFIG_COMPAT
6768 #endif
6769 /* LDV_COMMENT_END_PREP */
6770 /* LDV_COMMENT_FUNCTION_CALL Function from field "resume" from driver structure with callbacks "mtip_pci_driver" */
6771 ldv_handler_precall();
6772 mtip_pci_resume( var_group6);
6773
6774
6775
6776
6777 }
6778
6779 break;
6780 case 13: {
6781
6782 /** STRUCT: struct type: pci_driver, struct name: mtip_pci_driver **/
6783
6784
6785 /* content: static void mtip_pci_shutdown(struct pci_dev *pdev)*/
6786 /* LDV_COMMENT_BEGIN_PREP */
6787 #define HW_CMD_SLOT_SZ (MTIP_MAX_COMMAND_SLOTS * 32)
6788 #define AHCI_RX_FIS_SZ 0x100
6789 #define AHCI_RX_FIS_OFFSET 0x0
6790 #define AHCI_IDFY_SZ ATA_SECT_SIZE
6791 #define AHCI_IDFY_OFFSET 0x400
6792 #define AHCI_SECTBUF_SZ ATA_SECT_SIZE
6793 #define AHCI_SECTBUF_OFFSET 0x800
6794 #define AHCI_SMARTBUF_SZ ATA_SECT_SIZE
6795 #define AHCI_SMARTBUF_OFFSET 0xC00
6796 #define BLOCK_DMA_ALLOC_SZ 4096
6797 #define AHCI_CMD_SLOT_SZ sizeof(struct mtip_cmd_hdr)
6798 #define AHCI_CMD_TBL_SZ (MTIP_MAX_COMMAND_SLOTS * AHCI_CMD_SLOT_SZ)
6799 #define AHCI_CMD_TBL_OFFSET 0x0
6800 #define AHCI_CMD_TBL_HDR_SZ 0x80
6801 #define AHCI_CMD_TBL_HDR_OFFSET 0x0
6802 #define AHCI_CMD_TBL_SGL_SZ (MTIP_MAX_SG * sizeof(struct mtip_cmd_sg))
6803 #define AHCI_CMD_TBL_SGL_OFFSET AHCI_CMD_TBL_HDR_SZ
6804 #define CMD_DMA_ALLOC_SZ (AHCI_CMD_TBL_SGL_SZ + AHCI_CMD_TBL_HDR_SZ)
6805 #define HOST_CAP_NZDMA (1 << 19)
6806 #define HOST_HSORG 0xFC
6807 #define HSORG_DISABLE_SLOTGRP_INTR (1<<24)
6808 #define HSORG_DISABLE_SLOTGRP_PXIS (1<<16)
6809 #define HSORG_HWREV 0xFF00
6810 #define HSORG_STYLE 0x8
6811 #define HSORG_SLOTGROUPS 0x7
6812 #define PORT_COMMAND_ISSUE 0x38
6813 #define PORT_SDBV 0x7C
6814 #define PORT_OFFSET 0x100
6815 #define PORT_MEM_SIZE 0x80
6816 #define PORT_IRQ_ERR \
6817 (PORT_IRQ_HBUS_ERR | PORT_IRQ_IF_ERR | PORT_IRQ_CONNECT | \
6818 PORT_IRQ_PHYRDY | PORT_IRQ_UNK_FIS | PORT_IRQ_BAD_PMP | \
6819 PORT_IRQ_TF_ERR | PORT_IRQ_HBUS_DATA_ERR | PORT_IRQ_IF_NONFATAL | \
6820 PORT_IRQ_OVERFLOW)
6821 #define PORT_IRQ_LEGACY \
6822 (PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS)
6823 #define PORT_IRQ_HANDLED \
6824 (PORT_IRQ_SDB_FIS | PORT_IRQ_LEGACY | \
6825 PORT_IRQ_TF_ERR | PORT_IRQ_IF_ERR | \
6826 PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)
6827 #define DEF_PORT_IRQ \
6828 (PORT_IRQ_ERR | PORT_IRQ_LEGACY | PORT_IRQ_SDB_FIS)
6829 #define MTIP_PRODUCT_UNKNOWN 0x00
6830 #define MTIP_PRODUCT_ASICFPGA 0x11
6831 #ifdef CONFIG_COMPAT
6832 #endif
6833 #ifdef __LITTLE_ENDIAN
6834 #else
6835 #endif
6836 #ifdef MTIP_TRIM
6837 #endif
6838 #ifdef CONFIG_COMPAT
6839 #endif
6840 #ifdef CONFIG_COMPAT
6841 #endif
6842 /* LDV_COMMENT_END_PREP */
6843 /* LDV_COMMENT_FUNCTION_CALL Function from field "shutdown" from driver structure with callbacks "mtip_pci_driver" */
6844 ldv_handler_precall();
6845 mtip_pci_shutdown( var_group6);
6846
6847
6848
6849
6850 }
6851
6852 break;
6853 case 14: {
6854
6855 /** CALLBACK SECTION request_irq **/
6856 LDV_IN_INTERRUPT=2;
6857
6858 /* content: static irqreturn_t mtip_irq_handler(int irq, void *instance)*/
6859 /* LDV_COMMENT_BEGIN_PREP */
6860 #define HW_CMD_SLOT_SZ (MTIP_MAX_COMMAND_SLOTS * 32)
6861 #define AHCI_RX_FIS_SZ 0x100
6862 #define AHCI_RX_FIS_OFFSET 0x0
6863 #define AHCI_IDFY_SZ ATA_SECT_SIZE
6864 #define AHCI_IDFY_OFFSET 0x400
6865 #define AHCI_SECTBUF_SZ ATA_SECT_SIZE
6866 #define AHCI_SECTBUF_OFFSET 0x800
6867 #define AHCI_SMARTBUF_SZ ATA_SECT_SIZE
6868 #define AHCI_SMARTBUF_OFFSET 0xC00
6869 #define BLOCK_DMA_ALLOC_SZ 4096
6870 #define AHCI_CMD_SLOT_SZ sizeof(struct mtip_cmd_hdr)
6871 #define AHCI_CMD_TBL_SZ (MTIP_MAX_COMMAND_SLOTS * AHCI_CMD_SLOT_SZ)
6872 #define AHCI_CMD_TBL_OFFSET 0x0
6873 #define AHCI_CMD_TBL_HDR_SZ 0x80
6874 #define AHCI_CMD_TBL_HDR_OFFSET 0x0
6875 #define AHCI_CMD_TBL_SGL_SZ (MTIP_MAX_SG * sizeof(struct mtip_cmd_sg))
6876 #define AHCI_CMD_TBL_SGL_OFFSET AHCI_CMD_TBL_HDR_SZ
6877 #define CMD_DMA_ALLOC_SZ (AHCI_CMD_TBL_SGL_SZ + AHCI_CMD_TBL_HDR_SZ)
6878 #define HOST_CAP_NZDMA (1 << 19)
6879 #define HOST_HSORG 0xFC
6880 #define HSORG_DISABLE_SLOTGRP_INTR (1<<24)
6881 #define HSORG_DISABLE_SLOTGRP_PXIS (1<<16)
6882 #define HSORG_HWREV 0xFF00
6883 #define HSORG_STYLE 0x8
6884 #define HSORG_SLOTGROUPS 0x7
6885 #define PORT_COMMAND_ISSUE 0x38
6886 #define PORT_SDBV 0x7C
6887 #define PORT_OFFSET 0x100
6888 #define PORT_MEM_SIZE 0x80
6889 #define PORT_IRQ_ERR \
6890 (PORT_IRQ_HBUS_ERR | PORT_IRQ_IF_ERR | PORT_IRQ_CONNECT | \
6891 PORT_IRQ_PHYRDY | PORT_IRQ_UNK_FIS | PORT_IRQ_BAD_PMP | \
6892 PORT_IRQ_TF_ERR | PORT_IRQ_HBUS_DATA_ERR | PORT_IRQ_IF_NONFATAL | \
6893 PORT_IRQ_OVERFLOW)
6894 #define PORT_IRQ_LEGACY \
6895 (PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS)
6896 #define PORT_IRQ_HANDLED \
6897 (PORT_IRQ_SDB_FIS | PORT_IRQ_LEGACY | \
6898 PORT_IRQ_TF_ERR | PORT_IRQ_IF_ERR | \
6899 PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)
6900 #define DEF_PORT_IRQ \
6901 (PORT_IRQ_ERR | PORT_IRQ_LEGACY | PORT_IRQ_SDB_FIS)
6902 #define MTIP_PRODUCT_UNKNOWN 0x00
6903 #define MTIP_PRODUCT_ASICFPGA 0x11
6904 #ifdef CONFIG_COMPAT
6905 #endif
6906 /* LDV_COMMENT_END_PREP */
6907 /* LDV_COMMENT_FUNCTION_CALL */
6908 ldv_handler_precall();
6909 mtip_irq_handler( var_mtip_irq_handler_23_p0, var_mtip_irq_handler_23_p1);
6910 /* LDV_COMMENT_BEGIN_PREP */
6911 #ifdef __LITTLE_ENDIAN
6912 #else
6913 #endif
6914 #ifdef MTIP_TRIM
6915 #endif
6916 #ifdef CONFIG_COMPAT
6917 #endif
6918 #ifdef CONFIG_COMPAT
6919 #endif
6920 /* LDV_COMMENT_END_PREP */
6921 LDV_IN_INTERRUPT=1;
6922
6923
6924
6925 }
6926
6927 break;
6928 default: break;
6929
6930 }
6931
6932 }
6933
6934 ldv_module_exit:
6935
6936 /** INIT: init_type: ST_MODULE_EXIT **/
6937 /* content: static void __exit mtip_exit(void)*/
6938 /* LDV_COMMENT_BEGIN_PREP */
6939 #define HW_CMD_SLOT_SZ (MTIP_MAX_COMMAND_SLOTS * 32)
6940 #define AHCI_RX_FIS_SZ 0x100
6941 #define AHCI_RX_FIS_OFFSET 0x0
6942 #define AHCI_IDFY_SZ ATA_SECT_SIZE
6943 #define AHCI_IDFY_OFFSET 0x400
6944 #define AHCI_SECTBUF_SZ ATA_SECT_SIZE
6945 #define AHCI_SECTBUF_OFFSET 0x800
6946 #define AHCI_SMARTBUF_SZ ATA_SECT_SIZE
6947 #define AHCI_SMARTBUF_OFFSET 0xC00
6948 #define BLOCK_DMA_ALLOC_SZ 4096
6949 #define AHCI_CMD_SLOT_SZ sizeof(struct mtip_cmd_hdr)
6950 #define AHCI_CMD_TBL_SZ (MTIP_MAX_COMMAND_SLOTS * AHCI_CMD_SLOT_SZ)
6951 #define AHCI_CMD_TBL_OFFSET 0x0
6952 #define AHCI_CMD_TBL_HDR_SZ 0x80
6953 #define AHCI_CMD_TBL_HDR_OFFSET 0x0
6954 #define AHCI_CMD_TBL_SGL_SZ (MTIP_MAX_SG * sizeof(struct mtip_cmd_sg))
6955 #define AHCI_CMD_TBL_SGL_OFFSET AHCI_CMD_TBL_HDR_SZ
6956 #define CMD_DMA_ALLOC_SZ (AHCI_CMD_TBL_SGL_SZ + AHCI_CMD_TBL_HDR_SZ)
6957 #define HOST_CAP_NZDMA (1 << 19)
6958 #define HOST_HSORG 0xFC
6959 #define HSORG_DISABLE_SLOTGRP_INTR (1<<24)
6960 #define HSORG_DISABLE_SLOTGRP_PXIS (1<<16)
6961 #define HSORG_HWREV 0xFF00
6962 #define HSORG_STYLE 0x8
6963 #define HSORG_SLOTGROUPS 0x7
6964 #define PORT_COMMAND_ISSUE 0x38
6965 #define PORT_SDBV 0x7C
6966 #define PORT_OFFSET 0x100
6967 #define PORT_MEM_SIZE 0x80
6968 #define PORT_IRQ_ERR \
6969 (PORT_IRQ_HBUS_ERR | PORT_IRQ_IF_ERR | PORT_IRQ_CONNECT | \
6970 PORT_IRQ_PHYRDY | PORT_IRQ_UNK_FIS | PORT_IRQ_BAD_PMP | \
6971 PORT_IRQ_TF_ERR | PORT_IRQ_HBUS_DATA_ERR | PORT_IRQ_IF_NONFATAL | \
6972 PORT_IRQ_OVERFLOW)
6973 #define PORT_IRQ_LEGACY \
6974 (PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS)
6975 #define PORT_IRQ_HANDLED \
6976 (PORT_IRQ_SDB_FIS | PORT_IRQ_LEGACY | \
6977 PORT_IRQ_TF_ERR | PORT_IRQ_IF_ERR | \
6978 PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)
6979 #define DEF_PORT_IRQ \
6980 (PORT_IRQ_ERR | PORT_IRQ_LEGACY | PORT_IRQ_SDB_FIS)
6981 #define MTIP_PRODUCT_UNKNOWN 0x00
6982 #define MTIP_PRODUCT_ASICFPGA 0x11
6983 #ifdef CONFIG_COMPAT
6984 #endif
6985 #ifdef __LITTLE_ENDIAN
6986 #else
6987 #endif
6988 #ifdef MTIP_TRIM
6989 #endif
6990 #ifdef CONFIG_COMPAT
6991 #endif
6992 #ifdef CONFIG_COMPAT
6993 #endif
6994 /* LDV_COMMENT_END_PREP */
6995 /* LDV_COMMENT_FUNCTION_CALL Kernel calls driver release function before driver will be uploaded from kernel. This function declared as "MODULE_EXIT(function name)". */
6996 ldv_handler_precall();
6997 mtip_exit();
6998
6999 /* LDV_COMMENT_FUNCTION_CALL Checks that all resources and locks are correctly released before the driver will be unloaded. */
7000 ldv_final: ldv_check_final_state();
7001
7002 /* LDV_COMMENT_END_FUNCTION_CALL_SECTION */
7003 return;
7004
7005 }
7006 #endif
7007
7008 /* LDV_COMMENT_END_MAIN */
1
2 #include <linux/types.h>
3 #include <linux/dma-direction.h>
4
5 extern dma_addr_t ldv_dma_map_page(struct device *dev, struct page *page, size_t offset, size_t size, enum dma_data_direction dir);
6 extern dma_addr_t ldv_dma_map_single(struct device *dev, void *cpu_addr, size_t size, enum dma_data_direction dir);
7 extern dma_addr_t ldv_dma_map_single_attrs(struct device *dev, void *ptr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
8 extern int ldv_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
9 #line 1 "/home/cluser/ldv/ref_launch/work/current--X--drivers--X--defaultlinux-4.5-rc1.tar.xz--X--331_1a--X--cpachecker/linux-4.5-rc1.tar.xz/csd_deg_dscv/11248/dscv_tempdir/dscv/ri/331_1a/drivers/block/mtip32xx/mtip32xx.c"
10
11 /*
12 * Driver for the Micron P320 SSD
13 * Copyright (C) 2011 Micron Technology, Inc.
14 *
15 * Portions of this code were derived from works subjected to the
16 * following copyright:
17 * Copyright (C) 2009 Integrated Device Technology, Inc.
18 *
19 * This program is free software; you can redistribute it and/or modify
20 * it under the terms of the GNU General Public License as published by
21 * the Free Software Foundation; either version 2 of the License, or
22 * (at your option) any later version.
23 *
24 * This program is distributed in the hope that it will be useful,
25 * but WITHOUT ANY WARRANTY; without even the implied warranty of
26 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
27 * GNU General Public License for more details.
28 *
29 */
30
31 #include <linux/pci.h>
32 #include <linux/interrupt.h>
33 #include <linux/ata.h>
34 #include <linux/delay.h>
35 #include <linux/hdreg.h>
36 #include <linux/uaccess.h>
37 #include <linux/random.h>
38 #include <linux/smp.h>
39 #include <linux/compat.h>
40 #include <linux/fs.h>
41 #include <linux/module.h>
42 #include <linux/genhd.h>
43 #include <linux/blkdev.h>
44 #include <linux/blk-mq.h>
45 #include <linux/bio.h>
46 #include <linux/dma-mapping.h>
47 #include <linux/idr.h>
48 #include <linux/kthread.h>
49 #include <../drivers/ata/ahci.h>
50 #include <linux/export.h>
51 #include <linux/debugfs.h>
52 #include <linux/prefetch.h>
53 #include "mtip32xx.h"
54
55 #define HW_CMD_SLOT_SZ (MTIP_MAX_COMMAND_SLOTS * 32)
56
57 /* DMA region containing RX Fis, Identify, RLE10, and SMART buffers */
58 #define AHCI_RX_FIS_SZ 0x100
59 #define AHCI_RX_FIS_OFFSET 0x0
60 #define AHCI_IDFY_SZ ATA_SECT_SIZE
61 #define AHCI_IDFY_OFFSET 0x400
62 #define AHCI_SECTBUF_SZ ATA_SECT_SIZE
63 #define AHCI_SECTBUF_OFFSET 0x800
64 #define AHCI_SMARTBUF_SZ ATA_SECT_SIZE
65 #define AHCI_SMARTBUF_OFFSET 0xC00
66 /* 0x100 + 0x200 + 0x200 + 0x200 is smaller than 4k but we pad it out */
67 #define BLOCK_DMA_ALLOC_SZ 4096
68
69 /* DMA region containing command table (should be 8192 bytes) */
70 #define AHCI_CMD_SLOT_SZ sizeof(struct mtip_cmd_hdr)
71 #define AHCI_CMD_TBL_SZ (MTIP_MAX_COMMAND_SLOTS * AHCI_CMD_SLOT_SZ)
72 #define AHCI_CMD_TBL_OFFSET 0x0
73
74 /* DMA region per command (contains header and SGL) */
75 #define AHCI_CMD_TBL_HDR_SZ 0x80
76 #define AHCI_CMD_TBL_HDR_OFFSET 0x0
77 #define AHCI_CMD_TBL_SGL_SZ (MTIP_MAX_SG * sizeof(struct mtip_cmd_sg))
78 #define AHCI_CMD_TBL_SGL_OFFSET AHCI_CMD_TBL_HDR_SZ
79 #define CMD_DMA_ALLOC_SZ (AHCI_CMD_TBL_SGL_SZ + AHCI_CMD_TBL_HDR_SZ)
80
81
82 #define HOST_CAP_NZDMA (1 << 19)
83 #define HOST_HSORG 0xFC
84 #define HSORG_DISABLE_SLOTGRP_INTR (1<<24)
85 #define HSORG_DISABLE_SLOTGRP_PXIS (1<<16)
86 #define HSORG_HWREV 0xFF00
87 #define HSORG_STYLE 0x8
88 #define HSORG_SLOTGROUPS 0x7
89
90 #define PORT_COMMAND_ISSUE 0x38
91 #define PORT_SDBV 0x7C
92
93 #define PORT_OFFSET 0x100
94 #define PORT_MEM_SIZE 0x80
95
96 #define PORT_IRQ_ERR \
97 (PORT_IRQ_HBUS_ERR | PORT_IRQ_IF_ERR | PORT_IRQ_CONNECT | \
98 PORT_IRQ_PHYRDY | PORT_IRQ_UNK_FIS | PORT_IRQ_BAD_PMP | \
99 PORT_IRQ_TF_ERR | PORT_IRQ_HBUS_DATA_ERR | PORT_IRQ_IF_NONFATAL | \
100 PORT_IRQ_OVERFLOW)
101 #define PORT_IRQ_LEGACY \
102 (PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS)
103 #define PORT_IRQ_HANDLED \
104 (PORT_IRQ_SDB_FIS | PORT_IRQ_LEGACY | \
105 PORT_IRQ_TF_ERR | PORT_IRQ_IF_ERR | \
106 PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)
107 #define DEF_PORT_IRQ \
108 (PORT_IRQ_ERR | PORT_IRQ_LEGACY | PORT_IRQ_SDB_FIS)
109
110 /* product numbers */
111 #define MTIP_PRODUCT_UNKNOWN 0x00
112 #define MTIP_PRODUCT_ASICFPGA 0x11
113
114 /* Device instance number, incremented each time a device is probed. */
115 static int instance;
116
117 static struct list_head online_list;
118 static struct list_head removing_list;
119 static spinlock_t dev_lock;
120
121 /*
122 * Global variable used to hold the major block device number
123 * allocated in mtip_init().
124 */
125 static int mtip_major;
126 static struct dentry *dfs_parent;
127 static struct dentry *dfs_device_status;
128
129 static u32 cpu_use[NR_CPUS];
130
131 static DEFINE_SPINLOCK(rssd_index_lock);
132 static DEFINE_IDA(rssd_index_ida);
133
134 static int mtip_block_initialize(struct driver_data *dd);
135
136 #ifdef CONFIG_COMPAT
137 struct mtip_compat_ide_task_request_s {
138 __u8 io_ports[8];
139 __u8 hob_ports[8];
140 ide_reg_valid_t out_flags;
141 ide_reg_valid_t in_flags;
142 int data_phase;
143 int req_cmd;
144 compat_ulong_t out_size;
145 compat_ulong_t in_size;
146 };
147 #endif
148
149 /*
150  * mtip_check_surprise_removal() is called when the card may have been
151  * removed from the system; it reads the vendor ID from the
152  * configuration space to detect a surprise removal.
153 *
154 * @pdev Pointer to the pci_dev structure.
155 *
156 * return value
157 * true if device removed, else false
158 */
159 static bool mtip_check_surprise_removal(struct pci_dev *pdev)
160 {
161 u16 vendor_id = 0;
162 struct driver_data *dd = pci_get_drvdata(pdev);
163
164 if (dd->sr)
165 return true;
166
167 /* Read the vendorID from the configuration space */
168 pci_read_config_word(pdev, 0x00, &vendor_id);
169 if (vendor_id == 0xFFFF) {
170 dd->sr = true;
171 if (dd->queue)
172 set_bit(QUEUE_FLAG_DEAD, &dd->queue->queue_flags);
173 else
174 dev_warn(&dd->pdev->dev,
175 "%s: dd->queue is NULL\n", __func__);
176 return true; /* device removed */
177 }
178
179 return false; /* device present */
180 }
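/*
 * A minimal user-space sketch of the vendor-ID probe above: PCI config
 * reads from a removed device return all ones, so a vendor ID of 0xFFFF
 * means the card is gone.  fake_read_vendor_id() is a hypothetical
 * stand-in for pci_read_config_word() at offset 0x00.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint16_t fake_read_vendor_id(bool device_present)
{
	return device_present ? 0x1344 /* example vendor ID */ : 0xFFFF;
}

static bool is_surprise_removed(bool device_present)
{
	return fake_read_vendor_id(device_present) == 0xFFFF;
}

int main(void)
{
	printf("present: removed=%d\n", is_surprise_removed(true));
	printf("pulled:  removed=%d\n", is_surprise_removed(false));
	return 0;
}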
181
182 static struct mtip_cmd *mtip_get_int_command(struct driver_data *dd)
183 {
184 struct request *rq;
185
186 rq = blk_mq_alloc_request(dd->queue, 0, BLK_MQ_REQ_RESERVED);
187 return blk_mq_rq_to_pdu(rq);
188 }
189
190 static void mtip_put_int_command(struct driver_data *dd, struct mtip_cmd *cmd)
191 {
192 blk_put_request(blk_mq_rq_from_pdu(cmd));
193 }
194
195 /*
196 * Once we add support for one hctx per mtip group, this will change a bit
197 */
198 static struct request *mtip_rq_from_tag(struct driver_data *dd,
199 unsigned int tag)
200 {
201 struct blk_mq_hw_ctx *hctx = dd->queue->queue_hw_ctx[0];
202
203 return blk_mq_tag_to_rq(hctx->tags, tag);
204 }
205
206 static struct mtip_cmd *mtip_cmd_from_tag(struct driver_data *dd,
207 unsigned int tag)
208 {
209 struct request *rq = mtip_rq_from_tag(dd, tag);
210
211 return blk_mq_rq_to_pdu(rq);
212 }
213
214 /*
215 * IO completion function.
216 *
217 * This completion function is called by the driver ISR when a
218 * command that was issued by the kernel completes. It first calls the
219 * asynchronous completion function which normally calls back into the block
220 * layer passing the asynchronous callback data, then unmaps the
221 * scatter list associated with the completed command, and finally
222 * clears the allocated bit associated with the completed command.
223 *
224 * @port Pointer to the port data structure.
225 * @tag Tag of the command.
226 * @data Pointer to driver_data.
227 * @status Completion status.
228 *
229 * return value
230 * None
231 */
232 static void mtip_async_complete(struct mtip_port *port,
233 int tag, struct mtip_cmd *cmd, int status)
234 {
235 	struct driver_data *dd;
236 	struct request *rq;
237 
238 	if (unlikely(!port) || unlikely(!(dd = port->dd)))
239 return;
240
241 if (unlikely(status == PORT_IRQ_TF_ERR)) {
242 dev_warn(&port->dd->pdev->dev,
243 "Command tag %d failed due to TFE\n", tag);
244 }
245
246 /* Unmap the DMA scatter list entries */
247 dma_unmap_sg(&dd->pdev->dev, cmd->sg, cmd->scatter_ents, cmd->direction);
248
249 rq = mtip_rq_from_tag(dd, tag);
250
251 if (unlikely(cmd->unaligned))
252 up(&port->cmd_slot_unal);
253
254 blk_mq_end_request(rq, status ? -EIO : 0);
255 }
256
257 /*
258 * Reset the HBA (without sleeping)
259 *
260 * @dd Pointer to the driver data structure.
261 *
262 * return value
263 * 0 The reset was successful.
264 * -1 The HBA Reset bit did not clear.
265 */
266 static int mtip_hba_reset(struct driver_data *dd)
267 {
268 unsigned long timeout;
269
270 /* Set the reset bit */
271 writel(HOST_RESET, dd->mmio + HOST_CTL);
272
273 /* Flush */
274 readl(dd->mmio + HOST_CTL);
275
276 /*
277 * Spin for up to 10 seconds waiting for reset acknowledgement. Spec
278 * is 1 sec but in LUN failure conditions, up to 10 secs are required
279 */
280 timeout = jiffies + msecs_to_jiffies(10000);
281 do {
282 mdelay(10);
283 if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))
284 return -1;
285
286 } while ((readl(dd->mmio + HOST_CTL) & HOST_RESET)
287 && time_before(jiffies, timeout));
288
289 if (readl(dd->mmio + HOST_CTL) & HOST_RESET)
290 return -1;
291
292 return 0;
293 }
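/*
 * The reset routine above is the classic "set a self-clearing bit, read
 * back to flush, then poll with a deadline" pattern.  A compilable sketch
 * of the same control flow, with the MMIO register replaced by a plain
 * variable and the hardware's bit-clear simulated inside the loop:
 */
#include <stdint.h>
#include <stdio.h>

#define HOST_RESET_BIT (1u << 0)

static int reset_and_poll(volatile uint32_t *host_ctl, int max_polls)
{
	*host_ctl |= HOST_RESET_BIT;          /* set the reset bit      */
	(void)*host_ctl;                      /* read back to flush     */

	while (max_polls-- > 0) {
		if (max_polls == 2)           /* simulate hw acking     */
			*host_ctl &= ~HOST_RESET_BIT;
		if (!(*host_ctl & HOST_RESET_BIT))
			return 0;             /* reset acknowledged     */
	}
	return -1;                            /* bit never cleared      */
}

int main(void)
{
	uint32_t reg = 0;
	printf("reset rv=%d\n", reset_and_poll(&reg, 10));
	return 0;
}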
294
295 /*
296 * Issue a command to the hardware.
297 *
298 * Set the appropriate bit in the s_active and Command Issue hardware
299 * registers, causing hardware command processing to begin.
300 *
301 * @port Pointer to the port structure.
302 * @tag The tag of the command to be issued.
303 *
304 * return value
305 * None
306 */
307 static inline void mtip_issue_ncq_command(struct mtip_port *port, int tag)
308 {
309 int group = tag >> 5;
310
311 /* guard SACT and CI registers */
312 spin_lock(&port->cmd_issue_lock[group]);
313 writel((1 << MTIP_TAG_BIT(tag)),
314 port->s_active[MTIP_TAG_INDEX(tag)]);
315 writel((1 << MTIP_TAG_BIT(tag)),
316 port->cmd_issue[MTIP_TAG_INDEX(tag)]);
317 spin_unlock(&port->cmd_issue_lock[group]);
318 }
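/*
 * The tag decomposition above packs 32 command slots per group; assuming
 * the usual definitions, MTIP_TAG_INDEX(tag) selects the 32-bit register
 * (tag >> 5, matching "group" above) and MTIP_TAG_BIT(tag) the bit inside
 * it (tag & 0x1F).  Runnable demonstration of the arithmetic:
 */
#include <stdio.h>

#define TAG_INDEX(tag) ((tag) >> 5)   /* which 32-bit register */
#define TAG_BIT(tag)   ((tag) & 0x1F) /* which bit inside it   */

int main(void)
{
	int tags[] = { 0, 31, 32, 100 };
	for (int i = 0; i < 4; i++)
		printf("tag %3d -> group %d, bit %2d, mask 0x%08x\n",
		       tags[i], TAG_INDEX(tags[i]), TAG_BIT(tags[i]),
		       1u << TAG_BIT(tags[i]));
	return 0;
}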
319
320 /*
321 * Enable/disable the reception of FIS
322 *
323 * @port Pointer to the port data structure
324 * @enable 1 to enable, 0 to disable
325 *
326 * return value
327 * Previous state: 1 enabled, 0 disabled
328 */
329 static int mtip_enable_fis(struct mtip_port *port, int enable)
330 {
331 u32 tmp;
332
333 /* enable FIS reception */
334 tmp = readl(port->mmio + PORT_CMD);
335 if (enable)
336 writel(tmp | PORT_CMD_FIS_RX, port->mmio + PORT_CMD);
337 else
338 writel(tmp & ~PORT_CMD_FIS_RX, port->mmio + PORT_CMD);
339
340 /* Flush */
341 readl(port->mmio + PORT_CMD);
342
343 return (((tmp & PORT_CMD_FIS_RX) == PORT_CMD_FIS_RX));
344 }
345
346 /*
347 * Enable/disable the DMA engine
348 *
349 * @port Pointer to the port data structure
350 * @enable 1 to enable, 0 to disable
351 *
352 * return value
353 * Previous state: 1 enabled, 0 disabled.
354 */
355 static int mtip_enable_engine(struct mtip_port *port, int enable)
356 {
357 u32 tmp;
358
359 	/* start or stop the DMA engine */
360 tmp = readl(port->mmio + PORT_CMD);
361 if (enable)
362 writel(tmp | PORT_CMD_START, port->mmio + PORT_CMD);
363 else
364 writel(tmp & ~PORT_CMD_START, port->mmio + PORT_CMD);
365
366 readl(port->mmio + PORT_CMD);
367 return (((tmp & PORT_CMD_START) == PORT_CMD_START));
368 }
369
370 /*
371 * Enables the port DMA engine and FIS reception.
372 *
373 * return value
374 * None
375 */
376 static inline void mtip_start_port(struct mtip_port *port)
377 {
378 /* Enable FIS reception */
379 mtip_enable_fis(port, 1);
380
381 /* Enable the DMA engine */
382 mtip_enable_engine(port, 1);
383 }
384
385 /*
386 * Deinitialize a port by disabling port interrupts, the DMA engine,
387 * and FIS reception.
388 *
389 * @port Pointer to the port structure
390 *
391 * return value
392 * None
393 */
394 static inline void mtip_deinit_port(struct mtip_port *port)
395 {
396 /* Disable interrupts on this port */
397 writel(0, port->mmio + PORT_IRQ_MASK);
398
399 /* Disable the DMA engine */
400 mtip_enable_engine(port, 0);
401
402 /* Disable FIS reception */
403 mtip_enable_fis(port, 0);
404 }
405
406 /*
407 * Initialize a port.
408 *
409 * This function deinitializes the port by calling mtip_deinit_port() and
410 * then initializes it by setting the command header and RX FIS addresses,
411 * clearing the SError register and any pending port interrupts before
412 * re-enabling the default set of port interrupts.
413 *
414 * @port Pointer to the port structure.
415 *
416 * return value
417 * None
418 */
419 static void mtip_init_port(struct mtip_port *port)
420 {
421 int i;
422 mtip_deinit_port(port);
423
424 /* Program the command list base and FIS base addresses */
425 if (readl(port->dd->mmio + HOST_CAP) & HOST_CAP_64) {
426 writel((port->command_list_dma >> 16) >> 16,
427 port->mmio + PORT_LST_ADDR_HI);
428 writel((port->rxfis_dma >> 16) >> 16,
429 port->mmio + PORT_FIS_ADDR_HI);
430 }
431
432 writel(port->command_list_dma & 0xFFFFFFFF,
433 port->mmio + PORT_LST_ADDR);
434 writel(port->rxfis_dma & 0xFFFFFFFF, port->mmio + PORT_FIS_ADDR);
435
436 /* Clear SError */
437 writel(readl(port->mmio + PORT_SCR_ERR), port->mmio + PORT_SCR_ERR);
438
439 /* reset the completed registers.*/
440 for (i = 0; i < port->dd->slot_groups; i++)
441 writel(0xFFFFFFFF, port->completed[i]);
442
443 /* Clear any pending interrupts for this port */
444 writel(readl(port->mmio + PORT_IRQ_STAT), port->mmio + PORT_IRQ_STAT);
445
446 /* Clear any pending interrupts on the HBA. */
447 writel(readl(port->dd->mmio + HOST_IRQ_STAT),
448 port->dd->mmio + HOST_IRQ_STAT);
449
450 /* Enable port interrupts */
451 writel(DEF_PORT_IRQ, port->mmio + PORT_IRQ_MASK);
452 }
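/*
 * The "(dma >> 16) >> 16" used above when programming PORT_LST_ADDR_HI is
 * a portable way to take the high 32 bits: if dma_addr_t is only 32 bits
 * wide, "dma >> 32" would be undefined behaviour, while two 16-bit shifts
 * safely yield 0.  Stand-alone illustration:
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t dma = 0x1234567890ABCDEFull;
	uint32_t lo = (uint32_t)(dma & 0xFFFFFFFF);
	uint32_t hi = (uint32_t)((dma >> 16) >> 16);

	printf("PORT_LST_ADDR    = 0x%08x\n", lo);
	printf("PORT_LST_ADDR_HI = 0x%08x\n", hi);
	return 0;
}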
453
454 /*
455 * Restart a port
456 *
457 * @port Pointer to the port data structure.
458 *
459 * return value
460 * None
461 */
462 static void mtip_restart_port(struct mtip_port *port)
463 {
464 unsigned long timeout;
465
466 /* Disable the DMA engine */
467 mtip_enable_engine(port, 0);
468
469 /* Chip quirk: wait up to 500ms for PxCMD.CR == 0 */
470 timeout = jiffies + msecs_to_jiffies(500);
471 while ((readl(port->mmio + PORT_CMD) & PORT_CMD_LIST_ON)
472 && time_before(jiffies, timeout))
473 ;
474
475 if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
476 return;
477
478 /*
479 * Chip quirk: escalate to hba reset if
480 * PxCMD.CR not clear after 500 ms
481 */
482 if (readl(port->mmio + PORT_CMD) & PORT_CMD_LIST_ON) {
483 dev_warn(&port->dd->pdev->dev,
484 "PxCMD.CR not clear, escalating reset\n");
485
486 if (mtip_hba_reset(port->dd))
487 dev_err(&port->dd->pdev->dev,
488 "HBA reset escalation failed.\n");
489
490 		/* 30 ms delay before COM reset to quiesce chip */
491 mdelay(30);
492 }
493
494 dev_warn(&port->dd->pdev->dev, "Issuing COM reset\n");
495
496 /* Set PxSCTL.DET */
497 writel(readl(port->mmio + PORT_SCR_CTL) |
498 1, port->mmio + PORT_SCR_CTL);
499 readl(port->mmio + PORT_SCR_CTL);
500
501 /* Wait 1 ms to quiesce chip function */
502 timeout = jiffies + msecs_to_jiffies(1);
503 while (time_before(jiffies, timeout))
504 ;
505
506 if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
507 return;
508
509 /* Clear PxSCTL.DET */
510 writel(readl(port->mmio + PORT_SCR_CTL) & ~1,
511 port->mmio + PORT_SCR_CTL);
512 readl(port->mmio + PORT_SCR_CTL);
513
514 /* Wait 500 ms for bit 0 of PORT_SCR_STS to be set */
515 timeout = jiffies + msecs_to_jiffies(500);
516 while (((readl(port->mmio + PORT_SCR_STAT) & 0x01) == 0)
517 && time_before(jiffies, timeout))
518 ;
519
520 if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
521 return;
522
523 if ((readl(port->mmio + PORT_SCR_STAT) & 0x01) == 0)
524 dev_warn(&port->dd->pdev->dev,
525 "COM reset failed\n");
526
527 mtip_init_port(port);
528 mtip_start_port(port);
529
530 }
531
532 static int mtip_device_reset(struct driver_data *dd)
533 {
534 int rv = 0;
535
536 if (mtip_check_surprise_removal(dd->pdev))
537 return 0;
538
539 if (mtip_hba_reset(dd) < 0)
540 rv = -EFAULT;
541
542 mdelay(1);
543 mtip_init_port(dd->port);
544 mtip_start_port(dd->port);
545
546 /* Enable interrupts on the HBA. */
547 writel(readl(dd->mmio + HOST_CTL) | HOST_IRQ_EN,
548 dd->mmio + HOST_CTL);
549 return rv;
550 }
551
552 /*
553 * Helper function for tag logging
554 */
555 static void print_tags(struct driver_data *dd,
556 char *msg,
557 unsigned long *tagbits,
558 int cnt)
559 {
560 unsigned char tagmap[128];
561 int group, tagmap_len = 0;
562
563 memset(tagmap, 0, sizeof(tagmap));
564 for (group = SLOTBITS_IN_LONGS; group > 0; group--)
565 tagmap_len += sprintf(tagmap + tagmap_len, "%016lX ",
566 tagbits[group-1]);
567 dev_warn(&dd->pdev->dev,
568 		"%d command(s) %s: tagmap [%s]\n", cnt, msg, tagmap);
569 }
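/*
 * print_tags() above renders the per-group tag bitmaps from the highest
 * group down, 16 hex digits per word.  The same formatting as a sketch
 * (two groups; tags 0, 2 and 127 set), using unsigned long long so the
 * demo is 64-bit on any host:
 */
#include <stdio.h>

int main(void)
{
	unsigned long long tagbits[2] = { 0x5ull, 0x8000000000000000ull };
	char tagmap[128];
	int group, tagmap_len = 0;

	for (group = 2; group > 0; group--)
		tagmap_len += sprintf(tagmap + tagmap_len, "%016llX ",
				      tagbits[group - 1]);
	printf("3 command(s) completed: tagmap [%s]\n", tagmap);
	return 0;
}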
570
571 /*
572 * Internal command completion callback function.
573 *
574 * This function is normally called by the driver ISR when an internal
575 * command completed. This function signals the command completion by
576 * calling complete().
577 *
578 * @port Pointer to the port data structure.
579 * @tag Tag of the command that has completed.
580 * @data Pointer to a completion structure.
581 * @status Completion status.
582 *
583 * return value
584 * None
585 */
586 static void mtip_completion(struct mtip_port *port,
587 int tag, struct mtip_cmd *command, int status)
588 {
589 struct completion *waiting = command->comp_data;
590 if (unlikely(status == PORT_IRQ_TF_ERR))
591 dev_warn(&port->dd->pdev->dev,
592 "Internal command %d completed with TFE\n", tag);
593
594 complete(waiting);
595 }
596
597 static void mtip_null_completion(struct mtip_port *port,
598 int tag, struct mtip_cmd *command, int status)
599 {
600 }
601
602 static int mtip_read_log_page(struct mtip_port *port, u8 page, u16 *buffer,
603 dma_addr_t buffer_dma, unsigned int sectors);
604 static int mtip_get_smart_attr(struct mtip_port *port, unsigned int id,
605 struct smart_attr *attrib);
606 /*
607 * Handle an error.
608 *
609 * @dd Pointer to the DRIVER_DATA structure.
610 *
611 * return value
612 * None
613 */
614 static void mtip_handle_tfe(struct driver_data *dd)
615 {
616 int group, tag, bit, reissue, rv;
617 struct mtip_port *port;
618 struct mtip_cmd *cmd;
619 u32 completed;
620 struct host_to_dev_fis *fis;
621 unsigned long tagaccum[SLOTBITS_IN_LONGS];
622 unsigned int cmd_cnt = 0;
623 unsigned char *buf;
624 char *fail_reason = NULL;
625 int fail_all_ncq_write = 0, fail_all_ncq_cmds = 0;
626
627 dev_warn(&dd->pdev->dev, "Taskfile error\n");
628
629 port = dd->port;
630
631 set_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
632
633 if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) {
634 cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL);
635 dbg_printk(MTIP_DRV_NAME " TFE for the internal command\n");
636
637 if (cmd->comp_data && cmd->comp_func) {
638 cmd->comp_func(port, MTIP_TAG_INTERNAL,
639 cmd, PORT_IRQ_TF_ERR);
640 }
641 goto handle_tfe_exit;
642 }
643
644 /* clear the tag accumulator */
645 memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long));
646
647 /* Loop through all the groups */
648 for (group = 0; group < dd->slot_groups; group++) {
649 completed = readl(port->completed[group]);
650
651 dev_warn(&dd->pdev->dev, "g=%u, comp=%x\n", group, completed);
652
653 /* clear completed status register in the hardware.*/
654 writel(completed, port->completed[group]);
655
656 /* Process successfully completed commands */
657 for (bit = 0; bit < 32 && completed; bit++) {
658 if (!(completed & (1<<bit)))
659 continue;
660 tag = (group << 5) + bit;
661
662 /* Skip the internal command slot */
663 if (tag == MTIP_TAG_INTERNAL)
664 continue;
665
666 cmd = mtip_cmd_from_tag(dd, tag);
667 if (likely(cmd->comp_func)) {
668 set_bit(tag, tagaccum);
669 cmd_cnt++;
670 cmd->comp_func(port, tag, cmd, 0);
671 } else {
672 dev_err(&port->dd->pdev->dev,
673 "Missing completion func for tag %d",
674 tag);
675 if (mtip_check_surprise_removal(dd->pdev)) {
676 /* don't proceed further */
677 return;
678 }
679 }
680 }
681 }
682
683 print_tags(dd, "completed (TFE)", tagaccum, cmd_cnt);
684
685 /* Restart the port */
686 mdelay(20);
687 mtip_restart_port(port);
688
689 /* Trying to determine the cause of the error */
690 rv = mtip_read_log_page(dd->port, ATA_LOG_SATA_NCQ,
691 dd->port->log_buf,
692 dd->port->log_buf_dma, 1);
693 if (rv) {
694 dev_warn(&dd->pdev->dev,
695 "Error in READ LOG EXT (10h) command\n");
696 /* non-critical error, don't fail the load */
697 } else {
698 buf = (unsigned char *)dd->port->log_buf;
699 if (buf[259] & 0x1) {
700 dev_info(&dd->pdev->dev,
701 "Write protect bit is set.\n");
702 set_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag);
703 fail_all_ncq_write = 1;
704 fail_reason = "write protect";
705 }
706 if (buf[288] == 0xF7) {
707 dev_info(&dd->pdev->dev,
708 "Exceeded Tmax, drive in thermal shutdown.\n");
709 set_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag);
710 fail_all_ncq_cmds = 1;
711 fail_reason = "thermal shutdown";
712 }
713 if (buf[288] == 0xBF) {
714 set_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag);
715 dev_info(&dd->pdev->dev,
716 "Drive indicates rebuild has failed. Secure erase required.\n");
717 fail_all_ncq_cmds = 1;
718 fail_reason = "rebuild failed";
719 }
720 }
721
722 /* clear the tag accumulator */
723 memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long));
724
725 /* Loop through all the groups */
726 for (group = 0; group < dd->slot_groups; group++) {
727 for (bit = 0; bit < 32; bit++) {
728 reissue = 1;
729 tag = (group << 5) + bit;
730 cmd = mtip_cmd_from_tag(dd, tag);
731
732 fis = (struct host_to_dev_fis *)cmd->command;
733
734 /* Should re-issue? */
735 if (tag == MTIP_TAG_INTERNAL ||
736 fis->command == ATA_CMD_SET_FEATURES)
737 reissue = 0;
738 else {
739 if (fail_all_ncq_cmds ||
740 (fail_all_ncq_write &&
741 fis->command == ATA_CMD_FPDMA_WRITE)) {
742 dev_warn(&dd->pdev->dev,
743 " Fail: %s w/tag %d [%s].\n",
744 fis->command == ATA_CMD_FPDMA_WRITE ?
745 "write" : "read",
746 tag,
747 fail_reason != NULL ?
748 fail_reason : "unknown");
749 if (cmd->comp_func) {
750 cmd->comp_func(port, tag,
751 cmd, -ENODATA);
752 }
753 continue;
754 }
755 }
756
757 /*
758 * First check if this command has
759 * exceeded its retries.
760 */
761 if (reissue && (cmd->retries-- > 0)) {
762
763 set_bit(tag, tagaccum);
764
765 /* Re-issue the command. */
766 mtip_issue_ncq_command(port, tag);
767
768 continue;
769 }
770
771 /* Retire a command that will not be reissued */
772 dev_warn(&port->dd->pdev->dev,
773 "retiring tag %d\n", tag);
774
775 if (cmd->comp_func)
776 cmd->comp_func(port, tag, cmd, PORT_IRQ_TF_ERR);
777 else
778 dev_warn(&port->dd->pdev->dev,
779 "Bad completion for tag %d\n",
780 tag);
781 }
782 }
783 print_tags(dd, "reissued (TFE)", tagaccum, cmd_cnt);
784
785 handle_tfe_exit:
786 /* clear eh_active */
787 clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
788 wake_up_interruptible(&port->svc_wait);
789 }
790
791 /*
792 * Handle a set device bits interrupt
793 */
794 static inline void mtip_workq_sdbfx(struct mtip_port *port, int group,
795 u32 completed)
796 {
797 struct driver_data *dd = port->dd;
798 int tag, bit;
799 struct mtip_cmd *command;
800
801 if (!completed) {
802 WARN_ON_ONCE(!completed);
803 return;
804 }
805 /* clear completed status register in the hardware.*/
806 writel(completed, port->completed[group]);
807
808 /* Process completed commands. */
809 for (bit = 0; (bit < 32) && completed; bit++) {
810 if (completed & 0x01) {
811 tag = (group << 5) | bit;
812
813 /* skip internal command slot. */
814 if (unlikely(tag == MTIP_TAG_INTERNAL))
815 continue;
816
817 command = mtip_cmd_from_tag(dd, tag);
818 if (likely(command->comp_func))
819 command->comp_func(port, tag, command, 0);
820 else {
821 dev_dbg(&dd->pdev->dev,
822 "Null completion for tag %d",
823 tag);
824
825 if (mtip_check_surprise_removal(
826 dd->pdev)) {
827 return;
828 }
829 }
830 }
831 completed >>= 1;
832 }
833
834 /* If last, re-enable interrupts */
835 if (atomic_dec_return(&dd->irq_workers_active) == 0)
836 writel(0xffffffff, dd->mmio + HOST_IRQ_STAT);
837 }
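/*
 * The completion loop above tests the low bit and shifts right, so it
 * stops as soon as no completed bits remain.  The same walk in isolation,
 * recovering each tag from its group and bit position:
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t completed = 0x00000013; /* bits 0, 1 and 4 set */
	int group = 1;                   /* tags 32..63          */

	for (int bit = 0; bit < 32 && completed; bit++) {
		if (completed & 0x01)
			printf("complete tag %d\n", (group << 5) | bit);
		completed >>= 1;
	}
	return 0;
}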
838
839 /*
840 * Process legacy pio and d2h interrupts
841 */
842 static inline void mtip_process_legacy(struct driver_data *dd, u32 port_stat)
843 {
844 struct mtip_port *port = dd->port;
845 struct mtip_cmd *cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL);
846
847 if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags) &&
848 (cmd != NULL) && !(readl(port->cmd_issue[MTIP_TAG_INTERNAL])
849 & (1 << MTIP_TAG_INTERNAL))) {
850 if (cmd->comp_func) {
851 cmd->comp_func(port, MTIP_TAG_INTERNAL, cmd, 0);
852 return;
853 }
854 }
855
856 return;
857 }
858
859 /*
860 * Demux and handle errors
861 */
862 static inline void mtip_process_errors(struct driver_data *dd, u32 port_stat)
863 {
864
865 if (unlikely(port_stat & PORT_IRQ_CONNECT)) {
866 dev_warn(&dd->pdev->dev,
867 "Clearing PxSERR.DIAG.x\n");
868 writel((1 << 26), dd->port->mmio + PORT_SCR_ERR);
869 }
870
871 if (unlikely(port_stat & PORT_IRQ_PHYRDY)) {
872 dev_warn(&dd->pdev->dev,
873 "Clearing PxSERR.DIAG.n\n");
874 writel((1 << 16), dd->port->mmio + PORT_SCR_ERR);
875 }
876
877 if (unlikely(port_stat & ~PORT_IRQ_HANDLED)) {
878 dev_warn(&dd->pdev->dev,
879 "Port stat errors %x unhandled\n",
880 (port_stat & ~PORT_IRQ_HANDLED));
881 if (mtip_check_surprise_removal(dd->pdev))
882 return;
883 }
884 if (likely(port_stat & (PORT_IRQ_TF_ERR | PORT_IRQ_IF_ERR))) {
885 set_bit(MTIP_PF_EH_ACTIVE_BIT, &dd->port->flags);
886 wake_up_interruptible(&dd->port->svc_wait);
887 }
888 }
889
890 static inline irqreturn_t mtip_handle_irq(struct driver_data *data)
891 {
892 struct driver_data *dd = (struct driver_data *) data;
893 struct mtip_port *port = dd->port;
894 u32 hba_stat, port_stat;
895 int rv = IRQ_NONE;
896 int do_irq_enable = 1, i, workers;
897 struct mtip_work *twork;
898
899 hba_stat = readl(dd->mmio + HOST_IRQ_STAT);
900 if (hba_stat) {
901 rv = IRQ_HANDLED;
902
903 /* Acknowledge the interrupt status on the port.*/
904 port_stat = readl(port->mmio + PORT_IRQ_STAT);
905 if (unlikely(port_stat == 0xFFFFFFFF)) {
906 mtip_check_surprise_removal(dd->pdev);
907 return IRQ_HANDLED;
908 }
909 writel(port_stat, port->mmio + PORT_IRQ_STAT);
910
911 /* Demux port status */
912 if (likely(port_stat & PORT_IRQ_SDB_FIS)) {
913 do_irq_enable = 0;
914 WARN_ON_ONCE(atomic_read(&dd->irq_workers_active) != 0);
915
916 /* Start at 1: group zero is always local? */
917 for (i = 0, workers = 0; i < MTIP_MAX_SLOT_GROUPS;
918 i++) {
919 twork = &dd->work[i];
920 twork->completed = readl(port->completed[i]);
921 if (twork->completed)
922 workers++;
923 }
924
925 atomic_set(&dd->irq_workers_active, workers);
926 if (workers) {
927 for (i = 1; i < MTIP_MAX_SLOT_GROUPS; i++) {
928 twork = &dd->work[i];
929 if (twork->completed)
930 queue_work_on(
931 twork->cpu_binding,
932 dd->isr_workq,
933 &twork->work);
934 }
935
936 if (likely(dd->work[0].completed))
937 mtip_workq_sdbfx(port, 0,
938 dd->work[0].completed);
939
940 } else {
941 /*
942 * Chip quirk: SDB interrupt but nothing
943 * to complete
944 */
945 do_irq_enable = 1;
946 }
947 }
948
949 if (unlikely(port_stat & PORT_IRQ_ERR)) {
950 if (unlikely(mtip_check_surprise_removal(dd->pdev))) {
951 /* don't proceed further */
952 return IRQ_HANDLED;
953 }
954 if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
955 &dd->dd_flag))
956 return rv;
957
958 mtip_process_errors(dd, port_stat & PORT_IRQ_ERR);
959 }
960
961 if (unlikely(port_stat & PORT_IRQ_LEGACY))
962 mtip_process_legacy(dd, port_stat & PORT_IRQ_LEGACY);
963 }
964
965 /* acknowledge interrupt */
966 if (unlikely(do_irq_enable))
967 writel(hba_stat, dd->mmio + HOST_IRQ_STAT);
968
969 return rv;
970 }
971
972 /*
973 * HBA interrupt subroutine.
974 *
975 * @irq IRQ number.
976 * @instance Pointer to the driver data structure.
977 *
978 * return value
979 * IRQ_HANDLED A HBA interrupt was pending and handled.
980 * IRQ_NONE This interrupt was not for the HBA.
981 */
982 static irqreturn_t mtip_irq_handler(int irq, void *instance)
983 {
984 struct driver_data *dd = instance;
985
986 return mtip_handle_irq(dd);
987 }
988
989 static void mtip_issue_non_ncq_command(struct mtip_port *port, int tag)
990 {
991 writel(1 << MTIP_TAG_BIT(tag),
992 port->cmd_issue[MTIP_TAG_INDEX(tag)]);
993 }
994
995 static bool mtip_pause_ncq(struct mtip_port *port,
996 struct host_to_dev_fis *fis)
997 {
998 struct host_to_dev_fis *reply;
999 unsigned long task_file_data;
1000
1001 reply = port->rxfis + RX_FIS_D2H_REG;
1002 task_file_data = readl(port->mmio+PORT_TFDATA);
1003
1004 if ((task_file_data & 1))
1005 return false;
1006
1007 if (fis->command == ATA_CMD_SEC_ERASE_PREP) {
1008 port->ic_pause_timer = jiffies;
1009 return true;
1010 } else if ((fis->command == ATA_CMD_DOWNLOAD_MICRO) &&
1011 (fis->features == 0x03)) {
1012 set_bit(MTIP_PF_DM_ACTIVE_BIT, &port->flags);
1013 port->ic_pause_timer = jiffies;
1014 return true;
1015 } else if ((fis->command == ATA_CMD_SEC_ERASE_UNIT) ||
1016 ((fis->command == 0xFC) &&
1017 (fis->features == 0x27 || fis->features == 0x72 ||
1018 fis->features == 0x62 || fis->features == 0x26))) {
1019 clear_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag);
1020 		/* COM reset after secure erase or low-level format */
1021 mtip_restart_port(port);
1022 clear_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags);
1023 return false;
1024 }
1025
1026 return false;
1027 }
1028
1029 /*
1030 * Wait for port to quiesce
1031 *
1032 * @port Pointer to port data structure
1033 * @timeout Max duration to wait (ms)
1034 *
1035 * return value
1036 * 0 Success
1037 * -EBUSY Commands still active
1038 */
1039 static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout)
1040 {
1041 unsigned long to;
1042 unsigned int n;
1043 unsigned int active = 1;
1044
1045 blk_mq_stop_hw_queues(port->dd->queue);
1046
1047 to = jiffies + msecs_to_jiffies(timeout);
1048 do {
1049 if (test_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags) &&
1050 test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags)) {
1051 msleep(20);
1052 continue; /* svc thd is actively issuing commands */
1053 }
1054
1055 msleep(100);
1056 if (mtip_check_surprise_removal(port->dd->pdev))
1057 goto err_fault;
1058 if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
1059 goto err_fault;
1060
1061 /*
1062 * Ignore s_active bit 0 of array element 0.
1063 * This bit will always be set
1064 */
1065 active = readl(port->s_active[0]) & 0xFFFFFFFE;
1066 for (n = 1; n < port->dd->slot_groups; n++)
1067 active |= readl(port->s_active[n]);
1068
1069 if (!active)
1070 break;
1071 } while (time_before(jiffies, to));
1072
1073 blk_mq_start_stopped_hw_queues(port->dd->queue, true);
1074 return active ? -EBUSY : 0;
1075 err_fault:
1076 blk_mq_start_stopped_hw_queues(port->dd->queue, true);
1077 return -EFAULT;
1078 }
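/*
 * Quiescing above ORs every group's s_active register together, masking
 * bit 0 of group 0 (always set for the internal command slot), and
 * succeeds only when the union is empty.  A sketch with fake register
 * values in place of the readl() calls:
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* only the always-set internal-slot bit remains */
	uint32_t s_active[4] = { 0x00000001, 0, 0, 0 };
	uint32_t active = s_active[0] & 0xFFFFFFFE; /* ignore bit 0, group 0 */

	for (int n = 1; n < 4; n++)
		active |= s_active[n];

	printf("%s\n", active ? "still busy" : "quiesced");
	return 0;
}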
1079
1080 /*
1081 * Execute an internal command and wait for the completion.
1082 *
1083 * @port Pointer to the port data structure.
1084 * @fis Pointer to the FIS that describes the command.
1085 * @fis_len Length in WORDS of the FIS.
1086 * @buffer DMA accessible for command data.
1087 * @buf_len Length, in bytes, of the data buffer.
1088 * @opts Command header options, excluding the FIS length
1089 * and the number of PRD entries.
1090 * @timeout Time in ms to wait for the command to complete.
1091 *
1092 * return value
1093 * 0 Command completed successfully.
1094 * -EFAULT The buffer address is not correctly aligned.
1095 * -EBUSY Internal command or other IO in progress.
1096 * -EAGAIN Time out waiting for command to complete.
1097 */
1098 static int mtip_exec_internal_command(struct mtip_port *port,
1099 struct host_to_dev_fis *fis,
1100 int fis_len,
1101 dma_addr_t buffer,
1102 int buf_len,
1103 u32 opts,
1104 gfp_t atomic,
1105 unsigned long timeout)
1106 {
1107 struct mtip_cmd_sg *command_sg;
1108 DECLARE_COMPLETION_ONSTACK(wait);
1109 struct mtip_cmd *int_cmd;
1110 struct driver_data *dd = port->dd;
1111 int rv = 0;
1112
1113 /* Make sure the buffer is 8 byte aligned. This is asic specific. */
1114 if (buffer & 0x00000007) {
1115 dev_err(&dd->pdev->dev, "SG buffer is not 8 byte aligned\n");
1116 return -EFAULT;
1117 }
1118
1119 int_cmd = mtip_get_int_command(dd);
1120
1121 set_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
1122
1123 if (fis->command == ATA_CMD_SEC_ERASE_PREP)
1124 set_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags);
1125
1126 clear_bit(MTIP_PF_DM_ACTIVE_BIT, &port->flags);
1127
1128 if (atomic == GFP_KERNEL) {
1129 if (fis->command != ATA_CMD_STANDBYNOW1) {
1130 /* wait for io to complete if non atomic */
1131 if (mtip_quiesce_io(port,
1132 MTIP_QUIESCE_IO_TIMEOUT_MS) < 0) {
1133 dev_warn(&dd->pdev->dev,
1134 "Failed to quiesce IO\n");
1135 mtip_put_int_command(dd, int_cmd);
1136 clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
1137 wake_up_interruptible(&port->svc_wait);
1138 return -EBUSY;
1139 }
1140 }
1141
1142 /* Set the completion function and data for the command. */
1143 int_cmd->comp_data = &wait;
1144 int_cmd->comp_func = mtip_completion;
1145
1146 } else {
1147 /* Clear completion - we're going to poll */
1148 int_cmd->comp_data = NULL;
1149 int_cmd->comp_func = mtip_null_completion;
1150 }
1151
1152 /* Copy the command to the command table */
1153 memcpy(int_cmd->command, fis, fis_len*4);
1154
1155 /* Populate the SG list */
1156 int_cmd->command_header->opts =
1157 __force_bit2int cpu_to_le32(opts | fis_len);
1158 if (buf_len) {
1159 command_sg = int_cmd->command + AHCI_CMD_TBL_HDR_SZ;
1160
1161 command_sg->info =
1162 __force_bit2int cpu_to_le32((buf_len-1) & 0x3FFFFF);
1163 command_sg->dba =
1164 __force_bit2int cpu_to_le32(buffer & 0xFFFFFFFF);
1165 command_sg->dba_upper =
1166 __force_bit2int cpu_to_le32((buffer >> 16) >> 16);
1167
1168 int_cmd->command_header->opts |=
1169 __force_bit2int cpu_to_le32((1 << 16));
1170 }
1171
1172 /* Populate the command header */
1173 int_cmd->command_header->byte_count = 0;
1174
1175 /* Issue the command to the hardware */
1176 mtip_issue_non_ncq_command(port, MTIP_TAG_INTERNAL);
1177
1178 if (atomic == GFP_KERNEL) {
1179 /* Wait for the command to complete or timeout. */
1180 if ((rv = wait_for_completion_interruptible_timeout(
1181 &wait,
1182 msecs_to_jiffies(timeout))) <= 0) {
1183 if (rv == -ERESTARTSYS) { /* interrupted */
1184 dev_err(&dd->pdev->dev,
1185 "Internal command [%02X] was interrupted after %lu ms\n",
1186 fis->command, timeout);
1187 rv = -EINTR;
1188 goto exec_ic_exit;
1189 } else if (rv == 0) /* timeout */
1190 dev_err(&dd->pdev->dev,
1191 "Internal command did not complete [%02X] within timeout of %lu ms\n",
1192 fis->command, timeout);
1193 else
1194 dev_err(&dd->pdev->dev,
1195 "Internal command [%02X] wait returned code [%d] after %lu ms - unhandled\n",
1196 fis->command, rv, timeout);
1197
1198 if (mtip_check_surprise_removal(dd->pdev) ||
1199 test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
1200 &dd->dd_flag)) {
1201 dev_err(&dd->pdev->dev,
1202 "Internal command [%02X] wait returned due to SR\n",
1203 fis->command);
1204 rv = -ENXIO;
1205 goto exec_ic_exit;
1206 }
1207 mtip_device_reset(dd); /* recover from timeout issue */
1208 rv = -EAGAIN;
1209 goto exec_ic_exit;
1210 }
1211 } else {
1212 u32 hba_stat, port_stat;
1213
1214 /* Spin for <timeout> checking if command still outstanding */
1215 timeout = jiffies + msecs_to_jiffies(timeout);
1216 while ((readl(port->cmd_issue[MTIP_TAG_INTERNAL])
1217 & (1 << MTIP_TAG_INTERNAL))
1218 && time_before(jiffies, timeout)) {
1219 if (mtip_check_surprise_removal(dd->pdev)) {
1220 rv = -ENXIO;
1221 goto exec_ic_exit;
1222 }
1223 if ((fis->command != ATA_CMD_STANDBYNOW1) &&
1224 test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
1225 &dd->dd_flag)) {
1226 rv = -ENXIO;
1227 goto exec_ic_exit;
1228 }
1229 port_stat = readl(port->mmio + PORT_IRQ_STAT);
1230 if (!port_stat)
1231 continue;
1232
1233 if (port_stat & PORT_IRQ_ERR) {
1234 dev_err(&dd->pdev->dev,
1235 "Internal command [%02X] failed\n",
1236 fis->command);
1237 mtip_device_reset(dd);
1238 rv = -EIO;
1239 goto exec_ic_exit;
1240 } else {
1241 writel(port_stat, port->mmio + PORT_IRQ_STAT);
1242 hba_stat = readl(dd->mmio + HOST_IRQ_STAT);
1243 if (hba_stat)
1244 writel(hba_stat,
1245 dd->mmio + HOST_IRQ_STAT);
1246 }
1247 break;
1248 }
1249 }
1250
1251 if (readl(port->cmd_issue[MTIP_TAG_INTERNAL])
1252 & (1 << MTIP_TAG_INTERNAL)) {
1253 rv = -ENXIO;
1254 if (!test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)) {
1255 mtip_device_reset(dd);
1256 rv = -EAGAIN;
1257 }
1258 }
1259 exec_ic_exit:
1260 /* Clear the allocated and active bits for the internal command. */
1261 mtip_put_int_command(dd, int_cmd);
1262 clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
1263 if (rv >= 0 && mtip_pause_ncq(port, fis)) {
1264 /* NCQ paused */
1265 return rv;
1266 }
1267 wake_up_interruptible(&port->svc_wait);
1268
1269 return rv;
1270 }
1271
1272 /*
1273 * Byte-swap ATA ID strings.
1274 *
1275 * ATA identify data contains strings in byte-swapped 16-bit words.
1276 * They must be swapped (on all architectures) to be usable as C strings.
1277 * This function swaps bytes in-place.
1278 *
1279 * @buf The buffer location of the string
1280 * @len The number of bytes to swap
1281 *
1282 * return value
1283 * None
1284 */
1285 static inline void ata_swap_string(u16 *buf, unsigned int len)
1286 {
1287 int i;
1288 for (i = 0; i < (len/2); i++)
1289 be16_to_cpus(&buf[i]);
1290 }
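/*
 * On a little-endian host be16_to_cpus() exchanges the two bytes of each
 * 16-bit word, so a model string stored in the identify words as "iMrcno"
 * reads back as "Micron".  A stand-alone equivalent of that in-place swap
 * (operating on bytes rather than u16s, but with the same effect):
 */
#include <stdio.h>
#include <string.h>

static void swap_pairs(char *buf, unsigned int len)
{
	for (unsigned int i = 0; i + 1 < len; i += 2) {
		char t = buf[i];
		buf[i] = buf[i + 1];
		buf[i + 1] = t;
	}
}

int main(void)
{
	char model[] = "iMrcno";        /* as stored in identify data */
	swap_pairs(model, strlen(model));
	printf("%s\n", model);          /* prints "Micron" */
	return 0;
}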
1291
1292 static void mtip_set_timeout(struct driver_data *dd,
1293 struct host_to_dev_fis *fis,
1294 unsigned int *timeout, u8 erasemode)
1295 {
1296 switch (fis->command) {
1297 case ATA_CMD_DOWNLOAD_MICRO:
1298 *timeout = 120000; /* 2 minutes */
1299 break;
1300 case ATA_CMD_SEC_ERASE_UNIT:
1301 case 0xFC:
1302 if (erasemode)
1303 *timeout = ((*(dd->port->identify + 90) * 2) * 60000);
1304 else
1305 *timeout = ((*(dd->port->identify + 89) * 2) * 60000);
1306 break;
1307 case ATA_CMD_STANDBYNOW1:
1308 *timeout = 120000; /* 2 minutes */
1309 break;
1310 case 0xF7:
1311 case 0xFA:
1312 *timeout = 60000; /* 60 seconds */
1313 break;
1314 case ATA_CMD_SMART:
1315 *timeout = 15000; /* 15 seconds */
1316 break;
1317 default:
1318 *timeout = MTIP_IOCTL_CMD_TIMEOUT_MS;
1319 break;
1320 }
1321 }
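/*
 * For secure erase the timeout above comes from identify word 89 (normal)
 * or 90 (enhanced), which the code treats as a count of 2-minute units
 * converted to milliseconds.  Worked example with an assumed identify
 * value of 30:
 */
#include <stdio.h>

int main(void)
{
	unsigned short word89 = 30;                     /* example value */
	unsigned int timeout_ms = (word89 * 2) * 60000; /* 2-min units   */

	printf("erase timeout: %u ms (%u min)\n",
	       timeout_ms, timeout_ms / 60000);
	return 0;
}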
1322
1323 /*
1324 * Request the device identity information.
1325 *
1326 * If a user space buffer is not specified, i.e. is NULL, the
1327 * identify information is still read from the drive and placed
1328 * into the identify data buffer (@e port->identify) in the
1329 * port data structure.
1330 * When the identify buffer contains valid identify information @e
1331 * port->identify_valid is non-zero.
1332 *
1333 * @port Pointer to the port structure.
1334 * @user_buffer A user space buffer where the identify data should be
1335 * copied.
1336 *
1337 * return value
1338 * 0 Command completed successfully.
1339  * -EFAULT An error occurred while copying data to the user buffer.
1340 * -1 Command failed.
1341 */
1342 static int mtip_get_identify(struct mtip_port *port, void __user *user_buffer)
1343 {
1344 int rv = 0;
1345 struct host_to_dev_fis fis;
1346
1347 if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
1348 return -EFAULT;
1349
1350 /* Build the FIS. */
1351 memset(&fis, 0, sizeof(struct host_to_dev_fis));
1352 fis.type = 0x27;
1353 fis.opts = 1 << 7;
1354 fis.command = ATA_CMD_ID_ATA;
1355
1356 /* Set the identify information as invalid. */
1357 port->identify_valid = 0;
1358
1359 /* Clear the identify information. */
1360 memset(port->identify, 0, sizeof(u16) * ATA_ID_WORDS);
1361
1362 /* Execute the command. */
1363 if (mtip_exec_internal_command(port,
1364 &fis,
1365 5,
1366 port->identify_dma,
1367 sizeof(u16) * ATA_ID_WORDS,
1368 0,
1369 GFP_KERNEL,
1370 MTIP_INT_CMD_TIMEOUT_MS)
1371 < 0) {
1372 rv = -1;
1373 goto out;
1374 }
1375
1376 /*
1377 * Perform any necessary byte-swapping. Yes, the kernel does in fact
1378 * perform field-sensitive swapping on the string fields.
1379 * See the kernel use of ata_id_string() for proof of this.
1380 */
1381 #ifdef __LITTLE_ENDIAN
1382 ata_swap_string(port->identify + 27, 40); /* model string*/
1383 ata_swap_string(port->identify + 23, 8); /* firmware string*/
1384 ata_swap_string(port->identify + 10, 20); /* serial# string*/
1385 #else
1386 {
1387 int i;
1388 for (i = 0; i < ATA_ID_WORDS; i++)
1389 port->identify[i] = le16_to_cpu(port->identify[i]);
1390 }
1391 #endif
1392
1393 /* Check security locked state */
1394 if (port->identify[128] & 0x4)
1395 set_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag);
1396 else
1397 clear_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag);
1398
1399 #ifdef MTIP_TRIM /* Disabling TRIM support temporarily */
1400 /* Demux ID.DRAT & ID.RZAT to determine trim support */
1401 if (port->identify[69] & (1 << 14) && port->identify[69] & (1 << 5))
1402 port->dd->trim_supp = true;
1403 else
1404 #endif
1405 port->dd->trim_supp = false;
1406
1407 /* Set the identify buffer as valid. */
1408 port->identify_valid = 1;
1409
1410 if (user_buffer) {
1411 if (copy_to_user(
1412 user_buffer,
1413 port->identify,
1414 ATA_ID_WORDS * sizeof(u16))) {
1415 rv = -EFAULT;
1416 goto out;
1417 }
1418 }
1419
1420 out:
1421 return rv;
1422 }
1423
1424 /*
1425 * Issue a standby immediate command to the device.
1426 *
1427 * @port Pointer to the port structure.
1428 *
1429 * return value
1430 * 0 Command was executed successfully.
1431 * -1 An error occurred while executing the command.
1432 */
1433 static int mtip_standby_immediate(struct mtip_port *port)
1434 {
1435 int rv;
1436 struct host_to_dev_fis fis;
1437 unsigned long start;
1438 unsigned int timeout;
1439
1440 /* Build the FIS. */
1441 memset(&fis, 0, sizeof(struct host_to_dev_fis));
1442 fis.type = 0x27;
1443 fis.opts = 1 << 7;
1444 fis.command = ATA_CMD_STANDBYNOW1;
1445
1446 mtip_set_timeout(port->dd, &fis, &timeout, 0);
1447
1448 start = jiffies;
1449 rv = mtip_exec_internal_command(port,
1450 &fis,
1451 5,
1452 0,
1453 0,
1454 0,
1455 GFP_ATOMIC,
1456 timeout);
1457 dbg_printk(MTIP_DRV_NAME "Time taken to complete standby cmd: %d ms\n",
1458 jiffies_to_msecs(jiffies - start));
1459 if (rv)
1460 dev_warn(&port->dd->pdev->dev,
1461 "STANDBY IMMEDIATE command failed.\n");
1462
1463 return rv;
1464 }
1465
1466 /*
1467 * Issue a READ LOG EXT command to the device.
1468 *
1469 * @port pointer to the port structure.
1470 * @page page number to fetch
1471 * @buffer pointer to buffer
1472 * @buffer_dma dma address corresponding to @buffer
1473 * @sectors page length to fetch, in sectors
1474 *
1475 * return value
1476 * @rv return value from mtip_exec_internal_command()
1477 */
1478 static int mtip_read_log_page(struct mtip_port *port, u8 page, u16 *buffer,
1479 dma_addr_t buffer_dma, unsigned int sectors)
1480 {
1481 struct host_to_dev_fis fis;
1482
1483 memset(&fis, 0, sizeof(struct host_to_dev_fis));
1484 fis.type = 0x27;
1485 fis.opts = 1 << 7;
1486 fis.command = ATA_CMD_READ_LOG_EXT;
1487 fis.sect_count = sectors & 0xFF;
1488 fis.sect_cnt_ex = (sectors >> 8) & 0xFF;
1489 fis.lba_low = page;
1490 fis.lba_mid = 0;
1491 fis.device = ATA_DEVICE_OBS;
1492
1493 memset(buffer, 0, sectors * ATA_SECT_SIZE);
1494
1495 return mtip_exec_internal_command(port,
1496 &fis,
1497 5,
1498 buffer_dma,
1499 sectors * ATA_SECT_SIZE,
1500 0,
1501 GFP_ATOMIC,
1502 MTIP_INT_CMD_TIMEOUT_MS);
1503 }
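/*
 * READ LOG EXT takes a 16-bit sector count split across two FIS bytes,
 * exactly as the sect_count/sect_cnt_ex assignments above do.  Runnable
 * demonstration of the split:
 */
#include <stdio.h>

int main(void)
{
	unsigned int sectors = 0x0123;
	unsigned char sect_count  = sectors & 0xFF;        /* low byte  */
	unsigned char sect_cnt_ex = (sectors >> 8) & 0xFF; /* high byte */

	printf("sect_count=0x%02X sect_cnt_ex=0x%02X\n",
	       sect_count, sect_cnt_ex);
	return 0;
}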
1504
1505 /*
1506 * Issue a SMART READ DATA command to the device.
1507 *
1508 * @port pointer to the port structure.
1509 * @buffer pointer to buffer
1510 * @buffer_dma dma address corresponding to @buffer
1511 *
1512 * return value
1513 * @rv return value from mtip_exec_internal_command()
1514 */
1515 static int mtip_get_smart_data(struct mtip_port *port, u8 *buffer,
1516 dma_addr_t buffer_dma)
1517 {
1518 struct host_to_dev_fis fis;
1519
1520 memset(&fis, 0, sizeof(struct host_to_dev_fis));
1521 fis.type = 0x27;
1522 fis.opts = 1 << 7;
1523 fis.command = ATA_CMD_SMART;
1524 fis.features = 0xD0;
1525 fis.sect_count = 1;
1526 fis.lba_mid = 0x4F;
1527 fis.lba_hi = 0xC2;
1528 fis.device = ATA_DEVICE_OBS;
1529
1530 return mtip_exec_internal_command(port,
1531 &fis,
1532 5,
1533 buffer_dma,
1534 ATA_SECT_SIZE,
1535 0,
1536 GFP_ATOMIC,
1537 15000);
1538 }
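
/*
 * The 0x4F/0xC2 values written to lba_mid/lba_hi above are the SMART
 * command signature required by the ATA specification; feature 0xD0
 * selects the SMART READ DATA subcommand.
 */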
1539
1540 /*
1541 * Get the value of a smart attribute
1542 *
1543 * @port pointer to the port structure
1544 * @id attribute number
1545 * @attrib pointer to return attrib information corresponding to @id
1546 *
1547 * return value
1548 * -EINVAL NULL buffer passed or unsupported attribute @id.
1549 * -EPERM Identify data not valid, SMART not supported or not enabled
1550 */
1551 static int mtip_get_smart_attr(struct mtip_port *port, unsigned int id,
1552 struct smart_attr *attrib)
1553 {
1554 int rv, i;
1555 struct smart_attr *pattr;
1556
1557 if (!attrib)
1558 return -EINVAL;
1559
1560 if (!port->identify_valid) {
1561 dev_warn(&port->dd->pdev->dev, "IDENTIFY DATA not valid\n");
1562 return -EPERM;
1563 }
1564 if (!(port->identify[82] & 0x1)) {
1565 dev_warn(&port->dd->pdev->dev, "SMART not supported\n");
1566 return -EPERM;
1567 }
1568 if (!(port->identify[85] & 0x1)) {
1569 dev_warn(&port->dd->pdev->dev, "SMART not enabled\n");
1570 return -EPERM;
1571 }
1572
1573 memset(port->smart_buf, 0, ATA_SECT_SIZE);
1574 rv = mtip_get_smart_data(port, port->smart_buf, port->smart_buf_dma);
1575 if (rv) {
1576 dev_warn(&port->dd->pdev->dev, "Failed to get SMART data\n");
1577 return rv;
1578 }
1579
1580 pattr = (struct smart_attr *)(port->smart_buf + 2); /* skip the 2-byte revision header */
1581 for (i = 0; i < 29; i++, pattr++)
1582 if (pattr->attr_id == id) {
1583 memcpy(attrib, pattr, sizeof(struct smart_attr));
1584 break;
1585 }
1586
1587 if (i == 29) { /* scanned the whole table without a match */
1588 dev_warn(&port->dd->pdev->dev,
1589 "Query for invalid SMART attribute ID\n");
1590 rv = -EINVAL;
1591 }
1592
1593 return rv;
1594 }
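
/*
 * Illustrative sketch, not part of the driver source: fetching one
 * SMART attribute with the helper above. Attribute 194 is, by
 * convention on most drives, the temperature attribute (an assumption
 * about the drive's attribute table, not something this driver
 * defines); field names follow struct smart_attr from the driver
 * header.
 */
static void example_read_temperature(struct mtip_port *port)
{
	struct smart_attr attr;

	if (!mtip_get_smart_attr(port, 194, &attr))
		dev_info(&port->dd->pdev->dev,
			"SMART attr %u: current value %u\n",
			attr.attr_id, attr.cur);
}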
1595
1596 /*
1597 * Trim unused sectors
1598 *
1599 * @dd pointer to driver_data structure
1600 * @lba starting lba
1601 * @len # of 512b sectors to trim
1602 *
1603 * return value
1604 * -ENOMEM Out of dma memory
1605 * -EINVAL Invalid parameters passed in, trim not supported
1606 * -EIO Error submitting trim request to hw
1607 */
1608 static int mtip_send_trim(struct driver_data *dd, unsigned int lba,
1609 unsigned int len)
1610 {
1611 int i, rv = 0;
1612 u64 tlba, tlen, sect_left;
1613 struct mtip_trim_entry *buf;
1614 dma_addr_t dma_addr;
1615 struct host_to_dev_fis fis;
1616
1617 if (!len || !dd->trim_supp)
1618 return -EINVAL;
1619
1620 /* Trim request too big */
1621 WARN_ON(len > (MTIP_MAX_TRIM_ENTRY_LEN * MTIP_MAX_TRIM_ENTRIES));
1622
1623 /* Trim request not aligned on 4k boundary */
1624 WARN_ON(len % 8 != 0);
1625
1626 /* Warn if vu_trim structure is too big */
1627 WARN_ON(sizeof(struct mtip_trim) > ATA_SECT_SIZE);
1628
1629 /* Allocate a DMA buffer for the trim structure */
1630 buf = dmam_alloc_coherent(&dd->pdev->dev, ATA_SECT_SIZE, &dma_addr,
1631 GFP_KERNEL);
1632 if (!buf)
1633 return -ENOMEM;
1634 memset(buf, 0, ATA_SECT_SIZE);
1635
1636 for (i = 0, sect_left = len, tlba = lba;
1637 i < MTIP_MAX_TRIM_ENTRIES && sect_left;
1638 i++) {
1639 tlen = (sect_left >= MTIP_MAX_TRIM_ENTRY_LEN ?
1640 MTIP_MAX_TRIM_ENTRY_LEN :
1641 sect_left);
1642 buf[i].lba = __force_bit2int cpu_to_le32(tlba);
1643 buf[i].range = __force_bit2int cpu_to_le16(tlen);
1644 tlba += tlen;
1645 sect_left -= tlen;
1646 }
1647 WARN_ON(sect_left != 0);
1648
1649 /* Build the fis */
1650 memset(&fis, 0, sizeof(struct host_to_dev_fis));
1651 fis.type = 0x27;
1652 fis.opts = 1 << 7;
1653 fis.command = 0xfb; /* vendor-unique trim command (see vu_trim above) */
1654 fis.features = 0x60;
1655 fis.sect_count = 1;
1656 fis.device = ATA_DEVICE_OBS;
1657
1658 if (mtip_exec_internal_command(dd->port,
1659 &fis,
1660 5,
1661 dma_addr,
1662 ATA_SECT_SIZE,
1663 0,
1664 GFP_KERNEL,
1665 MTIP_TRIM_TIMEOUT_MS) < 0)
1666 rv = -EIO;
1667
1668 dmam_free_coherent(&dd->pdev->dev, ATA_SECT_SIZE, buf, dma_addr);
1669 return rv;
1670 }
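
/*
 * Worked example of the splitting loop above, assuming for
 * illustration that MTIP_MAX_TRIM_ENTRY_LEN is 0xffff sectors: a
 * request of len = 0x18000 sectors starting at lba L becomes two
 * entries, { L, 0xffff } and { L + 0xffff, 0x8001 }.
 */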
1671
1672 /*
1673 * Get the drive capacity.
1674 *
1675 * @dd Pointer to the device data structure.
1676 * @sectors Pointer to the variable that will receive the sector count.
1677 *
1678 * return value
1679 * 1 Capacity was returned successfully.
1680 * 0 The identify information is invalid.
1681 */
1682 static bool mtip_hw_get_capacity(struct driver_data *dd, sector_t *sectors)
1683 {
1684 struct mtip_port *port = dd->port;
1685 u64 total, raw0, raw1, raw2, raw3;
1686 raw0 = port->identify[100];
1687 raw1 = port->identify[101];
1688 raw2 = port->identify[102];
1689 raw3 = port->identify[103];
1690 total = raw0 | raw1<<16 | raw2<<32 | raw3<<48;
1691 *sectors = total;
1692 return !!port->identify_valid;
1693 }
1694
1695 /*
1696 * Display the identify command data.
1697 *
1698 * @port Pointer to the port data structure.
1699 *
1700 * return value
1701 * None
1702 */
1703 static void mtip_dump_identify(struct mtip_port *port)
1704 {
1705 sector_t sectors;
1706 unsigned short revid;
1707 char cbuf[42];
1708
1709 if (!port->identify_valid)
1710 return;
1711
1712 strlcpy(cbuf, (char *)(port->identify+10), 21);
1713 dev_info(&port->dd->pdev->dev,
1714 "Serial No.: %s\n", cbuf);
1715
1716 strlcpy(cbuf, (char *)(port->identify+23), 9);
1717 dev_info(&port->dd->pdev->dev,
1718 "Firmware Ver.: %s\n", cbuf);
1719
1720 strlcpy(cbuf, (char *)(port->identify+27), 41);
1721 dev_info(&port->dd->pdev->dev, "Model: %s\n", cbuf);
1722
1723 dev_info(&port->dd->pdev->dev, "Security: %04x %s\n",
1724 port->identify[128],
1725 port->identify[128] & 0x4 ? "(LOCKED)" : "");
1726
1727 if (mtip_hw_get_capacity(port->dd, &sectors))
1728 dev_info(&port->dd->pdev->dev,
1729 "Capacity: %llu sectors (%llu MB)\n",
1730 (u64)sectors,
1731 ((u64)sectors) * ATA_SECT_SIZE >> 20);
1732
1733 pci_read_config_word(port->dd->pdev, PCI_REVISION_ID, &revid);
1734 switch (revid & 0xFF) {
1735 case 0x1:
1736 strlcpy(cbuf, "A0", 3);
1737 break;
1738 case 0x3:
1739 strlcpy(cbuf, "A2", 3);
1740 break;
1741 default:
1742 strlcpy(cbuf, "?", 2);
1743 break;
1744 }
1745 dev_info(&port->dd->pdev->dev,
1746 "Card Type: %s\n", cbuf);
1747 }
1748
1749 /*
1750 * Map the command's scatter list into the command table.
1751 *
 * @dd Pointer to the driver data structure.
1752 * @command Pointer to the command.
1753 * @nents Number of scatter list entries.
1754 *
1755 * return value
1756 * None
1757 */
1758 static inline void fill_command_sg(struct driver_data *dd,
1759 struct mtip_cmd *command,
1760 int nents)
1761 {
1762 int n;
1763 unsigned int dma_len;
1764 struct mtip_cmd_sg *command_sg;
1765 struct scatterlist *sg = command->sg;
1766
1767 command_sg = command->command + AHCI_CMD_TBL_HDR_SZ;
1768
1769 for (n = 0; n < nents; n++) {
1770 dma_len = sg_dma_len(sg);
1771 if (dma_len > 0x400000)
1772 dev_err(&dd->pdev->dev,
1773 "DMA segment length truncated\n");
1774 command_sg->info = __force_bit2int
1775 cpu_to_le32((dma_len-1) & 0x3FFFFF);
1776 command_sg->dba = __force_bit2int
1777 cpu_to_le32(sg_dma_address(sg));
1778 command_sg->dba_upper = __force_bit2int
1779 cpu_to_le32((sg_dma_address(sg) >> 16) >> 16);
1780 command_sg++;
1781 sg++;
1782 }
1783 }
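
/*
 * Note on the dba_upper assignment above: the double 16-bit shift,
 * rather than a single ">> 32", keeps the expression well-defined
 * when dma_addr_t is a 32-bit type, where a shift by the full type
 * width would be undefined behaviour.
 */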
1784
1785 /*
1786 * @brief Execute a drive command.
1787 *
1788 * return value 0 The command completed successfully.
1789 * return value -1 An error occurred while executing the command.
1790 */
1791 static int exec_drive_task(struct mtip_port *port, u8 *command)
1792 {
1793 struct host_to_dev_fis fis;
1794 struct host_to_dev_fis *reply = (port->rxfis + RX_FIS_D2H_REG);
1795 unsigned int to;
1796
1797 /* Build the FIS. */
1798 memset(&fis, 0, sizeof(struct host_to_dev_fis));
1799 fis.type = 0x27;
1800 fis.opts = 1 << 7;
1801 fis.command = command[0];
1802 fis.features = command[1];
1803 fis.sect_count = command[2];
1804 fis.sector = command[3];
1805 fis.cyl_low = command[4];
1806 fis.cyl_hi = command[5];
1807 fis.device = command[6] & ~0x10; /* Clear the dev bit*/
1808
1809 mtip_set_timeout(port->dd, &fis, &to, 0);
1810
1811 dbg_printk(MTIP_DRV_NAME " %s: User Command: cmd %x, feat %x, nsect %x, sect %x, lcyl %x, hcyl %x, sel %x\n",
1812 __func__,
1813 command[0],
1814 command[1],
1815 command[2],
1816 command[3],
1817 command[4],
1818 command[5],
1819 command[6]);
1820
1821 /* Execute the command. */
1822 if (mtip_exec_internal_command(port,
1823 &fis,
1824 5,
1825 0,
1826 0,
1827 0,
1828 GFP_KERNEL,
1829 to) < 0) {
1830 return -1;
1831 }
1832
1833 command[0] = reply->command; /* Status*/
1834 command[1] = reply->features; /* Error*/
1835 command[4] = reply->cyl_low;
1836 command[5] = reply->cyl_hi;
1837
1838 dbg_printk(MTIP_DRV_NAME " %s: Completion Status: stat %x, err %x , cyl_lo %x cyl_hi %x\n",
1839 __func__,
1840 command[0],
1841 command[1],
1842 command[4],
1843 command[5]);
1844
1845 return 0;
1846 }
1847
1848 /*
1849 * @brief Execute a drive command.
1850 *
1851 * @param port Pointer to the port data structure.
1852 * @param command Pointer to the user specified command parameters.
1853 * @param user_buffer Pointer to the user space buffer where read sector
1854 * data should be copied.
1855 *
1856 * return value 0 The command completed successfully.
1857 * return value -EFAULT An error occurred while copying the completion
1858 * data to the user space buffer.
1859 * return value -1 An error occurred while executing the command.
1860 */
1861 static int exec_drive_command(struct mtip_port *port, u8 *command,
1862 void __user *user_buffer)
1863 {
1864 struct host_to_dev_fis fis;
1865 struct host_to_dev_fis *reply;
1866 u8 *buf = NULL;
1867 dma_addr_t dma_addr = 0;
1868 int rv = 0, xfer_sz = command[3];
1869 unsigned int to;
1870
1871 if (xfer_sz) {
1872 if (!user_buffer)
1873 return -EFAULT;
1874
1875 buf = dmam_alloc_coherent(&port->dd->pdev->dev,
1876 ATA_SECT_SIZE * xfer_sz,
1877 &dma_addr,
1878 GFP_KERNEL);
1879 if (!buf) {
1880 dev_err(&port->dd->pdev->dev,
1881 "Memory allocation failed (%d bytes)\n",
1882 ATA_SECT_SIZE * xfer_sz);
1883 return -ENOMEM;
1884 }
1885 memset(buf, 0, ATA_SECT_SIZE * xfer_sz);
1886 }
1887
1888 /* Build the FIS. */
1889 memset(&fis, 0, sizeof(struct host_to_dev_fis));
1890 fis.type = 0x27;
1891 fis.opts = 1 << 7;
1892 fis.command = command[0];
1893 fis.features = command[2];
1894 fis.sect_count = command[3];
1895 if (fis.command == ATA_CMD_SMART) {
1896 fis.sector = command[1];
1897 fis.cyl_low = 0x4F;
1898 fis.cyl_hi = 0xC2;
1899 }
1900
1901 mtip_set_timeout(port->dd, &fis, &to, 0);
1902
1903 if (xfer_sz)
1904 reply = (port->rxfis + RX_FIS_PIO_SETUP);
1905 else
1906 reply = (port->rxfis + RX_FIS_D2H_REG);
1907
1908 dbg_printk(MTIP_DRV_NAME
1909 " %s: User Command: cmd %x, sect %x, "
1910 "feat %x, sectcnt %x\n",
1911 __func__,
1912 command[0],
1913 command[1],
1914 command[2],
1915 command[3]);
1916
1917 /* Execute the command. */
1918 if (mtip_exec_internal_command(port,
1919 &fis,
1920 5,
1921 (xfer_sz ? dma_addr : 0),
1922 (xfer_sz ? ATA_SECT_SIZE * xfer_sz : 0),
1923 0,
1924 GFP_KERNEL,
1925 to)
1926 < 0) {
1927 rv = -EFAULT;
1928 goto exit_drive_command;
1929 }
1930
1931 /* Collect the completion status. */
1932 command[0] = reply->command; /* Status*/
1933 command[1] = reply->features; /* Error*/
1934 command[2] = reply->sect_count;
1935
1936 dbg_printk(MTIP_DRV_NAME
1937 " %s: Completion Status: stat %x, "
1938 "err %x, nsect %x\n",
1939 __func__,
1940 command[0],
1941 command[1],
1942 command[2]);
1943
1944 if (xfer_sz) {
1945 if (copy_to_user(user_buffer,
1946 buf,
1947 ATA_SECT_SIZE * command[3])) {
1948 rv = -EFAULT;
1949 goto exit_drive_command;
1950 }
1951 }
1952 exit_drive_command:
1953 if (buf)
1954 dmam_free_coherent(&port->dd->pdev->dev,
1955 ATA_SECT_SIZE * xfer_sz, buf, dma_addr);
1956 return rv;
1957 }
1958
1959 /*
1960 * Indicates whether a command has a single sector payload.
1961 *
1962 * @command opcode sent to the device.
1963 * @features Features register value sent with the command.
1964 *
1965 * return value
1966 * 1 command is one that always has a single sector payload,
1967 * regardless of the value in the Sector Count field.
1968 * 0 otherwise
1969 *
1970 */
1971 static unsigned int implicit_sector(unsigned char command,
1972 unsigned char features)
1973 {
1974 unsigned int rv = 0;
1975
1976 /* list of commands that have an implicit sector count of 1 */
1977 switch (command) {
1978 case ATA_CMD_SEC_SET_PASS:
1979 case ATA_CMD_SEC_UNLOCK:
1980 case ATA_CMD_SEC_ERASE_PREP:
1981 case ATA_CMD_SEC_ERASE_UNIT:
1982 case ATA_CMD_SEC_FREEZE_LOCK:
1983 case ATA_CMD_SEC_DISABLE_PASS:
1984 case ATA_CMD_PMP_READ:
1985 case ATA_CMD_PMP_WRITE:
1986 rv = 1;
1987 break;
1988 case ATA_CMD_SET_MAX:
1989 if (features == ATA_SET_MAX_UNLOCK)
1990 rv = 1;
1991 break;
1992 case ATA_CMD_SMART:
1993 if ((features == ATA_SMART_READ_VALUES) ||
1994 (features == ATA_SMART_READ_THRESHOLDS))
1995 rv = 1;
1996 break;
1997 case ATA_CMD_CONF_OVERLAY:
1998 if ((features == ATA_DCO_IDENTIFY) ||
1999 (features == ATA_DCO_SET))
2000 rv = 1;
2001 break;
2002 }
2003 return rv;
2004 }
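
/*
 * Example: implicit_sector(ATA_CMD_SMART, ATA_SMART_READ_VALUES)
 * returns 1, because SMART READ DATA always transfers exactly one
 * 512-byte sector regardless of the Sector Count register.
 */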
2005
2006 /*
2007 * Executes a taskfile
2008 * See ide_taskfile_ioctl() for derivation
2009 */
2010 static int exec_drive_taskfile(struct driver_data *dd,
2011 void __user *buf,
2012 ide_task_request_t *req_task,
2013 int outtotal)
2014 {
2015 struct host_to_dev_fis fis;
2016 struct host_to_dev_fis *reply;
2017 u8 *outbuf = NULL;
2018 u8 *inbuf = NULL;
2019 dma_addr_t outbuf_dma = 0;
2020 dma_addr_t inbuf_dma = 0;
2021 dma_addr_t dma_buffer = 0;
2022 int err = 0;
2023 unsigned int taskin = 0;
2024 unsigned int taskout = 0;
2025 u8 nsect = 0;
2026 unsigned int timeout;
2027 unsigned int force_single_sector;
2028 unsigned int transfer_size;
2029 unsigned long task_file_data;
2030 int intotal = outtotal + req_task->out_size;
2031 int erasemode = 0;
2032
2033 taskout = req_task->out_size;
2034 taskin = req_task->in_size;
2035 /* 130560 = 512 * 0xFF*/
2036 if (taskin > 130560 || taskout > 130560) {
2037 err = -EINVAL;
2038 goto abort;
2039 }
2040
2041 if (taskout) {
2042 outbuf = memdup_user(buf + outtotal, taskout);
2043 if (IS_ERR(outbuf)) {
2044 err = PTR_ERR(outbuf);
2045 outbuf = NULL;
2046 goto abort;
2047 }
2048 outbuf_dma = pci_map_single(dd->pdev,
2049 outbuf,
2050 taskout,
2051 DMA_TO_DEVICE);
2052 if (pci_dma_mapping_error(dd->pdev, outbuf_dma)) {
2053 err = -ENOMEM;
2054 goto abort;
2055 }
2056 dma_buffer = outbuf_dma;
2057 }
2058
2059 if (taskin) {
2060 inbuf = memdup_user(buf + intotal, taskin);
2061 if (IS_ERR(inbuf)) {
2062 err = PTR_ERR(inbuf);
2063 inbuf = NULL;
2064 goto abort;
2065 }
2066 inbuf_dma = pci_map_single(dd->pdev,
2067 inbuf,
2068 taskin, DMA_FROM_DEVICE);
2069 if (pci_dma_mapping_error(dd->pdev, inbuf_dma)) {
2070 err = -ENOMEM;
2071 goto abort;
2072 }
2073 dma_buffer = inbuf_dma;
2074 }
2075
2076 /* only supports PIO and non-data commands from this ioctl. */
2077 switch (req_task->data_phase) {
2078 case TASKFILE_OUT:
2079 nsect = taskout / ATA_SECT_SIZE;
2080 reply = (dd->port->rxfis + RX_FIS_PIO_SETUP);
2081 break;
2082 case TASKFILE_IN:
2083 reply = (dd->port->rxfis + RX_FIS_PIO_SETUP);
2084 break;
2085 case TASKFILE_NO_DATA:
2086 reply = (dd->port->rxfis + RX_FIS_D2H_REG);
2087 break;
2088 default:
2089 err = -EINVAL;
2090 goto abort;
2091 }
2092
2093 /* Build the FIS. */
2094 memset(&fis, 0, sizeof(struct host_to_dev_fis));
2095
2096 fis.type = 0x27;
2097 fis.opts = 1 << 7;
2098 fis.command = req_task->io_ports[7];
2099 fis.features = req_task->io_ports[1];
2100 fis.sect_count = req_task->io_ports[2];
2101 fis.lba_low = req_task->io_ports[3];
2102 fis.lba_mid = req_task->io_ports[4];
2103 fis.lba_hi = req_task->io_ports[5];
2104 /* Clear the dev bit*/
2105 fis.device = req_task->io_ports[6] & ~0x10;
2106
2107 if ((req_task->in_flags.all == 0) && (req_task->out_flags.all & 1)) {
2108 req_task->in_flags.all =
2109 IDE_TASKFILE_STD_IN_FLAGS |
2110 (IDE_HOB_STD_IN_FLAGS << 8);
2111 fis.lba_low_ex = req_task->hob_ports[3];
2112 fis.lba_mid_ex = req_task->hob_ports[4];
2113 fis.lba_hi_ex = req_task->hob_ports[5];
2114 fis.features_ex = req_task->hob_ports[1];
2115 fis.sect_cnt_ex = req_task->hob_ports[2];
2116
2117 } else {
2118 req_task->in_flags.all = IDE_TASKFILE_STD_IN_FLAGS;
2119 }
2120
2121 force_single_sector = implicit_sector(fis.command, fis.features);
2122
2123 if ((taskin || taskout) && (!fis.sect_count)) {
2124 if (nsect)
2125 fis.sect_count = nsect;
2126 else {
2127 if (!force_single_sector) {
2128 dev_warn(&dd->pdev->dev,
2129 "data movement but "
2130 "sect_count is 0\n");
2131 err = -EINVAL;
2132 goto abort;
2133 }
2134 }
2135 }
2136
2137 dbg_printk(MTIP_DRV_NAME
2138 " %s: cmd %x, feat %x, nsect %x,"
2139 " sect/lbal %x, lcyl/lbam %x, hcyl/lbah %x,"
2140 " head/dev %x\n",
2141 __func__,
2142 fis.command,
2143 fis.features,
2144 fis.sect_count,
2145 fis.lba_low,
2146 fis.lba_mid,
2147 fis.lba_hi,
2148 fis.device);
2149
2150 /* check for erase mode support during secure erase.*/
2151 if ((fis.command == ATA_CMD_SEC_ERASE_UNIT) && outbuf &&
2152 (outbuf[0] & MTIP_SEC_ERASE_MODE)) {
2153 erasemode = 1;
2154 }
2155
2156 mtip_set_timeout(dd, &fis, &timeout, erasemode);
2157
2158 /* Determine the correct transfer size.*/
2159 if (force_single_sector)
2160 transfer_size = ATA_SECT_SIZE;
2161 else
2162 transfer_size = ATA_SECT_SIZE * fis.sect_count;
2163
2164 /* Execute the command.*/
2165 if (mtip_exec_internal_command(dd->port,
2166 &fis,
2167 5,
2168 dma_buffer,
2169 transfer_size,
2170 0,
2171 GFP_KERNEL,
2172 timeout) < 0) {
2173 err = -EIO;
2174 goto abort;
2175 }
2176
2177 task_file_data = readl(dd->port->mmio+PORT_TFDATA);
2178
2179 if ((req_task->data_phase == TASKFILE_IN) && !(task_file_data & 1)) {
2180 reply = dd->port->rxfis + RX_FIS_PIO_SETUP;
2181 req_task->io_ports[7] = reply->control;
2182 } else {
2183 reply = dd->port->rxfis + RX_FIS_D2H_REG;
2184 req_task->io_ports[7] = reply->command;
2185 }
2186
2187 /* reclaim the DMA buffers.*/
2188 if (inbuf_dma)
2189 pci_unmap_single(dd->pdev, inbuf_dma,
2190 taskin, DMA_FROM_DEVICE);
2191 if (outbuf_dma)
2192 pci_unmap_single(dd->pdev, outbuf_dma,
2193 taskout, DMA_TO_DEVICE);
2194 inbuf_dma = 0;
2195 outbuf_dma = 0;
2196
2197 /* return the ATA registers to the caller.*/
2198 req_task->io_ports[1] = reply->features;
2199 req_task->io_ports[2] = reply->sect_count;
2200 req_task->io_ports[3] = reply->lba_low;
2201 req_task->io_ports[4] = reply->lba_mid;
2202 req_task->io_ports[5] = reply->lba_hi;
2203 req_task->io_ports[6] = reply->device;
2204
2205 if (req_task->out_flags.all & 1) {
2206
2207 req_task->hob_ports[3] = reply->lba_low_ex;
2208 req_task->hob_ports[4] = reply->lba_mid_ex;
2209 req_task->hob_ports[5] = reply->lba_hi_ex;
2210 req_task->hob_ports[1] = reply->features_ex;
2211 req_task->hob_ports[2] = reply->sect_cnt_ex;
2212 }
2213 dbg_printk(MTIP_DRV_NAME
2214 " %s: Completion: stat %x,"
2215 "err %x, sect_cnt %x, lbalo %x,"
2216 "lbamid %x, lbahi %x, dev %x\n",
2217 __func__,
2218 req_task->io_ports[7],
2219 req_task->io_ports[1],
2220 req_task->io_ports[2],
2221 req_task->io_ports[3],
2222 req_task->io_ports[4],
2223 req_task->io_ports[5],
2224 req_task->io_ports[6]);
2225
2226 if (taskout) {
2227 if (copy_to_user(buf + outtotal, outbuf, taskout)) {
2228 err = -EFAULT;
2229 goto abort;
2230 }
2231 }
2232 if (taskin) {
2233 if (copy_to_user(buf + intotal, inbuf, taskin)) {
2234 err = -EFAULT;
2235 goto abort;
2236 }
2237 }
2238 abort:
2239 if (inbuf_dma)
2240 pci_unmap_single(dd->pdev, inbuf_dma,
2241 taskin, DMA_FROM_DEVICE);
2242 if (outbuf_dma)
2243 pci_unmap_single(dd->pdev, outbuf_dma,
2244 taskout, DMA_TO_DEVICE);
2245 kfree(outbuf);
2246 kfree(inbuf);
2247
2248 return err;
2249 }
2250
2251 /*
2252 * Handle IOCTL calls from the Block Layer.
2253 *
2254 * This function is called by the Block Layer when it receives an IOCTL
2255 * command that it does not understand. If the IOCTL command is not
2256 * supported, this function returns -EINVAL.
2257 *
2258 * @dd Pointer to the driver data structure.
2259 * @cmd IOCTL command passed from the Block Layer.
2260 * @arg IOCTL argument passed from the Block Layer.
2261 *
2262 * return value
2263 * 0 The IOCTL completed successfully.
2264 * -EINVAL The specified command is not supported.
2265 * -EFAULT An error occurred copying data to a user space buffer.
2266 * -EIO An error occurred while executing the command.
2267 */
2268 static int mtip_hw_ioctl(struct driver_data *dd, unsigned int cmd,
2269 unsigned long arg)
2270 {
2271 switch (cmd) {
2272 case HDIO_GET_IDENTITY:
2273 {
2274 if (copy_to_user((void __user *)arg, dd->port->identify,
2275 sizeof(u16) * ATA_ID_WORDS))
2276 return -EFAULT;
2277 break;
2278 }
2279 case HDIO_DRIVE_CMD:
2280 {
2281 u8 drive_command[4];
2282
2283 /* Copy the user command info to our buffer. */
2284 if (copy_from_user(drive_command,
2285 (void __user *) arg,
2286 sizeof(drive_command)))
2287 return -EFAULT;
2288
2289 /* Execute the drive command. */
2290 if (exec_drive_command(dd->port,
2291 drive_command,
2292 (void __user *) (arg+4)))
2293 return -EIO;
2294
2295 /* Copy the status back to the user's buffer. */
2296 if (copy_to_user((void __user *) arg,
2297 drive_command,
2298 sizeof(drive_command)))
2299 return -EFAULT;
2300
2301 break;
2302 }
2303 case HDIO_DRIVE_TASK:
2304 {
2305 u8 drive_command[7];
2306
2307 /* Copy the user command info to our buffer. */
2308 if (copy_from_user(drive_command,
2309 (void __user *) arg,
2310 sizeof(drive_command)))
2311 return -EFAULT;
2312
2313 /* Execute the drive command. */
2314 if (exec_drive_task(dd->port, drive_command))
2315 return -EIO;
2316
2317 /* Copy the status back to the user's buffer. */
2318 if (copy_to_user((void __user *) arg,
2319 drive_command,
2320 sizeof(drive_command)))
2321 return -EFAULT;
2322
2323 break;
2324 }
2325 case HDIO_DRIVE_TASKFILE: {
2326 ide_task_request_t req_task;
2327 int ret, outtotal;
2328
2329 if (copy_from_user(&req_task, (void __user *) arg,
2330 sizeof(req_task)))
2331 return -EFAULT;
2332
2333 outtotal = sizeof(req_task);
2334
2335 ret = exec_drive_taskfile(dd, (void __user *) arg,
2336 &req_task, outtotal);
2337
2338 if (copy_to_user((void __user *) arg, &req_task,
2339 sizeof(req_task)))
2340 return -EFAULT;
2341
2342 return ret;
2343 }
2344
2345 default:
2346 return -EINVAL;
2347 }
2348 return 0;
2349 }
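
/*
 * Illustrative user-space sketch, not part of the driver: issuing
 * HDIO_DRIVE_CMD against a device node. The 4-byte header (command,
 * sector, feature, sector count) matches what exec_drive_command()
 * above expects; "/dev/rssd0" is a hypothetical node name.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/hdreg.h>

int main(void)
{
	/* 4 command bytes followed by room for one sector of data */
	unsigned char args[4 + 512] = { 0xec, 0, 0, 1 }; /* IDENTIFY DEVICE, 1 sector */
	int fd = open("/dev/rssd0", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, HDIO_DRIVE_CMD, args) < 0) {
		perror("HDIO_DRIVE_CMD");
		close(fd);
		return 1;
	}
	/* on return args[0] holds the status and args[1] the error byte */
	printf("status %02x error %02x\n", args[0], args[1]);
	close(fd);
	return 0;
}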
2350
2351 /*
2352 * Submit an IO to the hw
2353 *
2354 * This function is called by the block layer to issue an I/O
2355 * to the device. On completion, mtip_async_complete() finishes
2356 * the request.
2357 *
2358 * @dd Pointer to the driver data structure.
2359 * @rq Pointer to the block layer request to issue.
2360 * @command Pointer to the command structure backing the request.
2361 * @nents Number of entries in the command's scatter list.
2362 * @hctx Pointer to the blk-mq hardware context the request
2363 * was dispatched from.
2364 *
2365 * The transfer direction is derived from the request with
2366 * rq_data_dir(); the start sector and sector count come from
2367 * blk_rq_pos() and blk_rq_sectors().
2368 *
2369 * return value
2370 * None
2371 */
2372 static void mtip_hw_submit_io(struct driver_data *dd, struct request *rq,
2373 struct mtip_cmd *command, int nents,
2374 struct blk_mq_hw_ctx *hctx)
2375 {
2376 struct host_to_dev_fis *fis;
2377 struct mtip_port *port = dd->port;
2378 int dma_dir = rq_data_dir(rq) == READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
2379 u64 start = blk_rq_pos(rq);
2380 unsigned int nsect = blk_rq_sectors(rq);
2381
2382 /* Map the scatter list for DMA access */
2383 nents = dma_map_sg(&dd->pdev->dev, command->sg, nents, dma_dir);
2384
2385 prefetch(&port->flags);
2386
2387 command->scatter_ents = nents;
2388
2389 /*
2390 * The number of retries for this command before it is
2391 * reported as a failure to the upper layers.
2392 */
2393 command->retries = MTIP_MAX_RETRIES;
2394
2395 /* Fill out fis */
2396 fis = command->command;
2397 fis->type = 0x27;
2398 fis->opts = 1 << 7;
2399 if (dma_dir == DMA_FROM_DEVICE)
2400 fis->command = ATA_CMD_FPDMA_READ;
2401 else
2402 fis->command = ATA_CMD_FPDMA_WRITE;
2403 fis->lba_low = start & 0xFF;
2404 fis->lba_mid = (start >> 8) & 0xFF;
2405 fis->lba_hi = (start >> 16) & 0xFF;
2406 fis->lba_low_ex = (start >> 24) & 0xFF;
2407 fis->lba_mid_ex = (start >> 32) & 0xFF;
2408 fis->lba_hi_ex = (start >> 40) & 0xFF;
2409 fis->device = 1 << 6;
2410 fis->features = nsect & 0xFF;
2411 fis->features_ex = (nsect >> 8) & 0xFF;
2412 fis->sect_count = ((rq->tag << 3) | (rq->tag >> 5)); /* NCQ tag in count[7:3] */
2413 fis->sect_cnt_ex = 0;
2414 fis->control = 0;
2415 fis->res2 = 0;
2416 fis->res3 = 0;
2417 fill_command_sg(dd, command, nents);
2418
2419 if (unlikely(command->unaligned))
2420 fis->device |= 1 << 7;
2421
2422 /* Populate the command header */
2423 command->command_header->opts =
2424 __force_bit2int cpu_to_le32(
2425 (nents << 16) | 5 | AHCI_CMD_PREFETCH);
2426 command->command_header->byte_count = 0;
2427
2428 /*
2429 * Set the completion function and data for the command
2430 * within this layer.
2431 */
2432 command->comp_data = dd;
2433 command->comp_func = mtip_async_complete;
2434 command->direction = dma_dir;
2435
2436 /*
2437 * To prevent this command from being issued
2438 * if an internal command is in progress or error handling is active.
2439 */
2440 if (unlikely(port->flags & MTIP_PF_PAUSE_IO)) {
2441 set_bit(rq->tag, port->cmds_to_issue);
2442 set_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags);
2443 return;
2444 }
2445
2446 /* Issue the command to the hardware */
2447 mtip_issue_ncq_command(port, rq->tag);
2448 }
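
/*
 * Worked example of the 48-bit LBA split above: for start =
 * 0x123456789A, lba_low = 0x9A, lba_mid = 0x78, lba_hi = 0x56,
 * lba_low_ex = 0x34, lba_mid_ex = 0x12 and lba_hi_ex = 0x00.
 */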
2449
2450 /*
2451 * Sysfs status dump.
2452 *
2453 * @dev Pointer to the device structure, passed by the kernel.
2454 * @attr Pointer to the device_attribute structure passed by the kernel.
2455 * @buf Pointer to the char buffer that will receive the stats info.
2456 *
2457 * return value
2458 * The size, in bytes, of the data copied into buf.
2459 */
2460 static ssize_t mtip_hw_show_status(struct device *dev,
2461 struct device_attribute *attr,
2462 char *buf)
2463 {
2464 struct driver_data *dd = dev_to_disk(dev)->private_data;
2465 int size = 0;
2466
2467 if (test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag))
2468 size += sprintf(buf, "%s", "thermal_shutdown\n");
2469 else if (test_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag))
2470 size += sprintf(buf, "%s", "write_protect\n");
2471 else
2472 size += sprintf(buf, "%s", "online\n");
2473
2474 return size;
2475 }
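
/*
 * The attribute below is registered against the disk's kobject (see
 * mtip_hw_sysfs_init()), so it surfaces as a read-only file such as
 * /sys/block/rssd0/status (device name illustrative) reporting
 * "online", "write_protect" or "thermal_shutdown".
 */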
2476
2477 static DEVICE_ATTR(status, S_IRUGO, mtip_hw_show_status, NULL);
2478
2480 /* debugfs entries */
2480
2481 static ssize_t show_device_status(struct device_driver *drv, char *buf)
2482 {
2483 int size = 0;
2484 struct driver_data *dd, *tmp;
2485 unsigned long flags;
2486 char id_buf[42];
2487 u16 status = 0;
2488
2489 spin_lock_irqsave(&dev_lock, flags);
2490 size += sprintf(&buf[size], "Devices Present:\n");
2491 list_for_each_entry_safe(dd, tmp, &online_list, online_list) {
2492 if (dd->pdev) {
2493 if (dd->port &&
2494 dd->port->identify &&
2495 dd->port->identify_valid) {
2496 strlcpy(id_buf,
2497 (char *) (dd->port->identify + 10), 21);
2498 status = *(dd->port->identify + 141);
2499 } else {
2500 memset(id_buf, 0, 42);
2501 status = 0;
2502 }
2503
2504 if (dd->port &&
2505 test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags)) {
2506 size += sprintf(&buf[size],
2507 " device %s %s (ftl rebuild %d %%)\n",
2508 dev_name(&dd->pdev->dev),
2509 id_buf,
2510 status);
2511 } else {
2512 size += sprintf(&buf[size],
2513 " device %s %s\n",
2514 dev_name(&dd->pdev->dev),
2515 id_buf);
2516 }
2517 }
2518 }
2519
2520 size += sprintf(&buf[size], "Devices Being Removed:\n");
2521 list_for_each_entry_safe(dd, tmp, &removing_list, remove_list) {
2522 if (dd->pdev) {
2523 if (dd->port &&
2524 dd->port->identify &&
2525 dd->port->identify_valid) {
2526 strlcpy(id_buf,
2527 (char *) (dd->port->identify+10), 21);
2528 status = *(dd->port->identify + 141);
2529 } else {
2530 memset(id_buf, 0, 42);
2531 status = 0;
2532 }
2533
2534 if (dd->port &&
2535 test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags)) {
2536 size += sprintf(&buf[size],
2537 " device %s %s (ftl rebuild %d %%)\n",
2538 dev_name(&dd->pdev->dev),
2539 id_buf,
2540 status);
2541 } else {
2542 size += sprintf(&buf[size],
2543 " device %s %s\n",
2544 dev_name(&dd->pdev->dev),
2545 id_buf);
2546 }
2547 }
2548 }
2549 spin_unlock_irqrestore(&dev_lock, flags);
2550
2551 return size;
2552 }
2553
2554 static ssize_t mtip_hw_read_device_status(struct file *f, char __user *ubuf,
2555 size_t len, loff_t *offset)
2556 {
2557 struct driver_data *dd = (struct driver_data *)f->private_data;
2558 int size = *offset;
2559 char *buf;
2560 int rv = 0;
2561
2562 if (!len || *offset)
2563 return 0;
2564
2565 buf = kzalloc(MTIP_DFS_MAX_BUF_SIZE, GFP_KERNEL);
2566 if (!buf) {
2567 dev_err(&dd->pdev->dev,
2568 "Memory allocation: status buffer\n");
2569 return -ENOMEM;
2570 }
2571
2572 size += show_device_status(NULL, buf);
2573
2574 *offset = size <= len ? size : len;
2575 size = copy_to_user(ubuf, buf, *offset);
2576 if (size)
2577 rv = -EFAULT;
2578
2579 kfree(buf);
2580 return rv ? rv : *offset;
2581 }
2582
2583 static ssize_t mtip_hw_read_registers(struct file *f, char __user *ubuf,
2584 size_t len, loff_t *offset)
2585 {
2586 struct driver_data *dd = (struct driver_data *)f->private_data;
2587 char *buf;
2588 u32 group_allocated;
2589 int size = *offset;
2590 int n, rv = 0;
2591
2592 if (!len || size)
2593 return 0;
2594
2595 buf = kzalloc(MTIP_DFS_MAX_BUF_SIZE, GFP_KERNEL);
2596 if (!buf) {
2597 dev_err(&dd->pdev->dev,
2598 "Memory allocation: register buffer\n");
2599 return -ENOMEM;
2600 }
2601
2602 size += sprintf(&buf[size], "H/ S ACTive : [ 0x");
2603
2604 for (n = dd->slot_groups-1; n >= 0; n--)
2605 size += sprintf(&buf[size], "%08X ",
2606 readl(dd->port->s_active[n]));
2607
2608 size += sprintf(&buf[size], "]\n");
2609 size += sprintf(&buf[size], "H/ Command Issue : [ 0x");
2610
2611 for (n = dd->slot_groups-1; n >= 0; n--)
2612 size += sprintf(&buf[size], "%08X ",
2613 readl(dd->port->cmd_issue[n]));
2614
2615 size += sprintf(&buf[size], "]\n");
2616 size += sprintf(&buf[size], "H/ Completed : [ 0x");
2617
2618 for (n = dd->slot_groups-1; n >= 0; n--)
2619 size += sprintf(&buf[size], "%08X ",
2620 readl(dd->port->completed[n]));
2621
2622 size += sprintf(&buf[size], "]\n");
2623 size += sprintf(&buf[size], "H/ PORT IRQ STAT : [ 0x%08X ]\n",
2624 readl(dd->port->mmio + PORT_IRQ_STAT));
2625 size += sprintf(&buf[size], "H/ HOST IRQ STAT : [ 0x%08X ]\n",
2626 readl(dd->mmio + HOST_IRQ_STAT));
2627 size += sprintf(&buf[size], "\n");
2628
2629 size += sprintf(&buf[size], "L/ Commands in Q : [ 0x");
2630
2631 for (n = dd->slot_groups-1; n >= 0; n--) {
2632 if (sizeof(long) > sizeof(u32))
2633 group_allocated =
2634 dd->port->cmds_to_issue[n/2] >> (32*(n&1));
2635 else
2636 group_allocated = dd->port->cmds_to_issue[n];
2637 size += sprintf(&buf[size], "%08X ", group_allocated);
2638 }
2639 size += sprintf(&buf[size], "]\n");
2640
2641 *offset = size <= len ? size : len;
2642 size = copy_to_user(ubuf, buf, *offset);
2643 if (size)
2644 rv = -EFAULT;
2645
2646 kfree(buf);
2647 return rv ? rv : *offset;
2648 }
2649
2650 static ssize_t mtip_hw_read_flags(struct file *f, char __user *ubuf,
2651 size_t len, loff_t *offset)
2652 {
2653 struct driver_data *dd = (struct driver_data *)f->private_data;
2654 char *buf;
2655 int size = *offset;
2656 int rv = 0;
2657
2658 if (!len || size)
2659 return 0;
2660
2661 buf = kzalloc(MTIP_DFS_MAX_BUF_SIZE, GFP_KERNEL);
2662 if (!buf) {
2663 dev_err(&dd->pdev->dev,
2664 "Memory allocation: flag buffer\n");
2665 return -ENOMEM;
2666 }
2667
2668 size += sprintf(&buf[size], "Flag-port : [ %08lX ]\n",
2669 dd->port->flags);
2670 size += sprintf(&buf[size], "Flag-dd : [ %08lX ]\n",
2671 dd->dd_flag);
2672
2673 *offset = size <= len ? size : len;
2674 size = copy_to_user(ubuf, buf, *offset);
2675 if (size)
2676 rv = -EFAULT;
2677
2678 kfree(buf);
2679 return rv ? rv : *offset;
2680 }
2681
2682 static const struct file_operations mtip_device_status_fops = {
2683 .owner = THIS_MODULE,
2684 .open = simple_open,
2685 .read = mtip_hw_read_device_status,
2686 .llseek = no_llseek,
2687 };
2688
2689 static const struct file_operations mtip_regs_fops = {
2690 .owner = THIS_MODULE,
2691 .open = simple_open,
2692 .read = mtip_hw_read_registers,
2693 .llseek = no_llseek,
2694 };
2695
2696 static const struct file_operations mtip_flags_fops = {
2697 .owner = THIS_MODULE,
2698 .open = simple_open,
2699 .read = mtip_hw_read_flags,
2700 .llseek = no_llseek,
2701 };
2702
2703 /*
2704 * Create the sysfs related attributes.
2705 *
2706 * @dd Pointer to the driver data structure.
2707 * @kobj Pointer to the kobj for the block device.
2708 *
2709 * return value
2710 * 0 Operation completed successfully.
2711 * -EINVAL Invalid parameter.
2712 */
2713 static int mtip_hw_sysfs_init(struct driver_data *dd, struct kobject *kobj)
2714 {
2715 if (!kobj || !dd)
2716 return -EINVAL;
2717
2718 if (sysfs_create_file(kobj, &dev_attr_status.attr))
2719 dev_warn(&dd->pdev->dev,
2720 "Error creating 'status' sysfs entry\n");
2721 return 0;
2722 }
2723
2724 /*
2725 * Remove the sysfs related attributes.
2726 *
2727 * @dd Pointer to the driver data structure.
2728 * @kobj Pointer to the kobj for the block device.
2729 *
2730 * return value
2731 * 0 Operation completed successfully.
2732 * -EINVAL Invalid parameter.
2733 */
2734 static int mtip_hw_sysfs_exit(struct driver_data *dd, struct kobject *kobj)
2735 {
2736 if (!kobj || !dd)
2737 return -EINVAL;
2738
2739 sysfs_remove_file(kobj, &dev_attr_status.attr);
2740
2741 return 0;
2742 }
2743
2744 static int mtip_hw_debugfs_init(struct driver_data *dd)
2745 {
2746 if (!dfs_parent)
2747 return -1;
2748
2749 dd->dfs_node = debugfs_create_dir(dd->disk->disk_name, dfs_parent);
2750 if (IS_ERR_OR_NULL(dd->dfs_node)) {
2751 dev_warn(&dd->pdev->dev,
2752 "Error creating node %s under debugfs\n",
2753 dd->disk->disk_name);
2754 dd->dfs_node = NULL;
2755 return -1;
2756 }
2757
2758 debugfs_create_file("flags", S_IRUGO, dd->dfs_node, dd,
2759 &mtip_flags_fops);
2760 debugfs_create_file("registers", S_IRUGO, dd->dfs_node, dd,
2761 &mtip_regs_fops);
2762
2763 return 0;
2764 }
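
/*
 * With debugfs mounted at the usual location, the two files created
 * above appear as /sys/kernel/debug/<parent>/<disk>/flags and
 * .../registers, where <parent> is the directory dfs_parent refers
 * to (the driver's top-level debugfs directory).
 */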
2765
2766 static void mtip_hw_debugfs_exit(struct driver_data *dd)
2767 {
2768 if (dd->dfs_node)
2769 debugfs_remove_recursive(dd->dfs_node);
2770 }
2771
2772 /*
2773 * Perform any init/resume time hardware setup
2774 *
2775 * @dd Pointer to the driver data structure.
2776 *
2777 * return value
2778 * None
2779 */
2780 static inline void hba_setup(struct driver_data *dd)
2781 {
2782 u32 hwdata;
2783 hwdata = readl(dd->mmio + HOST_HSORG);
2784
2785 /* interrupt bug workaround: use only 1 IS bit.*/
2786 writel(hwdata |
2787 HSORG_DISABLE_SLOTGRP_INTR |
2788 HSORG_DISABLE_SLOTGRP_PXIS,
2789 dd->mmio + HOST_HSORG);
2790 }
2791
2792 static int mtip_device_unaligned_constrained(struct driver_data *dd)
2793 {
2794 return (dd->pdev->device == P420M_DEVICE_ID ? 1 : 0);
2795 }
2796
2797 /*
2798 * Detect the details of the product, and store anything needed
2799 * into the driver data structure. This includes product type and
2800 * version and number of slot groups.
2801 *
2802 * @dd Pointer to the driver data structure.
2803 *
2804 * return value
2805 * None
2806 */
2807 static void mtip_detect_product(struct driver_data *dd)
2808 {
2809 u32 hwdata;
2810 unsigned int rev, slotgroups;
2811
2812 /*
2813 * HBA base + 0xFC [15:0] - vendor-specific hardware interface
2814 * info register:
2815 * [15:8] hardware/software interface rev#
2816 * [ 3] asic-style interface
2817 * [ 2:0] number of slot groups, minus 1 (only valid for asic-style).
2818 */
2819 hwdata = readl(dd->mmio + HOST_HSORG);
2820
2821 dd->product_type = MTIP_PRODUCT_UNKNOWN;
2822 dd->slot_groups = 1;
2823
2824 if (hwdata & 0x8) {
2825 dd->product_type = MTIP_PRODUCT_ASICFPGA;
2826 rev = (hwdata & HSORG_HWREV) >> 8;
2827 slotgroups = (hwdata & HSORG_SLOTGROUPS) + 1;
2828 dev_info(&dd->pdev->dev,
2829 "ASIC-FPGA design, HS rev 0x%x, "
2830 "%i slot groups [%i slots]\n",
2831 rev,
2832 slotgroups,
2833 slotgroups * 32);
2834
2835 if (slotgroups > MTIP_MAX_SLOT_GROUPS) {
2836 dev_warn(&dd->pdev->dev,
2837 "Warning: driver only supports "
2838 "%i slot groups.\n", MTIP_MAX_SLOT_GROUPS);
2839 slotgroups = MTIP_MAX_SLOT_GROUPS;
2840 }
2841 dd->slot_groups = slotgroups;
2842 return;
2843 }
2844
2845 dev_warn(&dd->pdev->dev, "Unrecognized product id\n");
2846 }
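
/*
 * Worked example of the decode above, assuming HSORG_HWREV masks bits
 * [15:8] and HSORG_SLOTGROUPS masks bits [2:0] as the register layout
 * comment states: hwdata = 0x021b has bit 3 set (ASIC-style), giving
 * rev = 0x02 and slotgroups = 0x3 + 1 = 4, i.e. 128 command slots.
 */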
2847
2848 /*
2849 * Blocking wait for FTL rebuild to complete
2850 *
2851 * @dd Pointer to the DRIVER_DATA structure.
2852 *
2853 * return value
2854 * 0 FTL rebuild completed successfully
2855 * -EFAULT FTL rebuild error/timeout/interruption
2856 */
2857 static int mtip_ftl_rebuild_poll(struct driver_data *dd)
2858 {
2859 unsigned long timeout, cnt = 0, start;
2860
2861 dev_warn(&dd->pdev->dev,
2862